├── .cargo └── config.toml ├── .devcontainer.json ├── .dockerignore ├── .env.example ├── .envrc ├── .github ├── ISSUE_TEMPLATE │ ├── bug_report.md │ └── feature_request.md └── workflows │ ├── audit.dontdoyml │ ├── manual.yml │ ├── on_main.yml │ └── on_pull_request.yml ├── .gitignore ├── .gitmodules ├── .pre-commit-config.yaml ├── .vscode └── bookmarks.json ├── CHANGELOG.md ├── CODEOWNERS ├── Cargo.lock ├── Cargo.toml ├── LICENSE ├── Makefile ├── README.md ├── api ├── bruno.json ├── environments │ └── local.bru ├── plasma-get.bru └── plasma-put.bru ├── bin ├── cli │ ├── Cargo.toml │ ├── README.md │ └── src │ │ └── main.rs └── sidecar │ ├── Cargo.toml │ ├── Dockerfile │ └── src │ ├── main.rs │ └── plasma.rs ├── cliff.toml ├── contracts ├── blob-registry │ ├── Cargo.toml │ ├── src │ │ ├── events.rs │ │ └── lib.rs │ └── tests │ │ └── tests.rs └── blob-store │ ├── Cargo.toml │ ├── build.sh │ ├── src │ └── lib.rs │ └── tests │ └── tests.rs ├── crates ├── da-rpc-sys │ ├── Cargo.toml │ ├── Dockerfile │ ├── Makefile │ ├── build.rs │ └── src │ │ └── lib.rs ├── da-rpc │ ├── Cargo.toml │ └── src │ │ ├── lib.rs │ │ └── near │ │ ├── config.rs │ │ └── mod.rs ├── http-api-data │ ├── Cargo.toml │ └── src │ │ └── lib.rs └── primitives │ ├── Cargo.toml │ └── src │ └── lib.rs ├── devenv.lock ├── devenv.nix ├── devenv.yaml ├── docker-compose.yml ├── docs ├── OP-Alt-DA.md ├── README.md ├── da_rpc_client.md ├── optimisim_containers.md ├── system_context.md └── test.md ├── eth ├── .editorconfig ├── .env.example ├── .gitignore ├── .prettierignore ├── .prettierrc.yml ├── .solhint.json ├── README.md ├── bun.lockb ├── foundry.toml ├── justfile ├── package.json ├── remappings.txt ├── script │ ├── Base.s.sol │ └── Deploy.s.sol ├── src │ └── NearDataAvailability.sol └── test │ └── NearDataAvailability.t.sol ├── flake.lock ├── flake.nix ├── go.mod ├── go.sum ├── gopkg ├── da-rpc │ ├── README.md │ ├── lib │ │ └── libnear_da_rpc_sys.h │ ├── near.go │ ├── near_darwin.go │ ├── near_test.go │ └── 
near_unix.go └── sidecar │ ├── near.go │ └── near_test.go ├── http-config.template.json ├── justfile ├── recipe.json ├── rust-toolchain.toml ├── scripts └── enrich.sh ├── taplo.toml ├── test ├── genesis.json ├── http-sidecar.json ├── node_key.json ├── sandbox.Dockerfile └── validator_key.json └── ws.code-workspace /.cargo/config.toml: -------------------------------------------------------------------------------- 1 | [build] 2 | rustflags = ["-C", "link-args=-s"] 3 | -------------------------------------------------------------------------------- /.devcontainer.json: -------------------------------------------------------------------------------- 1 | { 2 | "customizations": { 3 | "vscode": { 4 | "extensions": [ 5 | "mkhl.direnv" 6 | ] 7 | } 8 | }, 9 | "image": "ghcr.io/cachix/devenv:latest", 10 | "overrideCommand": false, 11 | "updateContentCommand": "devenv test" 12 | } 13 | -------------------------------------------------------------------------------- /.dockerignore: -------------------------------------------------------------------------------- 1 | .github 2 | .vscode 3 | .direnv 4 | .git 5 | 6 | node_modules 7 | **/node_modules 8 | 9 | .env 10 | **/.env 11 | 12 | # Ignore rust target dir to avoid accidentally borking dependencies 13 | target 14 | gopkg/*/lib/*.a 15 | gopkg/*/lib/*.so 16 | # Same as above 17 | 18 | 19 | 20 | -------------------------------------------------------------------------------- /.env.example: -------------------------------------------------------------------------------- 1 | TEST_NEAR_ACCOUNT=sdksjfhshdfkj.testnet # Used in tests 2 | TEST_NEAR_SECRET=ed25519:welwekrj # Same 3 | 4 | # If you use NixOS, you need to build near-sandbox NEAR_SANDBOX_BIN_PATH=/etc/profiles/per-user/common/bin/near-sandbox 5 | 6 | -------------------------------------------------------------------------------- /.envrc: -------------------------------------------------------------------------------- 1 | source_url
"https://raw.githubusercontent.com/cachix/devenv/95f329d49a8a5289d31e0982652f7058a189bfca/direnvrc" "sha256-d+8cBpDfDBj41inrADaJt+bDWhOktwslgoP5YiGJ1v0=" 2 | 3 | use devenv 4 | 5 | # FIXME: Can't remember why we had this but it borks the dotenv integration 6 | # https://github.com/cachix/devenv/issues/1084#issuecomment-2035339243 7 | # for env in shellHook AR AR_FOR_BUILD AS AS_FOR_BUILD CC CC_FOR_BUILD CONFIG_SHELL CXX \ 8 | # CXX_FOR_BUILD DETERMINISTIC_BUILD DEVENV_RUNTIME GETTEXTDATADIRS_FOR_BUILD LD \ 9 | # LD_DYLD_PATH LD_FOR_BUILD MACOSX_DEPLOYMENT_TARGET NIX_BINTOOLS \ 10 | # NIX_BINTOOLS_FOR_BUILD NIX_BINTOOLS_WRAPPER_TARGET_BUILD_x86_64_apple_darwin \ 11 | # NIX_BINTOOLS_WRAPPER_TARGET_HOST_aarch64_unknown_linux_gnu \ 12 | # NIX_BINTOOLS_WRAPPER_TARGET_HOST_x86_64_apple_darwin \ 13 | # NIX_BINTOOLS_WRAPPER_TARGET_HOST_x86_64_unknown_linux_gnu NIX_CC \ 14 | # NIX_CC_FOR_BUILD NIX_CC_WRAPPER_TARGET_BUILD_x86_64_apple_darwin \ 15 | # NIX_CC_WRAPPER_TARGET_HOST_aarch64_unknown_linux_gnu \ 16 | # NIX_CC_WRAPPER_TARGET_HOST_x86_64_apple_darwin \ 17 | # NIX_CC_WRAPPER_TARGET_HOST_x86_64_unknown_linux_gnu NIX_CFLAGS_COMPILE \ 18 | # NIX_CFLAGS_COMPILE_FOR_BUILD NIX_COREFOUNDATION_RPATH NIX_DONT_SET_RPATH \ 19 | # NIX_DONT_SET_RPATH_FOR_BUILD NIX_ENFORCE_NO_NATIVE NIX_HARDENING_ENABLE \ 20 | # NIX_IGNORE_LD_THROUGH_GCC NIX_LDFLAGS NIX_LDFLAGS_FOR_BUILD NIX_NO_SELF_RPATH \ 21 | # NIX_PKG_CONFIG_WRAPPER_TARGET_HOST_x86_64_apple_darwin NIX_STORE NM NM_FOR_BUILD \ 22 | # NODE_PATH OBJCOPY OBJDUMP PATH_LOCALE PKG_CONFIG PYTHONHASHSEED PYTHONNOUSERSITE \ 23 | # PYTHONPATH RANLIB RANLIB_FOR_BUILD READELF SIZE SIZE_FOR_BUILD SOURCE_DATE_EPOCH \ 24 | # STRINGS STRINGS_FOR_BUILD STRIP STRIP_FOR_BUILD WINDRES ZERO_AR_DATE \ 25 | # __darwinAllowLocalNetworking __impureHostDeps __propagatedImpureHostDeps \ 26 | # __propagatedSandboxProfile __sandboxProfile cmakeFlags configureFlags \ 27 | # dontAddDisableDepTrack mesonFlags system; do 28 | # unset $env; 29 | # done 30 | 31 | # 
export PKG_CONFIG_PATH="$DEVENV_PROFILE/lib/pkgconfig:''${PKG_CONFIG_PATH-}" 32 | # export LD_LIBRARY_PATH="$DEVENV_PROFILE/lib:''${LD_LIBRARY_PATH-}" 33 | # export LIBRARY_PATH="$DEVENV_PROFILE/lib:''${LIBRARY_PATH-}" 34 | # export C_INCLUDE_PATH="$DEVENV_PROFILE/include:''${C_INCLUDE_PATH-}" 35 | # export XDG_DATA_DIRS="$DEVENV_PROFILE/share:''${XDG_DATA_DIRS-}" 36 | # export XDG_CONFIG_DIRS="$DEVENV_PROFILE/etc/xdg:''${XDG_CONFIG_DIRS-}" 37 | 38 | 39 | 40 | 41 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/bug_report.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Bug report 3 | about: Create a report to help us improve 4 | title: '' 5 | labels: '' 6 | assignees: '' 7 | 8 | --- 9 | 10 | **Describe the bug** 11 | A clear and concise description of what the bug is. 12 | 13 | **To Reproduce** 14 | Steps to reproduce the behavior: 15 | 1. Go to '...' 16 | 2. Click on '....' 17 | 3. Scroll down to '....' 18 | 4. See error 19 | 20 | **Expected behavior** 21 | A clear and concise description of what you expected to happen. 22 | 23 | **Screenshots** 24 | If applicable, add screenshots to help explain your problem. 25 | 26 | **Desktop (please complete the following information):** 27 | - OS: [e.g. iOS] 28 | - Browser [e.g. chrome, safari] 29 | - Version [e.g. 22] 30 | 31 | **Smartphone (please complete the following information):** 32 | - Device: [e.g. iPhone6] 33 | - OS: [e.g. iOS8.1] 34 | - Browser [e.g. stock browser, safari] 35 | - Version [e.g. 22] 36 | 37 | **Additional context** 38 | Add any other context about the problem here. 
39 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/feature_request.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Feature request 3 | about: Suggest an idea for this project 4 | title: '' 5 | labels: '' 6 | assignees: '' 7 | 8 | --- 9 | 10 | **Is your feature request related to a problem? Please describe.** 11 | A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] 12 | 13 | **Describe the solution you'd like** 14 | A clear and concise description of what you want to happen. 15 | 16 | **Describe alternatives you've considered** 17 | A clear and concise description of any alternative solutions or features you've considered. 18 | 19 | **Additional context** 20 | Add any other context or screenshots about the feature request here. 21 | -------------------------------------------------------------------------------- /.github/workflows/audit.dontdoyml: -------------------------------------------------------------------------------- 1 | name: Security audit 2 | on: 3 | push: 4 | paths: 5 | - '**/Cargo.toml' 6 | - '**/Cargo.lock' 7 | jobs: 8 | security_audit: 9 | runs-on: ubuntu-latest 10 | steps: 11 | - uses: actions/checkout@v1 12 | - uses: actions-rs/audit-check@v1 13 | with: 14 | token: ${{ secrets.CEJAS_PERSONAL_ACCESS_TOKEN }} 15 | -------------------------------------------------------------------------------- /.github/workflows/manual.yml: -------------------------------------------------------------------------------- 1 | name: "Manual Deploying Rollup Data Availability" 2 | 3 | on: workflow_dispatch 4 | 5 | jobs: 6 | git-submodules-update: 7 | name: "Build and deploy Rollup Data Availability" 8 | runs-on: ubuntu-latest 9 | permissions: 10 | contents: read 11 | packages: write 12 | steps: 13 | - name: Checkout 14 | uses: actions/checkout@v3 15 | 16 | - name: Login to GitHub Container Registry 17 | uses: 
docker/login-action@v3 18 | with: 19 | registry: ghcr.io 20 | username: ${{ github.actor }} 21 | password: ${{ secrets.GITHUB_TOKEN }} 22 | 23 | # - name: Build "da-rpc" Docker image and push 24 | # uses: docker/build-push-action@v5 25 | # with: 26 | # context: . 27 | # push: true 28 | # file: ./crates/da-rpc-sys/Dockerfile 29 | # tags: | 30 | # ghcr.io/nuffle-labs/data-availability/da-rpc:${{ github.sha }} 31 | # ghcr.io/nuffle-labs/data-availability/da-rpc:latest 32 | 33 | - name: Build "sidecar" Docker image and push 34 | uses: docker/build-push-action@v5 35 | with: 36 | context: . 37 | push: true 38 | file: ./bin/sidecar/Dockerfile 39 | tags: | 40 | ghcr.io/nuffle-labs/data-availability/sidecar:${{ github.sha }} 41 | ghcr.io/nuffle-labs/data-availability/sidecar:latest 42 | 43 | changelog: 44 | name: Generate changelog 45 | runs-on: ubuntu-latest 46 | outputs: 47 | release_body: ${{ steps.git-cliff.outputs.content }} 48 | steps: 49 | - name: Checkout 50 | uses: actions/checkout@v4 51 | with: 52 | fetch-depth: 0 53 | 54 | - name: Generate a changelog 55 | uses: orhun/git-cliff-action@v3 56 | id: git-cliff 57 | with: 58 | config: cliff.toml 59 | args: -vv --latest --strip header 60 | env: 61 | OUTPUT: CHANGES.md 62 | GITHUB_REPO: ${{ github.repository }} 63 | 64 | # use release body in the same job 65 | - name: Upload the binary releases 66 | uses: svenstaro/upload-release-action@v2 67 | with: 68 | file: binary_release.zip 69 | repo_token: ${{ secrets.GITHUB_TOKEN }} 70 | tag: ${{ github.ref }} 71 | body: ${{ steps.git-cliff.outputs.content }} 72 | -------------------------------------------------------------------------------- /.github/workflows/on_main.yml: -------------------------------------------------------------------------------- 1 | name: "Deploying Rollup Data Availability" 2 | 3 | on: 4 | push: 5 | branches: 6 | - main 7 | 8 | env: 9 | REPOSITORY: "us-docker.pkg.dev/pagoda-solutions-dev/rollup-data-availability" 10 | 11 | jobs: 12 | 
git-submodules-update: 13 | name: "Build and deploy Rollup Data Availability" 14 | runs-on: ubuntu-latest 15 | permissions: 16 | contents: read 17 | packages: write 18 | steps: 19 | - name: Checkout 20 | uses: actions/checkout@v3 21 | 22 | - name: Login to GitHub Container Registry 23 | uses: docker/login-action@v3 24 | with: 25 | registry: ghcr.io 26 | username: ${{ github.actor }} 27 | password: ${{ secrets.GITHUB_TOKEN }} 28 | 29 | - name: Build "da-rpc" Docker image and push 30 | uses: docker/build-push-action@v5 31 | with: 32 | context: . 33 | push: true 34 | file: ./crates/da-rpc-sys/Dockerfile 35 | tags: | 36 | ghcr.io/nuffle-labs/data-availability/da-rpc:${{ github.sha }} 37 | ghcr.io/nuffle-labs/data-availability/da-rpc:latest 38 | 39 | - name: Build "sidecar" Docker image and push 40 | uses: docker/build-push-action@v5 41 | with: 42 | context: . 43 | push: true 44 | file: ./bin/sidecar/Dockerfile 45 | tags: | 46 | ghcr.io/nuffle-labs/data-availability/sidecar:${{ github.sha }} 47 | ghcr.io/nuffle-labs/data-availability/sidecar:latest 48 | 49 | changelog: 50 | name: Generate changelog 51 | runs-on: ubuntu-latest 52 | outputs: 53 | release_body: ${{ steps.git-cliff.outputs.content }} 54 | steps: 55 | - name: Checkout 56 | uses: actions/checkout@v4 57 | with: 58 | fetch-depth: 0 59 | 60 | - name: Generate a changelog 61 | uses: orhun/git-cliff-action@v3 62 | id: git-cliff 63 | with: 64 | config: cliff.toml 65 | args: -vv --latest --strip header 66 | env: 67 | OUTPUT: CHANGES.md 68 | GITHUB_REPO: ${{ github.repository }} 69 | 70 | # use release body in the same job 71 | - name: Upload the binary releases 72 | uses: svenstaro/upload-release-action@v2 73 | with: 74 | file: binary_release.zip 75 | repo_token: ${{ secrets.GITHUB_TOKEN }} 76 | tag: ${{ github.ref }} 77 | body: ${{ steps.git-cliff.outputs.content }} 78 | -------------------------------------------------------------------------------- /.github/workflows/on_pull_request.yml: 
-------------------------------------------------------------------------------- 1 | name: "Check PR is ready for merge" 2 | 3 | on: 4 | pull_request: 5 | branches: 6 | - main 7 | 8 | jobs: 9 | changes: 10 | runs-on: ubuntu-latest 11 | permissions: 12 | pull-requests: read 13 | steps: 14 | # For pull requests it's not necessary to checkout the code 15 | - uses: dorny/paths-filter@v3 16 | id: filter 17 | with: 18 | filters: | 19 | rust: 20 | - '.github/**' 21 | - '**/*.rs' 22 | - '**/Cargo.toml' 23 | - '**/Cargo.lock' 24 | - '**/rust-toolchain**' 25 | - 'flake.nix' 26 | - 'flake.lock' 27 | - 'scripts/**' 28 | - 'justfile' 29 | go: 30 | - '.github/**' 31 | - 'crates/da-rpc-sys/build.rs' 32 | - 'crates/da-rpc-sys/src/lib.rs' 33 | - '**/*.go' 34 | - '**/go.mod' 35 | - '**/go.sum' 36 | - '**/gopkg.lock' 37 | - 'scripts/**' 38 | - 'justfile' 39 | eth: 40 | - '.github/**' 41 | - '**/*.sol' 42 | - 'eth/foundry.toml' 43 | - 'eth/justfile' 44 | - 'eth/package.json' 45 | - 'eth/package-lock.json' 46 | - 'scripts/**' 47 | - 'justfile' 48 | outputs: 49 | rust: ${{ steps.filter.outputs.rust }} 50 | go: ${{ steps.filter.outputs.go }} 51 | eth: ${{ steps.filter.outputs.eth }} 52 | 53 | rust: 54 | needs: changes 55 | if: ${{ needs.changes.outputs.rust == 'true' }} 56 | runs-on: ubuntu-latest 57 | steps: 58 | - name: Checkout 59 | uses: actions/checkout@v3 60 | with: 61 | submodules: "recursive" 62 | 63 | - uses: actions-rust-lang/setup-rust-toolchain@v1 64 | 65 | - uses: Swatinem/rust-cache@v2 66 | 67 | - name: "Install cargo-nextest" 68 | uses: taiki-e/install-action@v2 69 | with: 70 | tool: nextest 71 | 72 | - name: 📜 Lint code format 73 | uses: actions-rs/cargo@v1 74 | with: 75 | command: fmt 76 | args: --all -- --check 77 | 78 | - name: "Build contracts" 79 | run: make build-contracts 80 | 81 | - name: "Ensure target dirs exist" 82 | run: mkdir -p target/near/near_da_blob_store target/near/blob_registry 83 | 84 | - name: "Run tests" 85 | run: TEST_NEAR_ACCOUNT=${{ 
secrets.TEST_NEAR_ACCOUNT }} TEST_NEAR_SECRET=${{ secrets.TEST_NEAR_SECRET }} cargo nextest run --workspace --locked 86 | # Nice to have, turned off for now 87 | # - name: "Check for bloat" 88 | # uses: orf/cargo-bloat-action@v1 89 | # with: 90 | # token: ${{ secrets.CEJAS_PERSONAL_ACCESS_TOKEN }} 91 | go: 92 | needs: changes 93 | if: ${{ needs.changes.outputs.go == 'true' }} 94 | runs-on: ubuntu-latest 95 | steps: 96 | - name: Checkout 97 | uses: actions/checkout@v3 98 | with: 99 | submodules: "recursive" 100 | 101 | - uses: actions-rust-lang/setup-rust-toolchain@v1 102 | 103 | - uses: Swatinem/rust-cache@v2 104 | 105 | - name: Login to GitHub Container Registry 106 | run: echo "${{ secrets.GITHUB_TOKEN }}" | docker login ghcr.io -u $ --password-stdin 107 | 108 | - uses: hoverkraft-tech/compose-action@v2.0.0 109 | 110 | # - name: Enrich the test config 111 | # run: | 112 | # HTTP_API_TEST_SECRET_KEY=${{ secrets.HTTP_API_TEST_SECRET_KEY }} \ 113 | # HTTP_API_TEST_ACCOUNT_ID=${{ secrets.HTTP_API_TEST_ACCOUNT_ID }} \ 114 | # HTTP_API_TEST_NAMESPACE=${{ secrets.HTTP_API_TEST_NAMESPACE }} \ 115 | # scripts/enrich.sh 116 | 117 | - name: "Ubuntu is missing libm :<" 118 | run: sudo apt-get update && sudo apt-get install -y build-essential 119 | 120 | - name: "Install FFI library" 121 | run: make da-rpc-sys 122 | 123 | - run: sudo cp ./gopkg/da-rpc/lib/* /usr/local/lib 124 | 125 | - name: "Test gopkg" 126 | working-directory: ./gopkg/da-rpc 127 | run: go test -v 128 | 129 | 130 | - name: "Test sidecar api" 131 | working-directory: ./gopkg/sidecar 132 | run: go test -v 133 | 134 | eth-contracts: 135 | needs: changes 136 | if: ${{ needs.changes.outputs.eth == 'true' }} 137 | runs-on: ubuntu-latest 138 | defaults: 139 | run: 140 | working-directory: ./eth 141 | steps: 142 | - name: Checkout 143 | uses: actions/checkout@v3 144 | with: 145 | submodules: recursive 146 | token: ${{ secrets.GITHUB_TOKEN }} 147 | 148 | - name: "Install Foundry" 149 | uses: 
"foundry-rs/foundry-toolchain@v1" 150 | 151 | - name: "Install Bun" 152 | uses: "oven-sh/setup-bun@v1" 153 | 154 | - name: "Install the Node.js dependencies" 155 | run: "bun install" 156 | 157 | - name: "Lint the code" 158 | run: "bun run lint" 159 | 160 | - name: "Add lint summary" 161 | run: | 162 | echo "## Lint result" >> $GITHUB_STEP_SUMMARY 163 | echo "✅ Passed" >> $GITHUB_STEP_SUMMARY 164 | 165 | - name: "Build the contracts and print their size" 166 | run: "forge build --sizes" 167 | 168 | - name: "Add build summary" 169 | run: | 170 | echo "## Build result" >> $GITHUB_STEP_SUMMARY 171 | echo "✅ Passed" >> $GITHUB_STEP_SUMMARY 172 | 173 | - name: "Show the Foundry config" 174 | run: "forge config" 175 | 176 | - name: "Generate a fuzz seed that changes weekly to avoid burning through RPC allowance" 177 | run: > 178 | echo "FOUNDRY_FUZZ_SEED=$( 179 | echo $(($EPOCHSECONDS - $EPOCHSECONDS % 604800)) 180 | )" >> $GITHUB_ENV 181 | 182 | - name: "Run the tests" 183 | run: "forge test --gas-report" 184 | 185 | - name: "Add test summary" 186 | run: | 187 | echo "## Tests result" >> $GITHUB_STEP_SUMMARY 188 | echo "✅ Passed" >> $GITHUB_STEP_SUMMARY 189 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .direnv 2 | .devenv 3 | 4 | # Generated by Cargo 5 | # will have compiled files and executables 6 | debug/ 7 | target/ 8 | lib/ 9 | 10 | # Macos files 11 | **/.DS_Store 12 | 13 | #.envs 14 | .env 15 | # Remove Cargo.lock from gitignore if creating an executable, leave it for libraries 16 | # These are backup files generated by rustfmt 17 | **/*.rs.bk 18 | **/*.swp 19 | 20 | # MSVC Windows builds of rustc generate these, which store debugging information 21 | *.pdb 22 | 23 | op-stack/optimism-private 24 | **throwaway-key.json 25 | 26 | **.env 27 | 28 | # IDE 29 | .idea 30 | 31 | # Http api 32 | http-config.json 33 | http-config.ci.json 34 | 
35 | .devenv.flake.nix 36 | -------------------------------------------------------------------------------- /.gitmodules: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Nuffle-Labs/data-availability/28b6b80099a4be5b66959c0903b730451cfc314a/.gitmodules -------------------------------------------------------------------------------- /.pre-commit-config.yaml: -------------------------------------------------------------------------------- 1 | /nix/store/6mb4p26xygkrndznczn0k0zkw0bdn4gh-pre-commit-config.json -------------------------------------------------------------------------------- /.vscode/bookmarks.json: -------------------------------------------------------------------------------- 1 | { 2 | "files": [ 3 | { 4 | "path": "op-stack/openrpc/types/blob/blob.go", 5 | "bookmarks": [ 6 | { 7 | "line": 96, 8 | "column": 1, 9 | "label": "Blob DTO" 10 | } 11 | ] 12 | }, 13 | { 14 | "path": "op-stack/openrpc/types/blob/commitment.go", 15 | "bookmarks": [ 16 | { 17 | "line": 17, 18 | "column": 20, 19 | "label": "Create Blob commitment" 20 | } 21 | ] 22 | }, 23 | { 24 | "path": "op-stack/optimism/op-node/rollup/derive/calldata_source.go", 25 | "bookmarks": [ 26 | { 27 | "line": 125, 28 | "column": 17, 29 | "label": "Decode EVM data" 30 | } 31 | ] 32 | }, 33 | { 34 | "path": "op-stack/optimism/op-service/txmgr/txmgr.go", 35 | "bookmarks": [ 36 | { 37 | "line": 214, 38 | "column": 26, 39 | "label": "Send blob" 40 | } 41 | ] 42 | }, 43 | { 44 | "path": "contracts/blob-store/src/lib.rs", 45 | "bookmarks": [ 46 | { 47 | "line": 14, 48 | "column": 19, 49 | "label": "Blob store contract" 50 | } 51 | ] 52 | }, 53 | { 54 | "path": "op-stack/optimism-rs/src/driver/mod.rs", 55 | "bookmarks": [ 56 | { 57 | "line": 262, 58 | "column": 20, 59 | "label": "Decode EVM tx state" 60 | }, 61 | { 62 | "line": 270, 63 | "column": 19, 64 | "label": "From Decoded transactions" 65 | } 66 | ] 67 | }, 68 | { 69 | "path": 
"op-stack/optimism-rs/src/l1/mod.rs", 70 | "bookmarks": [ 71 | { 72 | "line": 427, 73 | "column": 11, 74 | "label": "New L1Info from block" 75 | }, 76 | { 77 | "line": 467, 78 | "column": 11, 79 | "label": "Filter tx data from evm" 80 | } 81 | ] 82 | }, 83 | { 84 | "path": "op-stack/optimism-rs/src/derive/stages/batcher_transactions.rs", 85 | "bookmarks": [ 86 | { 87 | "line": 20, 88 | "column": 9, 89 | "label": "Batcher Tx iterator" 90 | }, 91 | { 92 | "line": 46, 93 | "column": 49, 94 | "label": "Creating Batcher FrameData" 95 | }, 96 | { 97 | "line": 59, 98 | "column": 30, 99 | "label": "" 100 | } 101 | ] 102 | }, 103 | { 104 | "path": "op-stack/optimism/op-node/rollup/derive/channel_bank.go", 105 | "bookmarks": [ 106 | { 107 | "line": 141, 108 | "column": 26, 109 | "label": "Frame iterator" 110 | } 111 | ] 112 | } 113 | ] 114 | } -------------------------------------------------------------------------------- /CHANGELOG.md: -------------------------------------------------------------------------------- 1 | # Changelog 2 | 3 | All notable changes to this project will be documented in this file. 
4 | 5 | ## [0.4.0] - 2024-05-14 6 | 7 | ### Features 8 | 9 | - [**breaking**] Migrate to new http api 10 | - Race archival and base 11 | - Update TGAS for max tx size to be 20 12 | - Introduce settlement mode with pessimistic default for now 13 | - Upgrade cli to use blob structure and mode 14 | - Expose mode to the cli 15 | 16 | ### Miscellaneous Tasks 17 | 18 | - Auto changelog 19 | - Bump version and lints 20 | 21 | ### Testing 22 | 23 | - Migrate to a different test blockhash 24 | 25 | ## [0.3.0] - 2024-05-08 26 | 27 | ### Bug Fixes 28 | 29 | - Remove test for log 30 | - Localnet url 31 | - Removed hardcoded Testnet from NewConfig 32 | - Clear error on GetDAError 33 | - Import ffi_helpers Nullable trait 34 | - Remove unnecessary unsafe block 35 | - Ffi, ci for everything, mocks, tests 36 | 37 | ### Co-authored-by 38 | 39 | - Don <37594653+dndll@users.noreply.github.com> 40 | 41 | ### Documentation 42 | 43 | - Update readme 44 | - Add notes on nitro 45 | 46 | ### Features 47 | 48 | - Http server 49 | - Added darwin support for gopkg 50 | - Added api for new_client_file 51 | - Add free_client implementation in go 52 | - Localnet listening to an arbitrary loopback address (#91) 53 | - Replace localnet with customnet 54 | - Use urls instead of SocketAddr for custom 55 | - Add clear_error to da-rpc-sys 56 | - Add clear_error to header bindings 57 | - Remove erasure-commit DAS 58 | - [**breaking**] Remove namespace id in blob 59 | - Initial eth DA tracking contract 60 | - [**breaking**] Sidecar go module 61 | - Bump version to 0.3.0 62 | 63 | ### Miscellaneous Tasks 64 | 65 | - Remove bin directory from workspace 66 | - Remove optimism submodule 67 | - Remove cdk submodule 68 | - Remove cdk contracts submodule 69 | - Library linkage 70 | - Remove deserialization & return value 71 | - Use const slice ref for storage keys 72 | - Bump MSRV 73 | - CODEOWNERS 74 | - Create LICENSE 75 | - Move near-da-primitives out of blob store contract 76 | - Slight reuse 77 | - Actions 
and cleanup 78 | 79 | ### Refactor 80 | 81 | - Remove unnecessary some check on clear_error 82 | - Fix import order 83 | 84 | ### Testing 85 | 86 | - Add error clearing to error handling test 87 | - Remove unnecessary derefs 88 | - Remove unneeded clear since we take the err already 89 | - Add bypass flag for verification until LC is done 90 | 91 | ### Build 92 | 93 | - Lockfile 94 | - Http api docker image 95 | 96 | ## [0.2.3] - 2023-11-15 97 | 98 | ### Bug Fixes 99 | 100 | - Make sure errors aren't causing segfaults 101 | - Blobs are optional from the contract 102 | - Import math libs in go 103 | - Scripts were moved around 104 | - Make the network lowercase 105 | - Render in github 106 | - Builds for macos 107 | - Cargo build should be locked 108 | - Borsh has been updated and the lockfile wasn't force locked 109 | - This project builds binaries - lockfile committed 110 | - Cdk image should be tagged on rebuild 111 | - All the repos are public now - no need for access token 112 | - Commit the header file for libnear_da_rpc_sys 113 | 114 | ### Co-authored-by 115 | 116 | - Don <37594653+dndll@users.noreply.github.com> 117 | - Don <37594653+dndll@users.noreply.github.com> 118 | - Don <37594653+dndll@users.noreply.github.com> 119 | - Jacob 120 | 121 | ### Documentation 122 | 123 | - Readme and scripts 124 | - Update docs for readme 125 | - Arch class diagram for rpc 126 | - Use mermaid code blocks 127 | - Add system context 128 | - Fix render styling 129 | - Add note on fisherman actor 130 | - Add container diagram for optimism 131 | - Add architecture directory to the repository 132 | - Add how-to-integrate comment in the readme 133 | - Fix typo 134 | - Update commitment proposals 135 | 136 | ### Features 137 | 138 | - Update submodules to use DA over NEAR 139 | - Op-rpc with exposed ffi 140 | - Use shared primitives for client & contract 141 | - Ffi client reads 142 | - Generate bindings on build 143 | - Sys crate for go 144 | - Migrate ffi to a sys crate 145 | - 
Introduce a naive merkleization of commitment blobs 146 | - Use a number instead of unbounded bytes for namespaces 147 | - Allow a user to provide sk instead of a file 148 | - [**breaking**] Remove blobs from state 149 | - Light client failover 150 | - Expose module for near-op-rpc-sys 151 | - Utilise go module for ffi client 152 | - Near DA on polygon CDK 153 | - [**breaking**] Migrate naming to da-rpc 154 | - [**breaking**] Migrate go package to da-rpc-go 155 | - Optimize contract 156 | - Kzg commitments over rs encoded grids 157 | - Kzg codeword proof verification 158 | - Crate 159 | - Commit to columns individually 160 | - Commit to the root 161 | - [**breaking**] Convert witness points to affine 162 | 163 | ### Miscellaneous Tasks 164 | 165 | - Submodules 166 | - Add nix compat 167 | - Switch to near branch for openrpc 168 | - Remove njs for now 169 | - Move contract from near-openrpc to here 170 | - Bookmarks 171 | - Add op node to workspace 172 | - Go workspace 173 | - Submod update 174 | - Combing through magi 175 | - Don't override contracts release profile 176 | - Submodules 177 | - Submodules 178 | - Submodules 179 | - Bump rust version and use optimised resolver 180 | - Update light client submodule 181 | - Update LC 182 | - Update submodules 183 | - Scripts for deploying and building 184 | - Set toolchain to stable 185 | - Update submodule 186 | - Add another node to devnet 187 | - Use private repository from near for optimism 188 | - Remove openrpc 189 | - Remove CDK DA 190 | - Remove optimism-rs for now 191 | - Update submodule 192 | - Add how to get validium contracts image 193 | - Op-stack repository structure 194 | - Cdk stack repository structure 195 | - Submodules track main 196 | - Add tests for rust and go 197 | - Circumvent binstall 198 | - Update submodule for lc 199 | - Submodule 200 | - Fix CDK sequencer spam 201 | - Cdk submodule 202 | - Add time unit to readme for epoch 203 | - Unified dependencies 204 | - Fmt toml and rust 205 | - 
Remove point compression for now 206 | - Lints and fmt 207 | - Remove light-client submodule 208 | - Use unpublished version until audit 209 | - Publish images 210 | - Mv gopkg so go can read it 211 | 212 | ### Refactor 213 | 214 | - Get_all returns all blobs for a namespace 215 | 216 | ### Testing 217 | 218 | - Compile the contract at test time 219 | - Add kzg from g1 test 220 | - Ignore integration tests 221 | 222 | ### Build 223 | 224 | - Add makefile for building optimised contract 225 | - Dockerfile for op-rpc 226 | - Use light client in devnet docker 227 | - Create makefile entry to push images to the artifact reg 228 | - Update version 229 | 230 | ### Wip 231 | 232 | - Contract flat storage 233 | 234 | 235 | -------------------------------------------------------------------------------- /CODEOWNERS: -------------------------------------------------------------------------------- 1 | * @dndll 2 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [profile.release] 2 | codegen-units = 1 3 | debug = false 4 | lto = true 5 | opt-level = "z" 6 | overflow-checks = true 7 | panic = "abort" 8 | 9 | # [profile.dev] 10 | # debug = 0 11 | 12 | [workspace.package] 13 | authors = [ "Pagoda " ] 14 | edition = "2021" 15 | version = "0.4.0" 16 | 17 | [workspace] 18 | members = [ "bin/*", "crates/*", "contracts/*" ] 19 | resolver = "2" 20 | 21 | [workspace.dependencies] 22 | # Nostd 23 | borsh = { version = "1.4", default-features = false } 24 | serde = { version = "1.0", default-features = false, features = [ "derive" ] } 25 | serde_with = { version = "3.4", default-features = false, features = [ "hex", "base64", "macros" ] } 26 | 27 | # Std aware 28 | async-trait = "0.1" 29 | eyre = "0.6" 30 | futures = "0.3" 31 | futures-util = "*" 32 | itertools = "*" 33 | hex = "0.4" 34 | log = "0.4" 35 | rand = "0.8" 36 | serde_json = "1.0" 37 | tracing = 
"0.1" 38 | tracing-subscriber = { version = "0.3", features = [ "env-filter" ] } 39 | 40 | # NEAR 41 | near-crypto = "0.21" 42 | near-jsonrpc-client = "0.9" 43 | near-jsonrpc-primitives = "0.21" 44 | near-primitives = "0.21" 45 | near-sdk = "4.0.0" 46 | 47 | [patch.crates-io] 48 | parity-secp256k1 = { git = "https://github.com/paritytech/rust-secp256k1" } 49 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2024 NEAR 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | TAG_PREFIX := us-docker.pkg.dev/pagoda-solutions-dev/rollup-data-availability 2 | IMAGE_TAG := 0.3.0 3 | 4 | #? 
format: format codes 5 | format: 6 | taplo format 7 | cargo fmt --all 8 | 9 | #? submodules: update submodules 10 | submodules: 11 | git submodule update --init --recursive 12 | .PHONY: submodules 13 | 14 | pull-submodules: 15 | git pull --recurse-submodules 16 | .PHONY: pull-submodules 17 | 18 | #? build-contracts: create the blob store contract 19 | build-contracts: 20 | cargo build --package near-da-blob-store --target wasm32-unknown-unknown --release 21 | 22 | #? test-contracts: create the blob store contract and run tests 23 | test-contracts: build-contracts 24 | cargo test --package near-da-blob-store --test tests -- --nocapture 25 | .PHONY: test-contracts 26 | 27 | #? deploy-contracts: deploy the near-da-blob-store contract to the NEAR testnet 28 | deploy-contracts: 29 | near contract deploy $$NEAR_CONTRACT use-file ./target/wasm32-unknown-unknown/release/near_da_blob_store.wasm with-init-call new json-args {} network-config testnet sign-with-keychain 30 | 31 | da-rpc-sys: 32 | make -C ./crates/da-rpc-sys 33 | .PHONY: da-rpc-sys 34 | 35 | #? da-rpc-docker: build docker image 36 | da-rpc-docker: 37 | make -C ./crates/da-rpc-sys docker TAG_PREFIX=$(TAG_PREFIX) IMAGE_TAG=$(IMAGE_TAG) 38 | .PHONY: da-rpc-docker 39 | 40 | #? da-rpc-sys-unix: copy the compiled da-rpc library from the Docker image to the local filesystem 41 | da-rpc-sys-unix: 42 | docker rm dummy 43 | docker create --name dummy $(TAG_PREFIX)/da-rpc:$(IMAGE_TAG) 44 | docker cp dummy:/gopkg/da-rpc/lib ./gopkg/da-rpc/lib 45 | docker rm -f dummy 46 | .PHONY: da-rpc-sys-unix 47 | 48 | #?
cdk-images: pull and tag the cdk-validium-contracts and cdk-validium-node Docker images 49 | cdk-images: 50 | # TODO: when we have public images docker pull "$(TAG_PREFIX)/cdk-validium-contracts:$(IMAGE_TAG)" 51 | docker pull ghcr.io/dndll/cdk-validium-contracts:latest 52 | docker tag ghcr.io/dndll/cdk-validium-contracts:latest "$(TAG_PREFIX)/cdk-validium-contracts:$(IMAGE_TAG)" 53 | $(COMMAND) $(TAG_PREFIX)/cdk-validium-node:latest -f cdk-stack/cdk-validium-node/Dockerfile cdk-stack/cdk-validium-node 54 | docker tag $(TAG_PREFIX)/cdk-validium-node:latest cdk-validium-node 55 | 56 | #? cdk-devnet-up: start the cdk-validium-node development network and explorer 57 | cdk-devnet-up: 58 | make -C ./cdk-stack/cdk-validium-node/test run run-explorer 59 | .PHONY: cdk-devnet-up 60 | 61 | #? cdk-devnet-down: stop the cdk-validium-node development network 62 | cdk-devnet-down: 63 | make -C ./cdk-stack/cdk-validium-node/test stop 64 | .PHONY: cdk-devnet-down 65 | 66 | #? cdk-node: build the cdk-validium-node 67 | cdk-node: 68 | make -C ./cdk-stack/cdk-validium-node build 69 | .PHONY: cdk-node 70 | 71 | #? send-cdk-transfers: run ERC20 transfers script 72 | send-cdk-transfers: 73 | cd cdk-stack/cdk-validium-node/test/benchmarks/sequencer/scripts/erc20-transfers && go run main.go 74 | .PHONY: send-cdk-transfers 75 | 76 | #? cdk-devnet-redeploy-test: build and start the cdk-validium-node development network, then test ERC20 transfers script 77 | cdk-devnet-redeploy-test: cdk-images cdk-devnet-up send-cdk-transfers 78 | .PHONY: cdk-devnet-redeploy-test 79 | 80 | #?
help: get this help message 81 | help: Makefile 82 | @echo " Choose a command to run:" 83 | @sed -n 's/^#?//p' $< | column -t -s ':' | sort | sed -e 's/^/ /' 84 | .PHONY: help -------------------------------------------------------------------------------- /api/bruno.json: -------------------------------------------------------------------------------- 1 | { 2 | "version": "1", 3 | "name": "api", 4 | "type": "collection", 5 | "presets": { 6 | "requestType": "http", 7 | "requestUrl": "{{baseUrl}}" 8 | }, 9 | "ignore": [ 10 | "bin", 11 | "target", 12 | ".git", 13 | ".devenv", 14 | "crates", 15 | "build" 16 | ] 17 | } 18 | -------------------------------------------------------------------------------- /api/environments/local.bru: -------------------------------------------------------------------------------- 1 | vars { 2 | baseUrl: http://localhost:5888 3 | } 4 | -------------------------------------------------------------------------------- /api/plasma-get.bru: -------------------------------------------------------------------------------- 1 | meta { 2 | name: Plasma: get 3 | type: http 4 | seq: 6 5 | } 6 | 7 | get { 8 | url: {{baseUrl}}/plasma/get/{{tx}} 9 | body: none 10 | auth: none 11 | } 12 | 13 | vars:pre-request { 14 | tx: 0x0f0a6be581f3916d5804f61e000b46477f7ae422362f751090189703bd490047 15 | } 16 | -------------------------------------------------------------------------------- /api/plasma-put.bru: -------------------------------------------------------------------------------- 1 | meta { 2 | name: Plasma: put 3 | type: http 4 | seq: 6 5 | } 6 | 7 | post { 8 | url: {{baseUrl}}/plasma/put 9 | body: text 10 | auth: none 11 | } 12 | 13 | headers { 14 | Content-Type: application/octet-stream 15 | } 16 | 17 | body:text { 18 | 123 19 | } 20 | -------------------------------------------------------------------------------- /bin/cli/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | authors.workspace = 
true 3 | edition.workspace = true 4 | name = "near-da-cli" 5 | version.workspace = true 6 | 7 | [dependencies] 8 | anyhow = "1.0.75" 9 | clap = { version = "4.4", features = [ "derive" ] } 10 | hex = "0.4.3" 11 | near-da-http-api-data = { path = "../../crates/http-api-data" } 12 | near-da-primitives = { path = "../../crates/primitives" } 13 | near-da-rpc = { path = "../../crates/da-rpc" } 14 | serde = "1" 15 | serde_json = "1.0.108" 16 | tokio = { version = "1", features = [ "full" ] } 17 | tower-http = { version = "0.4", features = [ "trace" ] } 18 | tracing-subscriber.workspace = true 19 | tracing.workspace = true 20 | -------------------------------------------------------------------------------- /bin/cli/README.md: -------------------------------------------------------------------------------- 1 | # CLI 2 | 3 | This is a CLI for interacting with the NEAR DA smart contract. It allows you to configure the client, get blobs, and submit blobs to the contract. 4 | 5 | ## Usage 6 | 7 | ### Configure 8 | 9 | Provide a `da_config.json` file with the following contents: 10 | ```json 11 | { 12 | "account_id": "throwayaccount.testnet", 13 | "secret_key": "ed25519:zmF3hHyozS6sEutTSHep1ZS51E8B5pybAJt1yvVaFe9DWNTbXwtRYv4AQ5xAvXJFpqggMPtbdP3MkKViswbYc29", 14 | "contract_id": "throwayaccount.testnet", 15 | "network": "Testnet", 16 | "namespace": { 17 | "version": 1, 18 | "id": 1 19 | } 20 | } 21 | ``` 22 | 23 | Alternatively, you can use the `--account-id`, `--secret-key`, `--contract-id`, `--network`, and `--namespace` flags to configure the client. 
24 | 25 | ## Commands 26 | 27 | ### Submit 28 | 29 | ```sh 30 | $ cargo run --bin near-da-cli submit 31 | ``` 32 | ### Get 33 | 34 | ```sh 35 | cargo run --bin near-da-cli get 36 | ``` 37 | -------------------------------------------------------------------------------- /bin/cli/src/main.rs: -------------------------------------------------------------------------------- 1 | use clap; 2 | use clap::{command, Parser}; 3 | use near_da_http_api_data::ConfigureClientRequest; 4 | use near_da_primitives::Mode; 5 | use near_da_rpc::near::config::Config; 6 | use near_da_rpc::near::Client; 7 | use near_da_rpc::{CryptoHash, DataAvailability}; 8 | use serde::{Deserialize, Serialize}; 9 | use std::fmt::Display as FmtDisplay; 10 | use std::str; 11 | use tracing::debug; 12 | 13 | #[derive(Parser, Debug)] 14 | #[command(author, version, about, long_about = None)] 15 | struct Args { 16 | #[clap( 17 | short = 'c', 18 | long = "config", 19 | help = "Path to the client configuration. If not specified, the client can be configured via PUT /config after starting the server.", 20 | default_value = "./bin/cli/da_config.json" 21 | )] 22 | config: Option, 23 | #[command(subcommand)] 24 | command: Commands, 25 | #[clap(short, long)] 26 | mode: Option, 27 | } 28 | struct AppState { 29 | client: Option, 30 | } 31 | 32 | fn config_request_to_config(request: ConfigureClientRequest) -> Result { 33 | Ok(Config { 34 | key: near_da_rpc::near::config::KeyType::SecretKey(request.account_id, request.secret_key), 35 | contract: request.contract_id, 36 | network: request 37 | .network 38 | .as_str() 39 | .try_into() 40 | .map_err(|e: String| anyhow::anyhow!(e))?, 41 | namespace: request 42 | .namespace 43 | .map(|ns| near_da_primitives::Namespace::new(ns.version, ns.id)), 44 | mode: request.mode.unwrap_or_default(), 45 | }) 46 | } 47 | 48 | fn hex_to_bytes(hex: String) -> Result, anyhow::Error> { 49 | let bytes = hex::decode(hex)?; 50 | Ok(bytes) 51 | } 52 | 53 | #[derive(Parser, Debug)] 54 | enum 
Commands { 55 | Submit(SubmitArgs), 56 | Get(GetArgs), 57 | } 58 | 59 | #[derive(Parser, Debug, Serialize, Deserialize)] 60 | struct SubmitArgs { 61 | pub data: String, 62 | } 63 | 64 | #[derive(Parser, Debug)] 65 | struct GetArgs { 66 | pub transaction_id: String, 67 | } 68 | 69 | struct AppError(anyhow::Error); 70 | 71 | impl From for AppError 72 | where 73 | E: Into, 74 | { 75 | fn from(err: E) -> Self { 76 | Self(err.into()) 77 | } 78 | } 79 | 80 | impl FmtDisplay for AppError { 81 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 82 | write!(f, "{}", self.0) 83 | } 84 | } 85 | 86 | async fn submit_blob( 87 | state: AppState, 88 | submit_args: SubmitArgs, 89 | ) -> anyhow::Result { 90 | debug!("submitting blob: {:?}", submit_args); 91 | let client = state 92 | .client 93 | .as_ref() 94 | .ok_or(anyhow::anyhow!("client is not configured"))?; 95 | let data = hex_to_bytes(submit_args.data)?; 96 | let blob_ref = client 97 | .submit(near_da_primitives::Blob::new(data)) 98 | .await 99 | .map_err(|e| anyhow::anyhow!("failed to submit blobs: {}", e))? 100 | .0; 101 | let transaction_id = CryptoHash(blob_ref.transaction_id); 102 | Ok(transaction_id) 103 | } 104 | 105 | async fn get_blob( 106 | state: AppState, 107 | get_args: GetArgs, 108 | ) -> anyhow::Result { 109 | let client = state 110 | .client 111 | .as_ref() 112 | .ok_or(anyhow::anyhow!("client is not configured"))?; 113 | 114 | let blob = client 115 | .get( 116 | get_args 117 | .transaction_id 118 | .parse() 119 | .map_err(|e| anyhow::anyhow!("invalid transaction id: {}", e))?, 120 | ) 121 | .await 122 | .map_err(|e| anyhow::anyhow!("failed to get blob: {}", e))? 
123 | .0; 124 | 125 | let blob = near_da_http_api_data::Blob { data: blob.data }; 126 | 127 | Ok(blob) 128 | } 129 | 130 | #[tokio::main] 131 | async fn main() { 132 | tracing_subscriber::fmt() 133 | .with_target(false) 134 | .compact() 135 | .init(); 136 | 137 | let args = Args::parse(); 138 | let mut state = AppState { client: None }; 139 | 140 | if let Some(path) = args.config { 141 | let file_contents = tokio::fs::read_to_string(path).await.unwrap(); 142 | let config_parse = serde_json::from_str::(&file_contents) 143 | .unwrap_or_else(|e| panic!("failed to parse config: {}", e)); 144 | state.client = Some(Client::new( 145 | &config_request_to_config(config_parse).unwrap(), 146 | )); 147 | } 148 | 149 | match args.command { 150 | Commands::Submit(submit) => match submit_blob(state, submit).await { 151 | Ok(result) => println!("{:?}", result), 152 | Err(e) => println!("{}", e), 153 | }, 154 | Commands::Get(get) => match get_blob(state, get).await { 155 | Ok(blob) => println!("{:?}", blob), 156 | Err(e) => println!("{}", e), 157 | }, 158 | }; 159 | } 160 | -------------------------------------------------------------------------------- /bin/sidecar/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | authors.workspace = true 3 | edition.workspace = true 4 | name = "near-da-sidecar" 5 | version.workspace = true 6 | 7 | [dependencies] 8 | anyhow = "1.0.75" 9 | axum = "0.6" 10 | clap = { version = "4.4", features = [ "derive" ] } 11 | futures-util.workspace = true 12 | futures.workspace = true 13 | hex.workspace = true 14 | itertools.workspace = true 15 | moka = { version = "*", features = [ "future" ] } 16 | near-da-http-api-data = { path = "../../crates/http-api-data" } 17 | near-da-primitives = { path = "../../crates/primitives" } 18 | near-da-rpc = { path = "../../crates/da-rpc" } 19 | serde = "1" 20 | serde_json = "1.0.108" 21 | tokio = { version = "1", features = [ "full" ] } 22 | tower = "0.4" 23 | 
tower-http = { version = "0.4", features = [ "trace", "normalize-path" ] } 24 | tracing-subscriber.workspace = true 25 | tracing.workspace = true 26 | -------------------------------------------------------------------------------- /bin/sidecar/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM lukemathwalker/cargo-chef:latest-rust-1 AS chef 2 | WORKDIR /near 3 | 4 | FROM chef AS planner 5 | COPY . . 6 | RUN cargo chef prepare --recipe-path recipe.json 7 | 8 | FROM chef AS builder 9 | COPY --from=planner /near/recipe.json recipe.json 10 | RUN cargo chef cook --release --recipe-path recipe.json 11 | 12 | # Build application 13 | COPY . . 14 | RUN cargo build --release --bin near-da-sidecar 15 | 16 | RUN ldd target/release/near-da-sidecar 17 | 18 | # We do not need the Rust toolchain to run the binary! 19 | FROM debian:bookworm-slim AS runtime 20 | 21 | LABEL org.opencontainers.image.source https://github.com/nuffle-labs/data-availability 22 | 23 | WORKDIR /app 24 | 25 | RUN apt-get update && apt-get install -y \ 26 | ca-certificates 27 | 28 | COPY --from=builder /near/target/release/near-da-sidecar /usr/local/bin 29 | ENV RUST_LOG=info 30 | ENTRYPOINT ["/usr/local/bin/near-da-sidecar"] 31 | -------------------------------------------------------------------------------- /bin/sidecar/src/main.rs: -------------------------------------------------------------------------------- 1 | use axum::{ 2 | body::{boxed, StreamBody}, 3 | extract::{Query, State}, 4 | http::StatusCode, 5 | response::{IntoResponse, Json, Response}, 6 | routing, Router, ServiceExt, 7 | }; 8 | use clap::Parser; 9 | use futures_util::stream::{self, StreamExt}; 10 | use moka::future::Cache; 11 | use near_da_http_api_data::ConfigureClientRequest; 12 | use near_da_rpc::{ 13 | near::{config::Config, Client}, 14 | Blob, BlobRef, CryptoHash, DataAvailability, 15 | }; 16 | use std::{net::SocketAddr, path::PathBuf, sync::Arc}; 17 | use tokio::sync::RwLock; 18 
| use tower::Layer; 19 | use tower_http::{ 20 | classify::ServerErrorsFailureClass, 21 | normalize_path::NormalizePathLayer, 22 | trace::{self, TraceLayer}, 23 | }; 24 | use tracing::{debug, Level}; 25 | use tracing_subscriber::EnvFilter; 26 | 27 | mod plasma; 28 | 29 | pub type Result = anyhow::Result; 30 | 31 | #[derive(Parser, Debug)] 32 | #[command(author, version, about, long_about = None)] 33 | struct CliArgs { 34 | /// Run server on port. 35 | #[arg(short, long, default_value_t = 5888)] 36 | port: u16, 37 | 38 | /// Path to the client configuration. If not specified, the client can be 39 | /// configured via PUT /config after starting the server. 40 | #[arg(short, long)] 41 | config: Option, 42 | } 43 | 44 | /// Represents the application's state. 45 | struct AppState { 46 | /// An optional HTTP client for making network requests. 47 | client: Option, 48 | /// A cache for storing and retrieving data using cryptographic hashes as keys. 49 | /// TODO: choose a faster cache key implementation. 
50 | cache: Cache, 51 | should_cache: bool, 52 | } 53 | 54 | fn config_request_to_client_config(request: ConfigureClientRequest) -> Result { 55 | Ok(Config { 56 | key: near_da_rpc::near::config::KeyType::SecretKey(request.account_id, request.secret_key), 57 | contract: request.contract_id, 58 | network: request 59 | .network 60 | .as_str() 61 | .try_into() 62 | .map_err(|e: String| anyhow::anyhow!(e))?, 63 | namespace: request 64 | .namespace 65 | .map(|ns| near_da_primitives::Namespace::new(ns.version, ns.id)), 66 | mode: request.mode.unwrap_or_default(), 67 | }) 68 | } 69 | 70 | async fn configure_client( 71 | State(state): State>>, 72 | Json(request): Json, 73 | ) -> anyhow::Result<(), AppError> { 74 | debug!("client configuration request: {:?}", request); 75 | 76 | tracing::info!("client configuration set: {:?}", request); 77 | 78 | let mut state = state.write().await; 79 | 80 | state.should_cache = request.should_cache; 81 | if !request.should_cache { 82 | state.cache.invalidate_all(); 83 | } 84 | 85 | let client = Client::new(&config_request_to_client_config(request)?); 86 | state.client = Some(client); 87 | 88 | Ok(()) 89 | } 90 | 91 | async fn get( 92 | State(state): State>>, 93 | Query(request): Query, 94 | ) -> anyhow::Result, AppError> { 95 | debug!("getting blob: {:?}", request); 96 | let app_state = state.read().await; 97 | let client = app_state 98 | .client 99 | .as_ref() 100 | .ok_or(anyhow::anyhow!("client is not configured"))?; 101 | 102 | let blob = client 103 | .get(CryptoHash(request.transaction_id)) 104 | .await 105 | .map_err(|e| anyhow::anyhow!("failed to get blob: {}", e))? 
106 | .0; 107 | 108 | let blob = near_da_http_api_data::Blob { data: blob.data }; 109 | 110 | Ok(Json(blob)) 111 | } 112 | 113 | async fn submit( 114 | State(state): State>>, 115 | Json(request): Json, 116 | ) -> anyhow::Result, AppError> { 117 | debug!("submitting blob: {:?}", request); 118 | let app_state = state.read().await; 119 | 120 | let blob_hash = CryptoHash::hash_bytes(request.data.as_slice()); 121 | 122 | let blob_ref = if app_state.should_cache { 123 | app_state.cache.get(&blob_hash).await.map(|blob_ref| { 124 | debug!("blob is cached, returning: {:?}", blob_ref); 125 | blob_ref 126 | }) 127 | } else { 128 | None 129 | }; 130 | let blob_ref = if let Some(blob_ref) = blob_ref { 131 | blob_ref 132 | } else { 133 | let client = app_state 134 | .client 135 | .as_ref() 136 | .ok_or(anyhow::anyhow!("client is not configured"))?; 137 | 138 | let blob_ref = client 139 | .submit(near_da_primitives::Blob::new(request.data)) 140 | .await 141 | .map_err(|e| anyhow::anyhow!("failed to submit blobs: {}", e))? 
142 | .0; 143 | 144 | debug!( 145 | "submit_blob result: {:?}, caching hash {blob_hash}", 146 | hex::encode(blob_ref.transaction_id) 147 | ); 148 | 149 | if app_state.should_cache { 150 | debug!("caching {blob_hash}"); 151 | app_state.cache.insert(blob_hash, blob_ref.clone()).await; 152 | } 153 | blob_ref 154 | }; 155 | Ok(blob_ref.into()) 156 | } 157 | 158 | pub(crate) fn stream_response + Send + Sync + 'static>( 159 | chunk: T, 160 | ) -> Response { 161 | let s = stream::iter([chunk]).map(|r| Ok::<_, anyhow::Error>(r)); 162 | Response::builder() 163 | .header("Content-Type", "application/octet-stream") 164 | .body(boxed(StreamBody::new(s))) 165 | .unwrap() 166 | } 167 | 168 | // https://github.com/tokio-rs/axum/blob/d7258bf009194cf2f242694e673759d1dbf8cfc0/examples/anyhow-error-response/src/main.rs#L34-L57 169 | struct AppError(pub anyhow::Error); 170 | 171 | impl IntoResponse for AppError { 172 | fn into_response(self) -> Response { 173 | tracing::error!("{}", self.0); 174 | ( 175 | StatusCode::INTERNAL_SERVER_ERROR, 176 | format!("something went wrong: {}", self.0), 177 | ) 178 | .into_response() 179 | } 180 | } 181 | 182 | impl From for AppError 183 | where 184 | E: Into, 185 | { 186 | fn from(err: E) -> Self { 187 | Self(err.into()) 188 | } 189 | } 190 | 191 | #[tokio::main] 192 | async fn main() { 193 | let args = CliArgs::parse(); 194 | 195 | tracing_subscriber::fmt() 196 | .with_target(false) 197 | .with_env_filter(EnvFilter::from_default_env()) 198 | .compact() 199 | .init(); 200 | 201 | let mut state = AppState { 202 | client: None, 203 | cache: Cache::new(2048), // (32 * 2) * 2048 = 128kb 204 | should_cache: true, 205 | }; 206 | 207 | if let Some(path) = args.config { 208 | let file_contents = tokio::fs::read_to_string(path).await.unwrap(); 209 | let config_parse = serde_json::from_str::(&file_contents) 210 | .unwrap_or_else(|e| panic!("failed to parse config: {}", e)); 211 | state.client = Some(Client::new( 212 | 
&config_request_to_client_config(config_parse).unwrap(), 213 | )); 214 | } 215 | 216 | let state = Arc::new(RwLock::new(state)); 217 | 218 | let router = Router::new() 219 | .route("/health", routing::get(|| async { "" })) 220 | .route("/configure", routing::put(configure_client)) 221 | .route("/blob", routing::get(get)) 222 | .route("/blob", routing::post(submit)) 223 | .route("/plasma/get/:transaction_id", routing::get(plasma::get)) 224 | .route("/plasma/put", routing::post(plasma::submit)) 225 | .with_state(state) 226 | .layer( 227 | TraceLayer::new_for_http() 228 | .on_failure(trace::DefaultOnFailure::new().level(Level::WARN)) 229 | .on_failure(|_error: ServerErrorsFailureClass, _latency, _request: &_| { 230 | tracing::warn!("request failed {:?}", _error); 231 | }) 232 | .make_span_with(trace::DefaultMakeSpan::new().level(Level::INFO)) 233 | .on_response(trace::DefaultOnResponse::new().level(Level::INFO)), 234 | ); 235 | let router_normalized = NormalizePathLayer::trim_trailing_slash().layer(router); 236 | 237 | let addr = SocketAddr::from(([0; 4], args.port)); 238 | tracing::info!("listening on {}", addr); 239 | 240 | axum::Server::bind(&addr) 241 | .serve(router_normalized.into_make_service()) 242 | .await 243 | .unwrap(); 244 | } 245 | 246 | #[cfg(test)] 247 | mod tests { 248 | use near_da_primitives::Mode; 249 | 250 | use super::*; 251 | 252 | // #[test] 253 | // fn test_config_request_to_config() { 254 | // let request = ConfigureClientRequest { 255 | // account_id: "account_id".to_string(), 256 | // secret_key: "secret_key".to_string(), 257 | // contract_id: "contract_id".to_string(), 258 | // network: "mainnet".to_string(), 259 | // namespace: Some(NamespaceRequest { 260 | // version: 1, 261 | // id: "namespace_id".to_string(), 262 | // }), 263 | // mode: Some(Mode::Default), 264 | // }; 265 | 266 | // let config = config_request_to_config(request).unwrap(); 267 | 268 | // assert_eq!( 269 | // config.key, 270 | // 
near_da_rpc::near::config::KeyType::SecretKey( 271 | // "account_id".to_string(), 272 | // "secret_key".to_string() 273 | // ) 274 | // ); 275 | // assert_eq!(config.contract, "contract_id".to_string()); 276 | // assert_eq!(config.network, near_da_rpc::near::config::Network::Mainnet); 277 | // assert_eq!( 278 | // config.namespace, 279 | // Some(near_da_primitives::Namespace::new( 280 | // 1, 281 | // "namespace_id".to_string() 282 | // )) 283 | // ); 284 | // assert_eq!(config.mode, Mode::default()); 285 | // } 286 | 287 | // #[test] 288 | // fn test_config_request_to_config_success() { 289 | // let request = ConfigureClientRequest { 290 | // account_id: "account_id".to_string(), 291 | // secret_key: "secret_key".to_string(), 292 | // contract_id: "contract_id".to_string(), 293 | // network: "mainnet".to_string(), 294 | // namespace: Some(NamespaceRequest { 295 | // version: 1, 296 | // id: "namespace_id".to_string(), 297 | // }), 298 | // mode: Some(Mode::Default), 299 | // }; 300 | 301 | // let config = config_request_to_config(request).unwrap(); 302 | 303 | // assert_eq!( 304 | // config.key, 305 | // near_da_rpc::near::config::KeyType::SecretKey( 306 | // "account_id".to_string(), 307 | // "secret_key".to_string() 308 | // ) 309 | // ); 310 | // assert_eq!(config.contract, "contract_id".to_string()); 311 | // assert_eq!(config.network, near_da_rpc::near::config::Network::Mainnet); 312 | // assert_eq!( 313 | // config.namespace, 314 | // Some(near_da_primitives::Namespace::new( 315 | // 1, 316 | // "namespace_id".to_string() 317 | // )) 318 | // ); 319 | // assert_eq!(config.mode, Mode::default()); 320 | // } 321 | 322 | #[test] 323 | fn test_config_request_to_config_default_mode() { 324 | let request = ConfigureClientRequest { 325 | account_id: "account_id".to_string(), 326 | secret_key: "secret_key".to_string(), 327 | contract_id: "contract_id".to_string(), 328 | network: "mainnet".to_string(), 329 | namespace: None, 330 | mode: None, 331 | should_cache: 
false, 332 | }; 333 | 334 | let config = config_request_to_client_config(request).unwrap(); 335 | 336 | assert_eq!(config.mode, Mode::default()); 337 | } 338 | 339 | #[test] 340 | fn test_config_request_to_config_invalid_network() { 341 | let request = ConfigureClientRequest { 342 | account_id: "account_id".to_string(), 343 | secret_key: "secret_key".to_string(), 344 | contract_id: "contract_id".to_string(), 345 | network: "invalid_network".to_string(), 346 | namespace: None, 347 | mode: None, 348 | should_cache: false, 349 | }; 350 | 351 | let result = config_request_to_client_config(request); 352 | 353 | assert!(result.is_err()); 354 | } 355 | } 356 | -------------------------------------------------------------------------------- /bin/sidecar/src/plasma.rs: -------------------------------------------------------------------------------- 1 | use anyhow::anyhow; 2 | use axum::{ 3 | extract::{BodyStream, Path, Query, State}, 4 | response::Response, 5 | }; 6 | use futures_util::stream::StreamExt; 7 | use itertools::Itertools; 8 | use near_da_rpc::{Blob, BlobRef}; 9 | use std::sync::Arc; 10 | use tokio::sync::RwLock; 11 | 12 | use crate::{stream_response, AppError, AppState}; 13 | 14 | // https://github.com/ethereum-optimism/specs/discussions/135 15 | pub const DA_SELECTOR: u8 = 0x6e; 16 | // https://github.com/ethereum-optimism/optimism/blob/457f33f4fdda9373dcf2839619ebf67182ee5057/op-plasma/commitment.go#L37 17 | pub const OP_PLASMA_GENERIC_COMMITMENT: u8 = 1; 18 | 19 | pub fn strip_plasma_bytes(bytes: Vec) -> super::Result> { 20 | bytes 21 | .strip_prefix(&[OP_PLASMA_GENERIC_COMMITMENT]) 22 | .ok_or_else(|| anyhow!("invalid plasma commitment")) 23 | .and_then(|stripped| { 24 | stripped 25 | .strip_prefix(&[DA_SELECTOR]) 26 | .ok_or_else(|| anyhow!("invalid DA selector, should be {DA_SELECTOR}")) 27 | }) 28 | .map(Into::into) 29 | } 30 | 31 | pub fn append_plasma_bytes(mut bytes: Vec) -> Vec { 32 | bytes.insert(0, DA_SELECTOR); 33 | bytes.insert(0, 
OP_PLASMA_GENERIC_COMMITMENT); 34 | bytes 35 | } 36 | 37 | pub(crate) async fn get( 38 | State(state): State>>, 39 | Path(request): Path, 40 | ) -> Result { 41 | let commitments = hex::decode(request.strip_prefix("0x").unwrap_or(&request))?; 42 | let commitments = strip_plasma_bytes(commitments)?; 43 | 44 | // Commitment can be chunks of 32 byte hashes for larger blobs 45 | if commitments.len() % 32 != 0 { 46 | return Err(anyhow::anyhow!("invalid commitment").into()); 47 | } 48 | 49 | let refs = commitments 50 | .chunks(32) 51 | .map(TryInto::<[u8; 32]>::try_into) 52 | .map(|tx| BlobRef::from(tx.unwrap())) 53 | .collect_vec(); 54 | 55 | let mut data = vec![]; 56 | for blob_ref in refs { 57 | data.extend_from_slice( 58 | &super::get(State(state.clone()), Query(blob_ref)) 59 | .await? 60 | .data, 61 | ); 62 | } 63 | 64 | Ok(stream_response(data)) 65 | } 66 | 67 | pub(crate) async fn submit( 68 | State(state): State>>, 69 | mut stream: BodyStream, 70 | ) -> Result { 71 | let mut chunks = vec![]; 72 | while let Some(chunk) = stream.next().await { 73 | chunks.extend_from_slice(&chunk?[..]) 74 | } 75 | 76 | let commitments = super::submit(State(state), Blob::new(chunks).into()) 77 | .await 78 | .map(|r| r.transaction_id.to_vec())?; 79 | let commitments = append_plasma_bytes(commitments); 80 | 81 | Ok(stream_response(commitments)) 82 | } 83 | 84 | #[cfg(test)] 85 | mod tests { 86 | use super::*; 87 | 88 | #[test] 89 | fn test_strip_plasma_bytes() { 90 | let bytes = vec![OP_PLASMA_GENERIC_COMMITMENT, DA_SELECTOR, 1, 2, 3]; 91 | let expected = vec![1, 2, 3]; 92 | assert_eq!(strip_plasma_bytes(bytes).unwrap(), expected); 93 | } 94 | 95 | #[test] 96 | fn test_strip_plasma_bytes_invalid_commitment() { 97 | let bytes = vec![0, DA_SELECTOR, 1, 2, 3]; 98 | assert!(strip_plasma_bytes(bytes).is_err()); 99 | } 100 | 101 | #[test] 102 | fn test_strip_plasma_bytes_invalid_selector() { 103 | let bytes = vec![OP_PLASMA_GENERIC_COMMITMENT, 0, 1, 2, 3]; 104 | 
assert!(strip_plasma_bytes(bytes).is_err()); 105 | } 106 | 107 | #[test] 108 | fn test_append_plasma_bytes() { 109 | let bytes = vec![1, 2, 3]; 110 | let expected = vec![OP_PLASMA_GENERIC_COMMITMENT, DA_SELECTOR, 1, 2, 3]; 111 | assert_eq!(append_plasma_bytes(bytes), expected); 112 | } 113 | } 114 | -------------------------------------------------------------------------------- /cliff.toml: -------------------------------------------------------------------------------- 1 | # git-cliff ~ default configuration file 2 | # https://git-cliff.org/docs/configuration 3 | # 4 | # Lines starting with "#" are comments. 5 | # Configuration options are organized into tables and keys. 6 | # See documentation for more information on available options. 7 | 8 | [changelog] 9 | # changelog header 10 | header = """ 11 | # Changelog\n 12 | All notable changes to this project will be documented in this file.\n 13 | """ 14 | # template for the changelog body 15 | # https://keats.github.io/tera/docs/#introduction 16 | body = """ 17 | {% if version %}\ 18 | ## [{{ version | trim_start_matches(pat="v") }}] - {{ timestamp | date(format="%Y-%m-%d") }} 19 | {% else %}\ 20 | ## [unreleased] 21 | {% endif %}\ 22 | {% for group, commits in commits | group_by(attribute="group") %} 23 | ### {{ group | upper_first }} 24 | {% for commit in commits %} 25 | - {% if commit.breaking %}[**breaking**] {% endif %}{{ commit.message | upper_first }}\ 26 | {% endfor %} 27 | {% endfor %}\n 28 | """ 29 | # remove the leading and trailing whitespace from the template 30 | trim = true 31 | # changelog footer 32 | footer = """ 33 | 34 | """ 35 | # postprocessors 36 | postprocessors = [ 37 | # { pattern = '', replace = "https://github.com/orhun/git-cliff" }, # replace repository URL 38 | ] 39 | [git] 40 | # parse the commits based on https://www.conventionalcommits.org 41 | conventional_commits = true 42 | # filter out the commits that are not conventional 43 | filter_unconventional = true 44 | # process each 
line of a commit as an individual commit 45 | split_commits = true 46 | # regex for preprocessing the commit messages 47 | commit_preprocessors = [ 48 | # { pattern = '\((\w+\s)?#([0-9]+)\)', replace = "([#${2}](/issues/${2}))"}, # replace issue numbers 49 | ] 50 | # regex for parsing and grouping commits 51 | commit_parsers = [ 52 | { message = "^feat", group = "Features" }, 53 | { message = "^fix", group = "Bug Fixes" }, 54 | { message = "^doc", group = "Documentation" }, 55 | { message = "^perf", group = "Performance" }, 56 | { message = "^refactor", group = "Refactor" }, 57 | { message = "^style", group = "Styling" }, 58 | { message = "^test", group = "Testing" }, 59 | { message = "^chore\\(release\\): prepare for", skip = true }, 60 | { message = "^chore\\(deps\\)", skip = true }, 61 | { message = "^chore\\(pr\\)", skip = true }, 62 | { message = "^chore\\(pull\\)", skip = true }, 63 | { message = "^chore|ci", group = "Miscellaneous Tasks" }, 64 | { body = ".*security", group = "Security" }, 65 | { message = "^revert", group = "Revert" }, 66 | ] 67 | # protect breaking changes from being skipped due to matching a skipping commit_parser 68 | protect_breaking_commits = true 69 | # filter out the commits that are not matched by commit parsers 70 | filter_commits = false 71 | # regex for matching git tags 72 | tag_pattern = "v[0-9].*" 73 | 74 | # regex for skipping tags 75 | skip_tags = "v0.1.0-beta.1" 76 | # regex for ignoring tags 77 | ignore_tags = "" 78 | # sort the tags topologically 79 | topo_order = true 80 | # sort the commits inside sections by oldest/newest order 81 | sort_commits = "oldest" 82 | # limit the number of commits included in the changelog. 
83 | # limit_commits = 42 84 | -------------------------------------------------------------------------------- /contracts/blob-registry/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "blob-registry" 3 | description = "Blob registry" 4 | version = { workspace = true } 5 | edition = { workspace = true } 6 | authors = { workspace = true } 7 | 8 | [lib] 9 | crate-type = ["cdylib" ] 10 | 11 | [dependencies] 12 | borsh = { workspace = true } 13 | hex = { workspace = true } 14 | near-sdk = "5.1.0" 15 | near-sdk-contract-tools = "3.0.2" 16 | 17 | [dev-dependencies] 18 | serde_json = "1.0.120" 19 | anyhow = "1.0.75" 20 | near-da-primitives = { path = "../../crates/primitives", default-features = false } 21 | near-workspaces = { version = "0.8.0", features = [ "unstable" ] } 22 | tokio = "1.28" 23 | 24 | [features] 25 | default = [ "std" ] 26 | std = [ "near-da-primitives/std", "borsh/std" ] 27 | -------------------------------------------------------------------------------- /contracts/blob-registry/src/events.rs: -------------------------------------------------------------------------------- 1 | use crate::{Maintainer, Namespace}; 2 | use near_sdk::{ 3 | env::log_str, 4 | serde::{Deserialize, Serialize}, 5 | serde_json::to_string, 6 | }; 7 | 8 | const CONTRACT_STANDARD_NAME: &str = "nepXXX"; 9 | const CONTRACT_STANDARD_VERSION: &str = "1.0.0"; 10 | 11 | /// Interface to capture data about an event. 12 | /// 13 | /// Arguments: 14 | /// * `standard`: name of standard e.g. nep171 15 | /// * `version`: e.g. 1.0.0 16 | /// * `event`: associate event data 17 | #[derive(Serialize, Deserialize, Debug)] 18 | #[serde(crate = "near_sdk::serde")] 19 | pub(crate) struct EventLog { 20 | pub standard: String, 21 | pub version: String, 22 | #[serde(flatten)] 23 | pub event: EventLogVariant, 24 | } 25 | 26 | /// Enum that represents the data type of the EventLog. 
27 | #[derive(Serialize, Deserialize, Debug)] 28 | #[serde(tag = "event", content = "data")] 29 | #[serde(rename_all = "snake_case")] 30 | #[serde(crate = "near_sdk::serde")] 31 | #[non_exhaustive] 32 | pub(crate) enum EventLogVariant { 33 | AddMaintainer(AddMaintainerLog), 34 | NamespaceRegistration(NamespaceRegistrationLog), 35 | } 36 | 37 | /// An event log to capture a maintainer inclusion. 38 | /// 39 | /// Arguments 40 | /// * `owner_id`: "account.near" 41 | /// * `memo`: optional message 42 | #[derive(Serialize, Deserialize, Debug)] 43 | #[serde(crate = "near_sdk::serde")] 44 | pub(crate) struct AddMaintainerLog { 45 | pub maintainer: Maintainer, 46 | #[serde(skip_serializing_if = "Option::is_none")] 47 | pub memo: Option, 48 | } 49 | 50 | /// An event log to capture a new namespace registration. 51 | /// 52 | /// Arguments 53 | /// * `namespace`: u32 that has been registered 54 | /// * `memo`: optional message 55 | #[derive(Serialize, Deserialize, Debug)] 56 | #[serde(crate = "near_sdk::serde")] 57 | pub(crate) struct NamespaceRegistrationLog { 58 | pub namespace: Namespace, 59 | #[serde(skip_serializing_if = "Option::is_none")] 60 | pub memo: Option, 61 | } 62 | 63 | impl EventLog { 64 | fn new(event: EventLogVariant) -> Self { 65 | Self { 66 | standard: CONTRACT_STANDARD_NAME.to_string(), 67 | version: CONTRACT_STANDARD_VERSION.to_string(), 68 | event, 69 | } 70 | } 71 | 72 | pub(crate) fn maintainer(maintainer: Maintainer) { 73 | let log = EventLog::new(EventLogVariant::AddMaintainer(AddMaintainerLog { 74 | maintainer, 75 | memo: None, 76 | })); 77 | log_str(&to_string(&log).unwrap()); 78 | } 79 | 80 | pub(crate) fn namespace(namespace: Namespace) { 81 | let log = EventLog::new(EventLogVariant::NamespaceRegistration( 82 | NamespaceRegistrationLog { 83 | namespace, 84 | memo: None, 85 | }, 86 | )); 87 | log_str(&to_string(&log).unwrap()); 88 | } 89 | } 90 | -------------------------------------------------------------------------------- 
/contracts/blob-registry/src/lib.rs: -------------------------------------------------------------------------------- 1 | use events::EventLog; 2 | use near_sdk::{ 3 | assert_one_yocto, env, near, 4 | serde::de::{self, Visitor}, 5 | serde::{Deserialize, Deserializer, Serialize, Serializer}, 6 | AccountId, NearToken, PanicOnDefault, 7 | }; 8 | 9 | use near_sdk_contract_tools::{ 10 | owner::{Owner, OwnerExternal}, 11 | Owner, 12 | }; 13 | use std::collections::{HashMap, HashSet}; 14 | 15 | mod events; 16 | 17 | // Error messages. 18 | const ERR_NAMESPACE_MISSING: &str = "Namespace does not exist"; 19 | const ERR_UNAUTHORIZED_CALLER: &str = "Caller is not authorized to call method"; 20 | const ERR_INVALID_INPUT: &str = "Invalid input"; 21 | const ERR_CONTRACT_INITIALIZED: &str = "Contract already initialized"; 22 | const ERR_NAMESPACE_EXISTS: &str = "Namespace exists and cannot be registered again"; 23 | const ERR_NOT_ENOUGHT_FUNDS: &str = "Not enough funds to register a namespace"; 24 | const MINIMUM_DEPOSIT: u8 = 100; // 0.1 NEAR == 100 miliNEAR 25 | 26 | /// The contract itself. 27 | #[derive(PanicOnDefault, Owner)] 28 | #[near(contract_state, serializers=[borsh, json])] 29 | pub struct Contract { 30 | info: HashMap, 31 | } 32 | 33 | /// Repository information, understood as a set of namespaces and their metadata. 34 | #[derive(Default, Clone)] 35 | #[near(serializers=[borsh, json])] 36 | pub struct Metadata { 37 | priority: Priority, 38 | maintainers: HashSet, 39 | extra: Option, 40 | } 41 | 42 | type Namespace = u32; 43 | type Priority = u32; 44 | type Maintainer = Vec; 45 | type TransactionId = Hash; 46 | 47 | #[near] 48 | impl Contract { 49 | #[init] 50 | /// Create a new contract with a given owner. 
51 | pub fn new(owner_id: AccountId) -> Self { 52 | assert!(!env::state_exists(), "{ERR_CONTRACT_INITIALIZED}"); 53 | let mut contract = Self { 54 | info: Default::default(), 55 | }; 56 | Self::init(&mut contract, &owner_id); 57 | contract 58 | } 59 | 60 | /// Get the priority level. 61 | pub fn priority(&self, namespace: Namespace) -> Option { 62 | self.info.get(&namespace).map(|metadata| metadata.priority) 63 | } 64 | 65 | /// Get the maintainers. 66 | pub fn maintainers(&self, namespace: Namespace) -> Option> { 67 | self.info 68 | .get(&namespace) 69 | .map(|metadata| metadata.maintainers.clone()) 70 | } 71 | 72 | /// Get the extra information in the metadata. 73 | pub fn extra(&self, namespace: Namespace) -> Option { 74 | self.info 75 | .get(&namespace) 76 | .and_then(|metadata| metadata.extra.clone()) 77 | } 78 | 79 | /// Add a new maintainer. 80 | pub fn add_maintainer(&mut self, namespace: Namespace, maintainer: Maintainer) { 81 | match self.check_authorized(namespace) { 82 | Some(mut metadata) => { 83 | // add it to the set and log the inclusion 84 | if metadata.maintainers.insert(maintainer.clone()) { 85 | EventLog::maintainer(maintainer); 86 | }; 87 | } 88 | None => { 89 | env::panic_str(ERR_UNAUTHORIZED_CALLER); 90 | } 91 | } 92 | } 93 | 94 | /// Submit the blob and the namespace. 95 | pub fn submit(&self, namespace: Namespace, _transaction_ids: Vec) { 96 | // check the namespace exists and the caller is in the maintainers list 97 | match self.check_authorized(namespace) { 98 | Some(_) => { 99 | env::input() 100 | .is_none() 101 | .then(|| env::panic_str(ERR_INVALID_INPUT)); 102 | } 103 | None => { 104 | env::panic_str(ERR_UNAUTHORIZED_CALLER); 105 | } 106 | } 107 | } 108 | 109 | /// Transfer the ownership of the contract. An event is emited by `Self::update_owner`. 
110 | pub fn transfer_ownership(&mut self, new_owner_id: AccountId) { 111 | Self::require_owner(); 112 | assert_one_yocto(); 113 | Self::update_owner(self, Some(new_owner_id.clone())); 114 | } 115 | 116 | /// Register a DA consumer. 117 | #[payable] 118 | pub fn register_consumer(&mut self, namespace: Namespace) { 119 | if self.info.get(&namespace).is_some() { 120 | // when the namespace does not exist, 121 | env::panic_str(ERR_NAMESPACE_EXISTS); 122 | } else { 123 | // when the deposit is enough 124 | if env::attached_deposit() >= NearToken::from_millinear(MINIMUM_DEPOSIT.into()) { 125 | // and the namespace does not exist, then it can be registered 126 | let metadata = Metadata { 127 | maintainers: HashSet::from([env::predecessor_account_id().as_bytes().to_vec()]), 128 | ..Default::default() 129 | }; 130 | self.info.insert(namespace, metadata); 131 | // and an event can be emitted 132 | EventLog::namespace(namespace); 133 | } else { 134 | env::panic_str(ERR_NOT_ENOUGHT_FUNDS); 135 | } 136 | } 137 | } 138 | } 139 | 140 | impl Contract { 141 | /// Helper function to check that the caller is authorized to call the method. 142 | fn check_authorized(&self, namespace: Namespace) -> Option { 143 | let predecessor = env::predecessor_account_id(); 144 | if let Some(metadata) = self.info.get(&namespace) { 145 | if self.own_get_owner().unwrap() == predecessor 146 | || metadata.maintainers.contains(predecessor.as_bytes()) 147 | { 148 | Some(metadata.clone()) 149 | } else { 150 | None 151 | } 152 | } else { 153 | env::panic_str(ERR_NAMESPACE_MISSING); 154 | } 155 | } 156 | } 157 | 158 | /// Hash type for represennting the transaction id. 
159 | #[derive(Debug)] 160 | pub struct Hash([u8; 32]); 161 | 162 | impl Serialize for Hash { 163 | fn serialize(&self, serializer: S) -> Result 164 | where 165 | S: Serializer, 166 | { 167 | // Convert the byte array to a hex string for serialization 168 | let hex_string = hex::encode(self.0); 169 | serializer.serialize_str(&hex_string) 170 | } 171 | } 172 | 173 | impl<'de> Deserialize<'de> for Hash { 174 | fn deserialize(deserializer: D) -> Result 175 | where 176 | D: Deserializer<'de>, 177 | { 178 | struct MyHashVisitor; 179 | 180 | impl<'de> Visitor<'de> for MyHashVisitor { 181 | type Value = Hash; 182 | 183 | fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result { 184 | formatter.write_str("a hex string representing a hash") 185 | } 186 | 187 | fn visit_str(self, v: &str) -> Result 188 | where 189 | E: de::Error, 190 | { 191 | // Convert the hex string back to a byte array 192 | let bytes = hex::decode(v).map_err(de::Error::custom)?; 193 | if bytes.len() != 32 { 194 | return Err(de::Error::custom("expected a 32-byte hash")); 195 | } 196 | let mut hash = [0u8; 32]; 197 | hash.copy_from_slice(&bytes); 198 | Ok(Hash(hash)) 199 | } 200 | } 201 | 202 | deserializer.deserialize_str(MyHashVisitor) 203 | } 204 | } 205 | -------------------------------------------------------------------------------- /contracts/blob-registry/tests/tests.rs: -------------------------------------------------------------------------------- 1 | #![cfg(test)] 2 | 3 | use near_sdk::NearToken; 4 | use serde_json::json; 5 | 6 | #[tokio::test] 7 | async fn new() -> anyhow::Result<()> { 8 | // Create a new sandbox for testing. 9 | let worker = near_workspaces::sandbox().await?; 10 | // Compile the contract. 11 | let wasm = near_workspaces::compile_project(".").await?; 12 | // Deploy the (wasm) contract into the sandbox. 13 | let contract = worker.dev_deploy(&wasm).await?; 14 | // Create a dev account for testing. 
15 | let alice = worker.dev_create_account().await?; 16 | 17 | // Calling contract's `new()` 18 | contract 19 | .call("new") 20 | .args_json(json!({ "owner_id": alice.id() })) 21 | .transact() 22 | .await? 23 | .into_result()?; 24 | 25 | // `alice` is implicitly set as owner 26 | let owner = contract.view("own_get_owner").await?.json::()?; 27 | assert_eq!(owner, alice.id().as_str(), "alice should be the owner"); 28 | 29 | Ok(()) 30 | } 31 | 32 | #[tokio::test] 33 | async fn register_consumer_not_enough_funds_errs() -> anyhow::Result<()> { 34 | // Create a new sandbox for testing. 35 | let worker = near_workspaces::sandbox().await?; 36 | // Compile the contract. 37 | let wasm = near_workspaces::compile_project(".").await?; 38 | // Deploy the (wasm) contract into the sandbox. 39 | let contract = worker.dev_deploy(&wasm).await?; 40 | // Create a dev account for testing. 41 | let alice = worker.dev_create_account().await?; 42 | 43 | // Calling contract's `new()` 44 | contract 45 | .call("new") 46 | .args_json(json!({ "owner_id": alice.id() })) 47 | .transact() 48 | .await? 49 | .into_result()?; 50 | 51 | // Register a consumer 52 | let registration = alice 53 | .call(contract.id(), "register_consumer") 54 | .args_json(json!({ "namespace": 0 })) 55 | .deposit(NearToken::from_millinear(99).as_yoctonear()) 56 | .transact() 57 | .await? 58 | .into_result(); 59 | 60 | assert!(registration.is_err()); 61 | 62 | Ok(()) 63 | } 64 | 65 | #[tokio::test] 66 | async fn submit() -> anyhow::Result<()> { 67 | // Create a new sandbox for testing. 68 | let worker = near_workspaces::sandbox().await?; 69 | // Compile the contract. 70 | let wasm = near_workspaces::compile_project(".").await?; 71 | // Deploy the (wasm) contract into the sandbox. 72 | let contract = worker.dev_deploy(&wasm).await?; 73 | // Create a dev account for testing. 
74 | let alice = worker.dev_create_account().await?; 75 | 76 | // Calling contract's `new()` 77 | contract 78 | .call("new") 79 | .args_json(json!({ "owner_id": alice.id() })) 80 | .transact() 81 | .await? 82 | .into_result()?; 83 | 84 | // Register a consumer 85 | alice 86 | .call(contract.id(), "register_consumer") 87 | .args_json(json!({ "namespace": 0 })) 88 | .deposit(NearToken::from_millinear(100).as_yoctonear()) 89 | .transact() 90 | .await? 91 | .into_result()?; 92 | 93 | let mut tx_ids = vec![]; 94 | for _ in 0..100 { 95 | tx_ids.push(hex::encode([1u8; 32])); 96 | } 97 | 98 | eprintln!("Submitting {} TX IDs...", tx_ids.len()); 99 | let result = alice 100 | .call(contract.id(), "submit") 101 | .args_json(json!({ "namespace": 0, "_transaction_ids": tx_ids })) 102 | .transact() 103 | .await? 104 | .into_result()?; 105 | 106 | println!("Gas burned: {}", result.total_gas_burnt); 107 | 108 | Ok(()) 109 | } 110 | 111 | #[tokio::test] 112 | async fn owner_change() -> anyhow::Result<()> { 113 | // Create a new sandbox for testing. 114 | let worker = near_workspaces::sandbox().await?; 115 | // Compile the contract. 116 | let wasm = near_workspaces::compile_project(".").await?; 117 | // Deploy the (wasm) contract into the sandbox. 118 | let contract = worker.dev_deploy(&wasm).await?; 119 | // Create a dev account for testing. 120 | let alice = worker.dev_create_account().await?; 121 | 122 | // Calling contract's `new()` 123 | contract 124 | .call("new") 125 | .args_json(json!({ "owner_id": alice.id() })) 126 | .transact() 127 | .await? 
128 | .into_result()?; 129 | 130 | // `alice` is implicitly set as owner 131 | let owner = contract.view("own_get_owner").await?.json::()?; 132 | assert_eq!(owner, alice.id().as_str(), "alice should be the owner"); 133 | 134 | // test switching ownership 135 | let bob = worker.dev_create_account().await?; 136 | 137 | // Alice proposes Bob as the new owner 138 | alice 139 | .call(contract.id(), "own_propose_owner") 140 | .args_json(json!({ 141 | "account_id": bob.id(), 142 | })) 143 | .deposit(NearToken::from_yoctonear(1).as_yoctonear()) 144 | .transact() 145 | .await? 146 | .unwrap(); 147 | 148 | // Bob accepts the ownership 149 | bob.call(contract.id(), "own_accept_owner") 150 | .deposit(NearToken::from_yoctonear(1).as_yoctonear()) 151 | .transact() 152 | .await? 153 | .unwrap(); 154 | 155 | // Check if Bob is the new owner 156 | let owner = contract.view("own_get_owner").await?.json::()?; 157 | assert_eq!(owner, bob.id().as_str(), "bob should be the owner"); 158 | 159 | Ok(()) 160 | } 161 | -------------------------------------------------------------------------------- /contracts/blob-store/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | authors = { workspace = true } 3 | edition = { workspace = true } 4 | name = "near-da-blob-store" 5 | version = { workspace = true } 6 | 7 | [lib] 8 | crate-type = [ "cdylib" ] 9 | 10 | [dependencies] 11 | borsh = { workspace = true } 12 | near-sdk = { workspace = true } 13 | uint = { version = "0.9.3", default-features = false } 14 | 15 | [dev-dependencies] 16 | anyhow = "1.0.75" 17 | near-da-primitives = { path = "../../crates/primitives", default-features = false } 18 | near-workspaces = { version = "0.8.0", features = [ "unstable" ] } 19 | tokio = "1.28" 20 | 21 | [features] 22 | default = [ "std" ] 23 | std = [ "near-da-primitives/std", "borsh/std", "uint/std" ] 24 | -------------------------------------------------------------------------------- 
/contracts/blob-store/build.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | echo ">> Building contract" 4 | 5 | rustup target add wasm32-unknown-unknown 6 | cargo build --all --target wasm32-unknown-unknown --release -------------------------------------------------------------------------------- /contracts/blob-store/src/lib.rs: -------------------------------------------------------------------------------- 1 | #![no_std] 2 | 3 | use near_sdk::{assert_one_yocto, env, AccountId}; 4 | 5 | const ERR_CONTRACT_NOT_INITIALIZED: &str = "Contract is not initialized."; 6 | const ERR_CONTRACT_ALREADY_INITIALIZED: &str = "Contract already initialized."; 7 | const ERR_NOT_OWNER: &str = "Predecessor is not owner."; 8 | const ERR_NO_PROPOSED_OWNER: &str = "No proposed owner."; 9 | const ERR_NOT_PROPOSED_OWNER: &str = "Predecessor is not proposed owner."; 10 | const ERR_MISSING_INVALID_INPUT: &str = "Missing or invalid input."; 11 | const JSON_NULL: &[u8] = b"null"; 12 | const JSON_DOUBLE_QUOTE: &[u8] = b"\""; 13 | // storage keys 14 | const KEY_INITIALIZED: &[u8; 1] = &[0]; 15 | const KEY_OWNER: &[u8; 1] = &[1]; // serialized with .as_bytes() NOT Borsh 16 | const KEY_PROPOSED_OWNER: &[u8; 1] = &[2]; // ditto. Not guaranteed to be a valid AccountId. 
17 | 18 | fn require_initialized() { 19 | if !env::storage_has_key(KEY_INITIALIZED) { 20 | env::panic_str(ERR_CONTRACT_NOT_INITIALIZED); 21 | } 22 | } 23 | 24 | fn require_owner(predecessor: &AccountId) { 25 | if env::storage_read(KEY_OWNER) 26 | .filter(|v| v == predecessor.as_bytes()) 27 | .is_none() 28 | { 29 | env::panic_str(ERR_NOT_OWNER); 30 | } 31 | } 32 | 33 | #[no_mangle] 34 | pub fn new() { 35 | if env::storage_has_key(KEY_INITIALIZED) { 36 | env::panic_str(ERR_CONTRACT_ALREADY_INITIALIZED); 37 | } 38 | 39 | env::storage_write(KEY_INITIALIZED, &[1]); 40 | 41 | let predecessor_account_id = env::predecessor_account_id(); 42 | 43 | env::storage_write(KEY_OWNER, predecessor_account_id.as_bytes()); 44 | } 45 | 46 | #[no_mangle] 47 | pub fn submit() { 48 | require_initialized(); 49 | require_owner(&env::predecessor_account_id()); 50 | 51 | if env::input().is_none() { 52 | env::panic_str(ERR_MISSING_INVALID_INPUT); 53 | } 54 | } 55 | 56 | fn return_json_string(v: Option<&[u8]>) { 57 | let r = v.map_or_else( 58 | || JSON_NULL.to_vec(), 59 | |v| [JSON_DOUBLE_QUOTE, v, JSON_DOUBLE_QUOTE].concat(), 60 | ); 61 | env::value_return(&r); 62 | } 63 | 64 | #[no_mangle] 65 | pub fn own_get_owner() { 66 | require_initialized(); 67 | 68 | let current_owner = env::storage_read(KEY_OWNER); 69 | 70 | return_json_string(current_owner.as_deref()); 71 | } 72 | 73 | #[no_mangle] 74 | pub fn own_get_proposed_owner() { 75 | require_initialized(); 76 | 77 | let current_proposed_owner = env::storage_read(KEY_PROPOSED_OWNER); 78 | 79 | return_json_string(current_proposed_owner.as_deref()); 80 | } 81 | 82 | #[no_mangle] 83 | pub fn own_propose_owner() { 84 | require_initialized(); 85 | assert_one_yocto(); 86 | let predecessor = env::predecessor_account_id(); 87 | require_owner(&predecessor); 88 | 89 | let payload = env::input().unwrap_or_else(|| env::panic_str(ERR_MISSING_INVALID_INPUT)); 90 | 91 | let new_proposed_owner = if payload == b"{}" { 92 | None 93 | } else if let 
Some(account_id) = payload 94 | .strip_prefix(br#"{"account_id":""#) // jank JSON "parsing" 95 | .and_then(|s| s.strip_suffix(br#""}"#)) 96 | { 97 | Some(account_id) 98 | } else { 99 | env::panic_str(ERR_MISSING_INVALID_INPUT); 100 | }; 101 | 102 | match new_proposed_owner { 103 | Some(new_proposed_owner) => { 104 | env::storage_write(KEY_PROPOSED_OWNER, new_proposed_owner); 105 | } 106 | None => { 107 | env::storage_remove(KEY_PROPOSED_OWNER); 108 | } 109 | } 110 | } 111 | 112 | #[no_mangle] 113 | pub fn own_accept_owner() { 114 | require_initialized(); 115 | assert_one_yocto(); 116 | let predecessor = env::predecessor_account_id(); 117 | let current_proposed_owner = env::storage_read(KEY_PROPOSED_OWNER) 118 | .unwrap_or_else(|| env::panic_str(ERR_NO_PROPOSED_OWNER)); 119 | 120 | if predecessor.as_bytes() != current_proposed_owner { 121 | env::panic_str(ERR_NOT_PROPOSED_OWNER); 122 | } 123 | 124 | env::storage_remove(KEY_PROPOSED_OWNER); 125 | env::storage_write(KEY_OWNER, ¤t_proposed_owner); 126 | } 127 | 128 | #[no_mangle] 129 | pub fn own_renounce_owner() { 130 | require_initialized(); 131 | assert_one_yocto(); 132 | let predecessor = env::predecessor_account_id(); 133 | require_owner(&predecessor); 134 | 135 | env::storage_remove(KEY_OWNER); 136 | env::storage_remove(KEY_PROPOSED_OWNER); 137 | } 138 | -------------------------------------------------------------------------------- /contracts/blob-store/tests/tests.rs: -------------------------------------------------------------------------------- 1 | use near_da_primitives::Blob; 2 | 3 | #[tokio::test] 4 | async fn test() -> anyhow::Result<()> { 5 | eprintln!("Initializing sandbox..."); 6 | let worker = near_workspaces::sandbox().await?; 7 | 8 | eprintln!("Setting up accounts..."); 9 | let wasm = near_workspaces::compile_project(".").await?; 10 | 11 | let contract = worker.dev_deploy(&wasm).await?; 12 | let alice = worker.dev_create_account().await?; 13 | 14 | eprintln!("Calling contract::new()..."); 15 | 16 
| alice 17 | .call(contract.id(), "new") 18 | .transact() 19 | .await? 20 | .into_result()?; 21 | 22 | eprintln!("Viewing contract::own_get_owner()..."); 23 | 24 | // alice is implicitly set as owner 25 | 26 | let owner = contract.view("own_get_owner").await?.json::()?; 27 | 28 | assert_eq!(owner, alice.id().as_str(), "alice should be the owner"); 29 | 30 | let mut blobs = vec![]; 31 | for _ in 0..100 { 32 | blobs.push(Blob::new(vec![3u8; 256])); 33 | } 34 | let blob_ser = borsh::to_vec(&blobs).unwrap(); 35 | 36 | eprintln!("Submitting {} blobs...", blobs.len()); 37 | 38 | let result = alice 39 | .call(contract.id(), "submit") 40 | .args(blob_ser) 41 | .transact() 42 | .await? 43 | .into_result()?; 44 | 45 | eprintln!("Gas burned: {}", result.total_gas_burnt); 46 | 47 | // test switching ownership 48 | eprintln!("Creating bob..."); 49 | 50 | let bob = worker.dev_create_account().await?; 51 | 52 | eprintln!("Proposing bob as new owner..."); 53 | 54 | alice 55 | .call(contract.id(), "own_propose_owner") 56 | .args_json(near_sdk::serde_json::json!({ 57 | "account_id": bob.id(), 58 | })) 59 | .deposit(1) 60 | .transact() 61 | .await? 62 | .unwrap(); 63 | 64 | eprintln!("Ownership acceptance by bob..."); 65 | 66 | bob.call(contract.id(), "own_accept_owner") 67 | .deposit(1) 68 | .transact() 69 | .await? 
70 | .unwrap(); 71 | 72 | let owner = contract.view("own_get_owner").await?.json::()?; 73 | 74 | assert_eq!(owner, bob.id().as_str(), "bob should be the owner"); 75 | 76 | Ok(()) 77 | } 78 | -------------------------------------------------------------------------------- /crates/da-rpc-sys/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | authors.workspace = true 3 | edition.workspace = true 4 | name = "near-da-rpc-sys" 5 | version.workspace = true 6 | 7 | [lib] 8 | crate-type = [ "cdylib", "staticlib" ] 9 | 10 | [dependencies] 11 | libc = "*" 12 | once_cell = "*" 13 | tokio = { version = "1.0", features = [ "full" ] } 14 | # TODO: move to eyre 15 | anyhow = "*" 16 | da-rpc = { path = "../da-rpc", package = "near-da-rpc" } 17 | ffi-support = "*" 18 | ffi_helpers = "*" 19 | openssl = { version = "0.10", features = [ "vendored" ] } 20 | 21 | [dev-dependencies] 22 | pretty_env_logger = "*" 23 | 24 | [build-dependencies] 25 | cbindgen = "*" 26 | which = "*" 27 | -------------------------------------------------------------------------------- /crates/da-rpc-sys/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM --platform=$BUILDPLATFORM rust:1.72.0-slim-bookworm 2 | 3 | LABEL org.opencontainers.image.source https://github.com/nuffle-labs/data-availability 4 | 5 | RUN apt-get update && apt-get install -y \ 6 | git \ 7 | jq \ 8 | make \ 9 | bash \ 10 | openssl \ 11 | libssl-dev \ 12 | protobuf-compiler \ 13 | pkg-config \ 14 | cbindgen \ 15 | && rm -rf /var/lib/apt/lists/* 16 | 17 | COPY . 
/app 18 | 19 | WORKDIR /app 20 | 21 | RUN mkdir -p lib 22 | 23 | RUN make da-rpc-sys 24 | 25 | RUN cp -f ./target/release/libnear* /lib/ 26 | RUN ls /lib 27 | 28 | RUN mkdir -p /gopkg 29 | RUN cp -rf ./gopkg/* /gopkg 30 | RUN mkdir -p /gopkg/da-rpc/lib 31 | RUN cp -f /lib/libnear* /gopkg/da-rpc/lib 32 | RUN ls /gopkg 33 | RUN ls /gopkg/da-rpc/lib 34 | 35 | 36 | WORKDIR /lib 37 | 38 | RUN rm -rf /app 39 | RUN rm -rf ~/.cargo 40 | -------------------------------------------------------------------------------- /crates/da-rpc-sys/Makefile: -------------------------------------------------------------------------------- 1 | DA_RPC_GO_DIR=../../gopkg/da-rpc 2 | 3 | da-rpc-sys: 4 | cargo build --release --locked 5 | mkdir -p $(DA_RPC_GO_DIR)/lib 6 | cp -f ../../target/release/libnear*.h $(DA_RPC_GO_DIR)/lib/ 7 | cp -f ../../target/release/libnear*.a $(DA_RPC_GO_DIR)/lib/ 8 | ls -l $(DA_RPC_GO_DIR)/lib 9 | cat $(DA_RPC_GO_DIR)/lib/*.h 10 | 11 | clean: 12 | rm -rf $(DA_RPC_GO_DIR)/lib 13 | 14 | musl: 15 | ~/.cargo/bin/cross build --target x86_64-unknown-linux-musl --release 16 | 17 | TAG_PREFIX?=near 18 | IMAGE_TAG?=0.0.1 19 | docker: 20 | DOCKER_BUILDKIT=1 docker build --progress=plain -t $(TAG_PREFIX)/da-rpc:$(IMAGE_TAG) -f Dockerfile ../../ 21 | docker tag $(TAG_PREFIX)/da-rpc:$(IMAGE_TAG) $(TAG_PREFIX)/da-rpc:latest 22 | 23 | push-docker: 24 | docker push $(TAG_PREFIX)/da-rpc:$(IMAGE_TAG)-bullseye 25 | docker push $(TAG_PREFIX)/da-rpc:latest 26 | -------------------------------------------------------------------------------- /crates/da-rpc-sys/build.rs: -------------------------------------------------------------------------------- 1 | use std::{env, path::PathBuf}; 2 | 3 | fn main() { 4 | let crate_dir = env::var("CARGO_MANIFEST_DIR").unwrap(); 5 | let crate_name = env::var("CARGO_PKG_NAME").unwrap(); 6 | #[allow(clippy::single_char_pattern)] // False positive 7 | let output_file = target_dir() 8 | .join(format!("lib{crate_name}.h").replace("-", "_")) 9 | .display() 
10 | .to_string(); 11 | 12 | // check if cbindgen is in path or panic 13 | let _cbindgen = match which::which("cbindgen") { 14 | Ok(path) => path, 15 | Err(_) => panic!("cbindgen not found in path"), 16 | }; 17 | 18 | let mut config: cbindgen::Config = Default::default(); 19 | config.language = cbindgen::Language::C; 20 | config.parse.parse_deps = true; 21 | config.parse.include = Some(vec![ 22 | crate_name.clone(), 23 | "da-rpc".to_string(), 24 | "near-da-rpc".to_string(), 25 | ]); 26 | config.sys_includes = vec!["math.h".to_string(), "stdio.h".to_string()]; 27 | cbindgen::generate_with_config(crate_dir, config) 28 | .expect("Unable to generate bindings") 29 | .write_to_file(output_file); 30 | } 31 | 32 | /// Find the location of the `target/` directory. Note that this may be 33 | /// overridden by `cmake`, so we also need to check the `CARGO_TARGET_DIR` 34 | /// variable. 35 | fn target_dir() -> PathBuf { 36 | if let Ok(dir) = env::var("OUT_DIR") { 37 | PathBuf::from(dir).join("../../..") 38 | } else { 39 | PathBuf::from(env::var("CARGO_MANIFEST_DIR").unwrap()).join("target") 40 | } 41 | } 42 | -------------------------------------------------------------------------------- /crates/da-rpc-sys/src/lib.rs: -------------------------------------------------------------------------------- 1 | use anyhow::Context; 2 | use da_rpc::near::config::{self, Network}; 3 | pub use da_rpc::near::{config::Config, Client}; 4 | use da_rpc::CryptoHash; 5 | use da_rpc::DataAvailability; 6 | pub use da_rpc::Namespace; 7 | pub use da_rpc::{Blob, BlobRef}; 8 | 9 | use ffi_helpers::error_handling::update_last_error; 10 | use ffi_helpers::null_pointer_check; 11 | use ffi_helpers::Nullable; 12 | use ffi_support::FfiStr; 13 | use libc::size_t; 14 | use once_cell::sync::Lazy; 15 | use std::ptr::{null, null_mut}; 16 | 17 | use std::{ 18 | ffi::{c_char, CStr, CString}, 19 | mem, slice, 20 | }; 21 | use tokio::runtime::{self, Runtime}; 22 | 23 | pub type BlockHeight = u64; 24 | 25 | // Denote 
the version to make sure we don't break the API downstream 26 | pub const VERSION: u8 = 4; 27 | 28 | /// TODO: fix a lot of these panics since they arent handled well by ffi! 29 | 30 | static RUNTIME: Lazy = Lazy::new(|| { 31 | runtime::Builder::new_multi_thread() 32 | .enable_io() 33 | .enable_time() 34 | .build() 35 | .expect("Failed to create runtime") 36 | }); 37 | 38 | #[no_mangle] 39 | pub extern "C" fn get_error() -> *mut c_char { 40 | let err = ffi_helpers::take_last_error(); 41 | match err { 42 | None => std::ptr::null_mut(), 43 | Some(err) => { 44 | let msg = err.to_string(); 45 | let mut buf = vec![0; msg.len() + 1]; 46 | 47 | buf[..msg.len()].copy_from_slice(msg.as_bytes()); 48 | // Make sure to add a trailing null in case people use this as a bare char* 49 | buf[msg.len()] = u8::NULL; 50 | 51 | let ptr = buf.as_mut_ptr(); 52 | mem::forget(buf); 53 | ptr as *mut c_char 54 | } 55 | } 56 | } 57 | 58 | /// # Safety 59 | /// We check if the pointers are null 60 | /// This is only used in a test 61 | #[no_mangle] 62 | pub unsafe extern "C" fn set_error(err: *const c_char) { 63 | null_pointer_check!(err); 64 | let msg = FfiStr::from_raw(err).into_string(); 65 | ffi_helpers::error_handling::update_last_error(anyhow::anyhow!(msg)); 66 | } 67 | 68 | #[no_mangle] 69 | pub extern "C" fn clear_error() { 70 | ffi_helpers::error_handling::clear_last_error(); 71 | } 72 | 73 | /// # Safety 74 | /// We check if the pointers are null 75 | #[no_mangle] 76 | pub unsafe extern "C" fn new_client_file( 77 | key_path: *const c_char, 78 | contract: *const c_char, 79 | network: *const c_char, 80 | namespace_version: u8, 81 | namespace: u32, 82 | ) -> *const Client { 83 | null_pointer_check!(key_path); 84 | 85 | let key_path = FfiStr::from_raw(key_path).into_string(); 86 | let key_type = || config::KeyType::File(key_path.into()); 87 | init_client(contract, network, namespace_version, namespace, key_type) 88 | } 89 | 90 | unsafe fn init_client config::KeyType>( 91 | contract: 
*const c_char, 92 | network: *const c_char, 93 | namespace_version: u8, 94 | namespace: u32, 95 | f: F, 96 | ) -> *const Client { 97 | null_pointer_check!(contract); 98 | null_pointer_check!(network); 99 | 100 | let contract = FfiStr::from_raw(contract).into_string(); 101 | let network = FfiStr::from_raw(network).as_str(); 102 | 103 | let namespace = if namespace > 0 { 104 | Some(Namespace::new(namespace_version, namespace)) 105 | } else { 106 | None 107 | }; 108 | 109 | let network = Network::try_from(network); 110 | 111 | match network { 112 | Err(e) => { 113 | update_last_error(anyhow::anyhow!(e)); 114 | null() 115 | } 116 | Ok(network) => { 117 | let config = Config { 118 | key: f(), 119 | contract, 120 | network, 121 | namespace, 122 | mode: Default::default(), // TODO: for now we don't expose mode to the client 123 | }; 124 | 125 | Box::into_raw(Box::new(Client::new(&config))) 126 | } 127 | } 128 | } 129 | 130 | /// # Safety 131 | /// We check if the pointers are null 132 | #[no_mangle] 133 | pub unsafe extern "C" fn new_client( 134 | account_id: *const c_char, 135 | secret_key: *const c_char, 136 | contract: *const c_char, 137 | network: *const c_char, 138 | // TODO: make option 139 | namespace_version: u8, 140 | namespace: u32, 141 | ) -> *const Client { 142 | null_pointer_check!(account_id); 143 | null_pointer_check!(secret_key); 144 | 145 | let account_id = FfiStr::from_raw(account_id).into_string(); 146 | let secret_key = FfiStr::from_raw(secret_key).into_string(); 147 | 148 | let key_type = || config::KeyType::SecretKey(account_id, secret_key); 149 | init_client(contract, network, namespace_version, namespace, key_type) 150 | } 151 | 152 | /// # Safety 153 | /// We check if the client is null 154 | #[no_mangle] 155 | pub unsafe extern "C" fn free_client(client: *mut Client) { 156 | null_pointer_check!(client); 157 | let _ = Box::from_raw(client); 158 | } 159 | 160 | /// # Safety 161 | /// We check if the slices are null 162 | #[no_mangle] 163 | pub 
unsafe extern "C" fn submit(client: *const Client, blob: *const BlobSafe) -> *mut c_char { 164 | null_pointer_check!(client); 165 | null_pointer_check!(blob); 166 | 167 | let client = &*client; 168 | let blob = &*blob; 169 | let blob = slice::from_raw_parts(blob.data, blob.len); 170 | 171 | RUNTIME 172 | .block_on(client.submit(Blob::new(blob.to_vec()))) 173 | .map_err(|e| anyhow::anyhow!(e)) 174 | .and_then(|x| { 175 | let ptr = CString::new(x.0.transaction_id) 176 | .with_context(|| "failed to convert transaction id to C string")? 177 | .into_raw(); 178 | Ok(ptr as *mut c_char) 179 | }) 180 | .unwrap_or(null_mut()) 181 | } 182 | 183 | #[repr(C)] 184 | #[derive(Debug, Clone)] 185 | pub struct BlobSafe { 186 | pub data: *const u8, 187 | pub len: size_t, 188 | } 189 | 190 | impl From for Blob { 191 | fn from(blob: BlobSafe) -> Self { 192 | let data = unsafe { slice::from_raw_parts(blob.data, blob.len) }; 193 | 194 | Self { 195 | data: data.to_vec(), 196 | } 197 | } 198 | } 199 | impl From for BlobSafe { 200 | fn from(blob: Blob) -> Self { 201 | let (data, len) = vec_to_safe_ptr(blob.data); 202 | Self { data, len } 203 | } 204 | } 205 | 206 | pub fn vec_to_safe_ptr(vec: Vec) -> (*const T, size_t) { 207 | let mut vec = vec; 208 | vec.shrink_to_fit(); 209 | 210 | let ptr = vec.as_ptr(); 211 | let len = vec.len(); 212 | mem::forget(vec); 213 | 214 | (ptr, len as size_t) 215 | } 216 | 217 | #[repr(C)] 218 | pub struct RustSafeArray { 219 | pub data: *const u8, 220 | pub len: size_t, 221 | } 222 | 223 | impl RustSafeArray { 224 | pub fn new(vec: Vec) -> Self { 225 | let (data, len) = vec_to_safe_ptr(vec); 226 | 227 | Self { data, len } 228 | } 229 | } 230 | 231 | impl Nullable for RustSafeArray { 232 | const NULL: Self = RustSafeArray { 233 | data: null(), 234 | len: 0, 235 | }; 236 | 237 | fn is_null(&self) -> bool { 238 | unsafe { *self.data == *Self::NULL.data && self.len == 0 } 239 | } 240 | } 241 | 242 | /// # Safety 243 | /// We check if the slices are null and they 
should always be 32 bytes 244 | #[no_mangle] 245 | pub unsafe extern "C" fn get(client: *const Client, transaction_id: *const u8) -> *const BlobSafe { 246 | null_pointer_check!(client); 247 | null_pointer_check!(transaction_id); 248 | 249 | let client = &*client; 250 | // Reads exactly 32 bytes from the caller's pointer; passing a shorter buffer is UB (caller contract per the safety doc above). 251 | let transaction_id = slice::from_raw_parts(transaction_id, 32); 252 | let transaction_id: Result<[u8; 32], _> = transaction_id.try_into(); 253 | match transaction_id { 254 | Ok(transaction_id) => scoop_err( 255 | RUNTIME 256 | .block_on(client.get(CryptoHash(transaction_id))) 257 | .map_err(|e| anyhow::anyhow!(e)) 258 | .map(|x| x.0.into()), 259 | ), 260 | Err(e) => { 261 | update_last_error(anyhow::anyhow!(e)); 262 | std::ptr::null() 263 | } 264 | } 265 | } 266 | 267 | /// # Safety 268 | /// We check if the slices are null 269 | #[no_mangle] 270 | pub unsafe extern "C" fn free_blob(blob: *mut BlobSafe) { 271 | null_pointer_check!(blob); 272 | // NOTE(review): this reclaims only the boxed `BlobSafe` struct itself; the `data` buffer it points at (leaked via `mem::forget` in `vec_to_safe_ptr`) is never freed here — apparent per-blob memory leak; confirm the intended ownership model. 273 | unsafe { 274 | let _ = Box::from_raw(blob); 275 | } 276 | } 277 | 278 | /// # Safety 279 | /// We check if the slices are null 280 | #[no_mangle] 281 | pub unsafe extern "C" fn submit_batch( 282 | client: *const Client, 283 | candidate_hex: *const c_char, 284 | tx_data: *const u8, 285 | tx_data_len: size_t, 286 | ) -> *const RustSafeArray { 287 | null_pointer_check!(client); 288 | null_pointer_check!(candidate_hex); 289 | null_pointer_check!(tx_data); 290 | 291 | let client = unsafe { &*client }; // NOTE(review): `scoop_err` heap-allocates its Ok value via `Box::into_raw`; the box wrapping this `&str` is dereference-copied out below (line 294) and never freed — small heap leak on every call. 292 | let candidate_hex = unsafe { scoop_err(CStr::from_ptr(candidate_hex).to_str()) }; 293 | null_pointer_check!(candidate_hex); 294 | let candidate_hex = *candidate_hex; 295 | let candidate_hex = candidate_hex.to_owned(); 296 | let tx_data = { unsafe { slice::from_raw_parts(tx_data, tx_data_len) } }; 297 | 298 | // TODO: this is too coupled to OP 299 | // If batcher inbox, submit the tx 300 | if candidate_hex == "0xfF00000000000000000000000000000000000000" { 301 | // Prepare the blob for submission 302 | // TODO: namespace versioning 303 | let blob =
Blob::new(tx_data.to_vec()); 304 | 305 | scoop_err( 306 | RUNTIME 307 | .block_on(client.submit(blob)) 308 | .map(|result| result.0) 309 | .map(|r| RustSafeArray::new((*r).to_vec())) 310 | .map_err(|e| anyhow::anyhow!(e)), 311 | ) 312 | } else { 313 | eprintln!("Not a batcher inbox"); 314 | update_last_error(anyhow::anyhow!("Not a batcher inbox")); 315 | &RustSafeArray::NULL 316 | } 317 | } 318 | 319 | fn scoop_err>(result: Result) -> *const T { 320 | match result { 321 | Err(e) => { 322 | let e = e.into(); 323 | eprintln!("NEAR FFI: {:?}", e); 324 | update_last_error(e); 325 | std::ptr::null() 326 | } 327 | Ok(t) => Box::into_raw(Box::new(t)), 328 | } 329 | } 330 | 331 | #[cfg(test)] 332 | pub mod test { 333 | use super::*; 334 | use da_rpc::near::config::Network; 335 | use ffi_helpers::take_last_error; 336 | use std::env; 337 | use std::ffi::CString; 338 | use std::str::FromStr; 339 | 340 | const PREVIOUSLY_SUBMITTED_TX: &str = "4YPsDMPsF35x6eWnBpFqrz1PC36tV3JdWwhTx6ZggEQo"; 341 | 342 | #[test] 343 | fn test_error_handling() { 344 | update_last_error(anyhow::anyhow!("test")); 345 | let error = unsafe { &*get_error() }; 346 | let err_str = unsafe { CStr::from_ptr(error).to_str().unwrap() }; 347 | println!("{:?}", err_str); 348 | assert_eq!("test", err_str); 349 | assert!(take_last_error().is_none()); 350 | } 351 | 352 | #[test] 353 | fn test_error_handling_manual_clear() { 354 | update_last_error(anyhow::anyhow!("test")); 355 | assert!(!get_error().is_null()); 356 | clear_error(); 357 | assert!(get_error().is_null()); 358 | } 359 | 360 | fn test_get_client() -> (Client, Config) { 361 | pretty_env_logger::try_init().ok(); 362 | let account = env::var("TEST_NEAR_ACCOUNT").unwrap(); 363 | let secret = env::var("TEST_NEAR_SECRET").unwrap(); 364 | let config = Config { 365 | key: config::KeyType::SecretKey(account.clone(), secret), 366 | contract: account.to_string(), 367 | network: Network::Testnet, 368 | namespace: None, 369 | mode: Default::default(), 370 | }; 371 | 
let client = Client::new(&config); 372 | (client, config) 373 | } 374 | 375 | #[allow(temporary_cstring_as_ptr)] // JUSTIFICATION: it only lives in this scope, so it's fine 376 | #[test] 377 | fn test_init_client() { 378 | let (_, config) = test_get_client(); 379 | assert!(unsafe { 380 | !new_client_file( 381 | CString::new("throwaway-key.json").unwrap().as_ptr(), 382 | CString::new(config.contract.to_string()).unwrap().as_ptr(), 383 | CString::new(config.network.to_string()).unwrap().as_ptr(), 384 | Namespace::default().version, 385 | Namespace::default().id, 386 | ) 387 | .is_null() 388 | }); 389 | } 390 | 391 | #[ignore = "This should be an integration test"] 392 | #[test] 393 | fn c_e2e() { 394 | unsafe { 395 | let (client, _) = test_get_client(); 396 | let original_blob = Blob::new(vec![0x01, 0x02, 0x03]); 397 | 398 | let res = submit(&client, &original_blob.clone().into()); 399 | assert!(!res.is_null()); 400 | 401 | let tx_hash = CString::from_raw(res); 402 | println!("{:?}", tx_hash); 403 | 404 | let fetched = Blob::from((*get(&client, tx_hash.as_ptr() as *const u8)).clone()); 405 | 406 | assert_eq!(original_blob.data, fetched.data); 407 | } 408 | } 409 | 410 | #[ignore = "This should be an integration test"] 411 | #[test] 412 | fn c_submit() { 413 | let blob: BlobSafe = Blob::new(vec![0x01, 0x02, 0x03]).into(); 414 | let (client, _) = test_get_client(); 415 | let res = unsafe { submit(&client, &blob) }; 416 | assert!(!res.is_null()); 417 | let binding = unsafe { CString::from_raw(res) }; 418 | let str = binding; 419 | println!("{:?}", str); 420 | } 421 | 422 | #[ignore = "This should be an integration test"] 423 | #[test] 424 | fn c_submit_1point5mb() { 425 | let blob: BlobSafe = Blob::new(vec![99u8; 1536 * 1024]).into(); 426 | let (client, _) = test_get_client(); 427 | let res = unsafe { submit(&client, &blob) }; 428 | 429 | if res.is_null() { 430 | let error = unsafe { &*get_error() }; 431 | let err_str = unsafe { CStr::from_ptr(error).to_str().unwrap() 
}; 432 | println!("{:?}", err_str); 433 | panic!("Should not be null"); 434 | } 435 | let binding = unsafe { CString::from_raw(res) }; 436 | let str = binding; 437 | println!("{:?}", str); 438 | } 439 | 440 | #[test] 441 | #[ignore = "Wait for integration tests"] 442 | fn c_get() { 443 | let (client, _) = test_get_client(); 444 | 445 | let hash = CryptoHash::from_str(PREVIOUSLY_SUBMITTED_TX).unwrap(); 446 | let ptr = hash.0.as_ptr(); 447 | 448 | let res = unsafe { get(&client, ptr) }; 449 | assert!(!res.is_null()); 450 | let safe_blob: &BlobSafe = unsafe { &*res }; 451 | let safe_blob = safe_blob.clone(); 452 | println!("{:?}", safe_blob); 453 | assert_eq!(safe_blob.len, 706); 454 | let data = unsafe { slice::from_raw_parts(safe_blob.data, safe_blob.len as usize) }; 455 | assert_eq!(data.len(), 706); 456 | } 457 | 458 | #[test] 459 | fn test_blob_to_blobsafe() { 460 | let blob = Blob::new(vec![0x01, 0x02, 0x03]); 461 | let blob_safe: BlobSafe = blob.into(); 462 | assert_eq!(blob_safe.len, 3); 463 | let data = unsafe { slice::from_raw_parts(blob_safe.data, blob_safe.len) }; 464 | assert_eq!(data, &vec![0x01, 0x02, 0x03]); 465 | } 466 | } 467 | -------------------------------------------------------------------------------- /crates/da-rpc/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | authors.workspace = true 3 | edition.workspace = true 4 | name = "near-da-rpc" 5 | version.workspace = true 6 | 7 | [dependencies] 8 | async-trait = { workspace = true } 9 | eyre = { workspace = true } 10 | futures = { workspace = true } 11 | tokio = { version = "1.0", features = [ "full" ] } 12 | tracing = { workspace = true } 13 | url = "2.5" 14 | 15 | # Serialization 16 | serde = { workspace = true, default-features = true } 17 | serde_json = { workspace = true } 18 | serde_with = { workspace = true, default-features = true } 19 | 20 | borsh = { workspace = true } 21 | near-crypto = { workspace = true } 22 | 
near-da-primitives = { path = "../primitives" } 23 | near-jsonrpc-client = { workspace = true } 24 | near-jsonrpc-primitives = { workspace = true } 25 | near-primitives = { workspace = true } 26 | 27 | [dev-dependencies] 28 | tracing-subscriber = "*" 29 | 30 | [build-dependencies] 31 | cbindgen = "*" 32 | which = "*" 33 | -------------------------------------------------------------------------------- /crates/da-rpc/src/lib.rs: -------------------------------------------------------------------------------- 1 | use eyre::Result; 2 | pub use near_da_primitives::{Blob, BlobRef, Commitment, Namespace}; 3 | pub use near_primitives::hash::CryptoHash; 4 | use near_primitives::types::BlockHeight; 5 | use serde::{Deserialize, Serialize}; 6 | 7 | pub mod near; 8 | 9 | #[repr(C)] 10 | #[derive(Debug, Clone, Serialize, Deserialize)] 11 | pub struct SubmitResult(pub BlobRef); 12 | 13 | #[repr(C)] 14 | #[derive(Debug, Clone, Serialize, Deserialize)] 15 | pub struct Read(pub Blob); 16 | 17 | #[repr(C)] 18 | #[derive(Debug, Clone, Serialize, Deserialize)] 19 | pub struct ReadAll(pub Vec<(BlockHeight, Blob)>); 20 | 21 | #[repr(C)] 22 | #[derive(Debug, Clone, Serialize, Deserialize)] 23 | pub struct IndexRead(pub Blob); 24 | 25 | #[async_trait::async_trait] 26 | pub trait DataAvailability { 27 | /// Submit blobs to the da layer 28 | async fn submit(&self, blob: Blob) -> Result; 29 | /// Read blob by namespace and height 30 | async fn get(&self, transaction_id: CryptoHash) -> Result; 31 | } 32 | -------------------------------------------------------------------------------- /crates/da-rpc/src/near/config.rs: -------------------------------------------------------------------------------- 1 | use near_da_primitives::{Mode, Namespace}; 2 | use serde::{Deserialize, Deserializer}; 3 | use std::{fmt::Display, path::PathBuf}; 4 | use url::Url; 5 | 6 | #[derive(Debug, Clone, Deserialize)] 7 | pub enum KeyType { 8 | File(PathBuf), 9 | Seed(String, String), 10 | SecretKey(String, String), 
11 | } 12 | 13 | #[cfg(test)] 14 | impl Default for KeyType { 15 | fn default() -> Self { 16 | Self::File(PathBuf::from("throwaway-key.json")) 17 | } 18 | } 19 | 20 | #[derive(Debug, Clone, Deserialize)] 21 | #[cfg_attr(test, derive(Default))] 22 | pub struct Config { 23 | pub key: KeyType, 24 | pub contract: String, 25 | pub network: Network, 26 | pub namespace: Option, 27 | pub mode: Mode, 28 | } 29 | 30 | // TODO: stole from near-light-client, create primitives to share this 31 | #[derive(Debug, Clone, Default, PartialEq, Eq)] 32 | pub enum Network { 33 | Mainnet, 34 | #[default] 35 | Testnet, 36 | // provide url 37 | Custom(String), 38 | } 39 | 40 | impl Network { 41 | fn parse_customnet(s: &str) -> Result { 42 | s.parse::() 43 | .map_err(|err| err.to_string()) 44 | .map(|_| Network::Custom(s.into())) 45 | } 46 | } 47 | 48 | impl<'de> Deserialize<'de> for Network { 49 | fn deserialize(deserializer: D) -> Result 50 | where 51 | D: Deserializer<'de>, 52 | { 53 | let s: &str = Deserialize::deserialize(deserializer)?; 54 | s.try_into().map_err(serde::de::Error::custom) 55 | } 56 | } 57 | 58 | impl Network { 59 | pub fn to_endpoint(&self) -> String { 60 | const MAINNET_RPC_ENDPOINT: &str = "https://rpc.mainnet.near.org"; 61 | const TESTNET_RPC_ENDPOINT: &str = "https://rpc.testnet.near.org"; 62 | match self { 63 | Self::Mainnet => MAINNET_RPC_ENDPOINT.into(), 64 | Self::Testnet => TESTNET_RPC_ENDPOINT.into(), 65 | Self::Custom(url) => url.clone(), 66 | } 67 | } 68 | pub fn archive_endpoint(&self) -> String { 69 | const MAINNET_RPC_ARCHIVE_ENDPOINT: &str = "https://archival-rpc.mainnet.near.org"; 70 | const TESTNET_RPC_ARCHIVE_ENDPOINT: &str = "https://archival-rpc.testnet.near.org"; 71 | match self { 72 | Self::Mainnet => MAINNET_RPC_ARCHIVE_ENDPOINT.into(), 73 | Self::Testnet => TESTNET_RPC_ARCHIVE_ENDPOINT.into(), 74 | Self::Custom(url) => url.clone(), 75 | } 76 | } 77 | } 78 | 79 | impl Display for Network { 80 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> 
std::fmt::Result { 81 | let s = match self { 82 | Self::Mainnet => "mainnet", 83 | Self::Testnet => "testnet", 84 | Self::Custom(url) => url.as_str(), 85 | }; 86 | write!(f, "{}", s) 87 | } 88 | } 89 | 90 | impl TryFrom<&str> for Network { 91 | type Error = String; 92 | fn try_from(s: &str) -> Result { 93 | match s.to_lowercase().as_str() { 94 | "mainnet" => Ok(Self::Mainnet), 95 | "testnet" => Ok(Self::Testnet), 96 | url => Self::parse_customnet(url), 97 | } 98 | } 99 | } 100 | 101 | #[cfg(test)] 102 | mod tests { 103 | use super::*; 104 | 105 | #[test] 106 | fn test_network_from_str() { 107 | let network = Network::try_from("mainnet").unwrap(); 108 | assert_eq!(network, Network::Mainnet); 109 | 110 | let network = Network::try_from("MAINNET").unwrap(); 111 | assert_eq!(network, Network::Mainnet); 112 | 113 | let network = Network::try_from("testnet").unwrap(); 114 | assert_eq!(network, Network::Testnet); 115 | 116 | { 117 | let url = "http://127.0.0.1:3030"; 118 | let network = Network::try_from(url).unwrap(); 119 | assert_eq!(network, Network::Custom(url.into())); 120 | } 121 | 122 | { 123 | let url = "ws://someurl:2754"; 124 | let network = Network::try_from(url).unwrap(); 125 | assert_eq!(network, Network::Custom(url.into())); 126 | } 127 | } 128 | 129 | #[test] 130 | fn test_invalid_local_adress() { 131 | let network = Network::try_from("invalid").unwrap_err(); 132 | assert_eq!(network, "relative URL without a base"); 133 | } 134 | 135 | #[test] 136 | fn test_network_case_insensitive() { 137 | let network = Network::try_from("MAINNET").unwrap(); 138 | assert_eq!(network, Network::Mainnet); 139 | } 140 | } 141 | -------------------------------------------------------------------------------- /crates/http-api-data/Cargo.toml: -------------------------------------------------------------------------------- 1 | # The intent of this crate is to not depend on any chain-specific crates, to 2 | # avoid dependency problems and simplify the dependency graph as much as 
3 | # possible. 4 | 5 | [package] 6 | name = "near-da-http-api-data" 7 | version.workspace = true 8 | authors.workspace = true 9 | edition.workspace = true 10 | 11 | [dependencies] 12 | serde_with = { version = "3", default-features = false, features = [ 13 | "hex", 14 | "macros", 15 | ] } 16 | serde = "1" 17 | near-da-primitives = { path = "../primitives", default-features = false, features = [ 18 | "std", 19 | ] } 20 | -------------------------------------------------------------------------------- /crates/http-api-data/src/lib.rs: -------------------------------------------------------------------------------- 1 | #![no_std] 2 | extern crate alloc; 3 | 4 | use alloc::string::String; 5 | use near_da_primitives::Mode; 6 | pub use near_da_primitives::{Blob, BlobRef, Namespace}; 7 | use serde::{Deserialize, Serialize}; 8 | 9 | #[derive(Serialize, Deserialize, PartialEq, Clone, Debug)] 10 | pub struct ConfigureClientRequest { 11 | pub account_id: String, 12 | pub secret_key: String, 13 | pub contract_id: String, 14 | pub network: String, 15 | pub namespace: Option, 16 | pub mode: Option, 17 | /// How big the bytes should be for the lru lookup cache 18 | #[serde(default = "default_bool::")] 19 | pub should_cache: bool, 20 | } 21 | 22 | pub const fn default_bool() -> bool { 23 | V 24 | } 25 | 26 | // TODO: tech debt 27 | // impl From for ConfigureClientRequest {} 28 | -------------------------------------------------------------------------------- /crates/primitives/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | authors.workspace = true 3 | edition.workspace = true 4 | name = "near-da-primitives" 5 | version.workspace = true 6 | 7 | [dependencies] 8 | borsh = { workspace = true } 9 | serde = { workspace = true } 10 | serde_with = { workspace = true } 11 | 12 | [dev-dependencies] 13 | hex = { workspace = true } 14 | 15 | [features] 16 | default = [ "std" ] 17 | std = [ "serde_with/std", "serde/std", 
"borsh/std" ] 18 | -------------------------------------------------------------------------------- /crates/primitives/src/lib.rs: -------------------------------------------------------------------------------- 1 | #![cfg_attr(not(feature = "std"), no_std)] 2 | 3 | extern crate alloc; 4 | 5 | use borsh::{BorshDeserialize, BorshSerialize}; 6 | use serde::{Deserialize, Serialize}; 7 | use serde_with::serde_as; 8 | 9 | use core::ops::Deref; 10 | 11 | pub type Data = alloc::vec::Vec; 12 | pub type ShareVersion = u32; 13 | pub type Commitment = [u8; 32]; 14 | pub type BlockHeight = u64; 15 | 16 | /// The namespace is a reference to who is submitting blobs, it will be considered 17 | /// important in the blob registry. This allows users not familiar with NEAR to use a shared 18 | /// contract, with shared proving capabilities. 19 | /// 20 | /// TODO: optional namespace for users who submit their own blobs to their own contract 21 | #[derive( 22 | Clone, 23 | Copy, 24 | BorshSerialize, 25 | BorshDeserialize, 26 | Ord, 27 | PartialOrd, 28 | Eq, 29 | PartialEq, 30 | Default, 31 | Serialize, 32 | Deserialize, 33 | Debug, 34 | )] 35 | pub struct Namespace { 36 | pub version: u8, 37 | pub id: u32, 38 | } 39 | 40 | impl Namespace { 41 | pub fn new(version: u8, id: u32) -> Self { 42 | Self { version, id } 43 | } 44 | } 45 | 46 | #[serde_as] 47 | #[derive(Deserialize, Serialize, BorshSerialize, BorshDeserialize, Clone, Debug)] 48 | #[cfg_attr(test, derive(PartialEq, Eq))] 49 | pub struct Blob { 50 | #[serde_as(as = "serde_with::hex::Hex")] 51 | pub data: Data, 52 | } 53 | 54 | impl Blob { 55 | pub fn new(data: Data) -> Self { 56 | Self { data } 57 | } 58 | } 59 | 60 | impl From for Blob { 61 | fn from(data: Data) -> Self { 62 | Self { data } 63 | } 64 | } 65 | 66 | impl From for Blob { 67 | fn from(legacy_blob: LegacyBlob) -> Self { 68 | Self { 69 | data: legacy_blob.data, 70 | } 71 | } 72 | } 73 | 74 | #[serde_as] 75 | #[derive(Deserialize, Serialize, BorshSerialize, 
BorshDeserialize, Clone, Debug)] 76 | #[cfg_attr(test, derive(PartialEq, Eq))] 77 | pub struct LegacyBlob { 78 | pub namespace: Namespace, 79 | pub share_version: u32, 80 | #[serde_as(as = "serde_with::hex::Hex")] 81 | pub commitment: [u8; 32], 82 | #[serde_as(as = "serde_with::hex::Hex")] 83 | pub data: Data, 84 | } 85 | 86 | // TODO: these can actually be many multiples of txhash now. 87 | #[serde_as] 88 | #[cfg_attr(test, derive(PartialEq, Eq))] 89 | #[derive(Debug, Serialize, Deserialize, Clone)] 90 | pub struct BlobRef { 91 | #[serde_as(as = "serde_with::hex::Hex")] 92 | pub transaction_id: [u8; 32], 93 | } 94 | 95 | impl From<[u8; 32]> for BlobRef { 96 | fn from(transaction_id: [u8; 32]) -> Self { 97 | Self { transaction_id } 98 | } 99 | } 100 | 101 | pub const BLOB_REF_SIZE: usize = 32; 102 | 103 | impl BlobRef { 104 | pub fn new(transaction_id: [u8; BLOB_REF_SIZE]) -> Self { 105 | Self { transaction_id } 106 | } 107 | } 108 | 109 | impl Deref for BlobRef { 110 | type Target = [u8; BLOB_REF_SIZE]; 111 | fn deref(&self) -> &Self::Target { 112 | &self.transaction_id 113 | } 114 | } 115 | 116 | #[serde_with::serde_as] 117 | #[derive(Serialize, Deserialize, BorshSerialize, BorshDeserialize, PartialEq, Clone, Debug)] 118 | pub struct SubmitRequest { 119 | pub namespace: Option, 120 | #[serde_as(as = "serde_with::hex::Hex")] 121 | pub data: Vec, 122 | } 123 | 124 | #[derive(Serialize, Deserialize, PartialEq, Clone, Debug, Default)] 125 | #[serde(rename_all = "lowercase")] 126 | pub enum Mode { 127 | /// Wait for 128 | /// - Inclusion in the block, but not finalized 129 | Optimistic, 130 | /// Wait for 131 | /// - Transaction execution, but additional receipts/refunds were not included 132 | Standard, 133 | /// Wait for 134 | /// - Inclusion in the block 135 | /// - Execution of the blob (even though theres no execution) 136 | /// - All other shards execute 137 | #[default] 138 | Pessimistic, 139 | } 140 | 141 | impl From<&str> for Mode { 142 | fn from(s: &str) -> 
Self { 143 | match s.to_lowercase().as_str() { 144 | "optimistic" => Mode::Optimistic, 145 | "standard" => Mode::Standard, 146 | "pessimistic" => Mode::Pessimistic, 147 | _ => Mode::Pessimistic, 148 | } 149 | } 150 | } 151 | 152 | #[cfg(test)] 153 | mod tests { 154 | use super::*; 155 | 156 | #[test] 157 | fn test_format() { 158 | let frame_ref = BlobRef::new([2u8; BLOB_REF_SIZE]); 159 | assert_eq!( 160 | *frame_ref, 161 | [ 162 | 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 163 | 2, 2, 2, 2 164 | ], 165 | "FrameRef::to_celestia_format() should return 40 bytes array" 166 | ); 167 | } 168 | } 169 | -------------------------------------------------------------------------------- /devenv.lock: -------------------------------------------------------------------------------- 1 | { 2 | "nodes": { 3 | "devenv": { 4 | "locked": { 5 | "dir": "src/modules", 6 | "lastModified": 1716211464, 7 | "owner": "cachix", 8 | "repo": "devenv", 9 | "rev": "91efc0986bba23527af7d777ddcfebcec3ad9766", 10 | "treeHash": "362af2d1ce62fea959555b57cd73106a417129d0", 11 | "type": "github" 12 | }, 13 | "original": { 14 | "dir": "src/modules", 15 | "owner": "cachix", 16 | "repo": "devenv", 17 | "type": "github" 18 | } 19 | }, 20 | "fenix": { 21 | "inputs": { 22 | "nixpkgs": [ 23 | "nixpkgs" 24 | ], 25 | "rust-analyzer-src": "rust-analyzer-src" 26 | }, 27 | "locked": { 28 | "lastModified": 1712730246, 29 | "owner": "nix-community", 30 | "repo": "fenix", 31 | "rev": "d402ae4a5e5676722290470f61a5e8e3155b5487", 32 | "treeHash": "95703e3e405d8986c90164b40fcc0e8023dc03d9", 33 | "type": "github" 34 | }, 35 | "original": { 36 | "owner": "nix-community", 37 | "repo": "fenix", 38 | "rev": "d402ae4a5e5676722290470f61a5e8e3155b5487", 39 | "type": "github" 40 | } 41 | }, 42 | "flake-compat": { 43 | "flake": false, 44 | "locked": { 45 | "lastModified": 1696426674, 46 | "owner": "edolstra", 47 | "repo": "flake-compat", 48 | "rev": "0f9255e01c2351cc7d116c072cb317785dd33b33", 
49 | "treeHash": "2addb7b71a20a25ea74feeaf5c2f6a6b30898ecb", 50 | "type": "github" 51 | }, 52 | "original": { 53 | "owner": "edolstra", 54 | "repo": "flake-compat", 55 | "type": "github" 56 | } 57 | }, 58 | "flake-compat_2": { 59 | "flake": false, 60 | "locked": { 61 | "lastModified": 1696426674, 62 | "owner": "edolstra", 63 | "repo": "flake-compat", 64 | "rev": "0f9255e01c2351cc7d116c072cb317785dd33b33", 65 | "treeHash": "2addb7b71a20a25ea74feeaf5c2f6a6b30898ecb", 66 | "type": "github" 67 | }, 68 | "original": { 69 | "owner": "edolstra", 70 | "repo": "flake-compat", 71 | "type": "github" 72 | } 73 | }, 74 | "gitignore": { 75 | "inputs": { 76 | "nixpkgs": [ 77 | "pre-commit-hooks", 78 | "nixpkgs" 79 | ] 80 | }, 81 | "locked": { 82 | "lastModified": 1709087332, 83 | "owner": "hercules-ci", 84 | "repo": "gitignore.nix", 85 | "rev": "637db329424fd7e46cf4185293b9cc8c88c95394", 86 | "treeHash": "ca14199cabdfe1a06a7b1654c76ed49100a689f9", 87 | "type": "github" 88 | }, 89 | "original": { 90 | "owner": "hercules-ci", 91 | "repo": "gitignore.nix", 92 | "type": "github" 93 | } 94 | }, 95 | "nixpkgs": { 96 | "locked": { 97 | "lastModified": 1713361204, 98 | "owner": "cachix", 99 | "repo": "devenv-nixpkgs", 100 | "rev": "285676e87ad9f0ca23d8714a6ab61e7e027020c6", 101 | "treeHash": "50354b35a3e0277d4a83a0a88fa0b0866b5f392f", 102 | "type": "github" 103 | }, 104 | "original": { 105 | "owner": "cachix", 106 | "ref": "rolling", 107 | "repo": "devenv-nixpkgs", 108 | "type": "github" 109 | } 110 | }, 111 | "nixpkgs-python": { 112 | "inputs": { 113 | "flake-compat": "flake-compat", 114 | "nixpkgs": [ 115 | "nixpkgs" 116 | ] 117 | }, 118 | "locked": { 119 | "lastModified": 1715840472, 120 | "owner": "cachix", 121 | "repo": "nixpkgs-python", 122 | "rev": "e828d3225853fc93a34e0326ceb987fea89b3d74", 123 | "treeHash": "a968a7ca6960a3ff76c43d9c7ffe983ecb00ff84", 124 | "type": "github" 125 | }, 126 | "original": { 127 | "owner": "cachix", 128 | "repo": "nixpkgs-python", 129 | "type": 
"github" 130 | } 131 | }, 132 | "nixpkgs-stable": { 133 | "locked": { 134 | "lastModified": 1716061101, 135 | "owner": "NixOS", 136 | "repo": "nixpkgs", 137 | "rev": "e7cc61784ddf51c81487637b3031a6dd2d6673a2", 138 | "treeHash": "0dfa8dcfb5f2b25b4be1a6f873c087557f335fac", 139 | "type": "github" 140 | }, 141 | "original": { 142 | "owner": "NixOS", 143 | "ref": "nixos-23.11", 144 | "repo": "nixpkgs", 145 | "type": "github" 146 | } 147 | }, 148 | "nixpkgs-working-bun": { 149 | "locked": { 150 | "lastModified": 1710406996, 151 | "owner": "nixos", 152 | "repo": "nixpkgs", 153 | "rev": "9e58aca561e18f5197029926db8dbde1738a2ff5", 154 | "treeHash": "9db4093f3ab4bde3632feee76e9f69bd4b46d192", 155 | "type": "github" 156 | }, 157 | "original": { 158 | "owner": "nixos", 159 | "repo": "nixpkgs", 160 | "rev": "9e58aca561e18f5197029926db8dbde1738a2ff5", 161 | "type": "github" 162 | } 163 | }, 164 | "pre-commit-hooks": { 165 | "inputs": { 166 | "flake-compat": "flake-compat_2", 167 | "gitignore": "gitignore", 168 | "nixpkgs": [ 169 | "nixpkgs" 170 | ], 171 | "nixpkgs-stable": "nixpkgs-stable" 172 | }, 173 | "locked": { 174 | "lastModified": 1715870890, 175 | "owner": "cachix", 176 | "repo": "pre-commit-hooks.nix", 177 | "rev": "fa606cccd7b0ccebe2880051208e4a0f61bfc8c1", 178 | "treeHash": "ea38e19d9964cd72fb9936d75b9807756c754234", 179 | "type": "github" 180 | }, 181 | "original": { 182 | "owner": "cachix", 183 | "repo": "pre-commit-hooks.nix", 184 | "type": "github" 185 | } 186 | }, 187 | "root": { 188 | "inputs": { 189 | "devenv": "devenv", 190 | "fenix": "fenix", 191 | "nixpkgs": "nixpkgs", 192 | "nixpkgs-python": "nixpkgs-python", 193 | "nixpkgs-working-bun": "nixpkgs-working-bun", 194 | "pre-commit-hooks": "pre-commit-hooks" 195 | } 196 | }, 197 | "rust-analyzer-src": { 198 | "flake": false, 199 | "locked": { 200 | "lastModified": 1716107283, 201 | "owner": "rust-lang", 202 | "repo": "rust-analyzer", 203 | "rev": "21ec8f523812b88418b2bfc64240c62b3dd967bd", 204 | "treeHash": 
"9919709f182ddef513a0602773fba1a13a970d5d", 205 | "type": "github" 206 | }, 207 | "original": { 208 | "owner": "rust-lang", 209 | "ref": "nightly", 210 | "repo": "rust-analyzer", 211 | "type": "github" 212 | } 213 | } 214 | }, 215 | "root": "root", 216 | "version": 7 217 | } 218 | -------------------------------------------------------------------------------- /devenv.nix: -------------------------------------------------------------------------------- 1 | { inputs, pkgs, ... }: 2 | 3 | { 4 | devcontainer.enable = true; 5 | difftastic.enable = true; 6 | dotenv.enable = true; 7 | 8 | languages = { 9 | python = { 10 | enable = true; 11 | }; 12 | go.enable = true; 13 | go.package = pkgs.go_1_21; 14 | nix.enable = true; 15 | c.enable = true; 16 | cplusplus.enable = true; 17 | rust = { 18 | enable = true; 19 | targets = [ 20 | "wasm32-unknown-unknown" 21 | ]; 22 | # https://devenv.sh/reference/options/#languagesrustchannel 23 | channel = "stable"; 24 | components = [ 25 | "rustc" 26 | "cargo" 27 | "clippy" 28 | "rustfmt" 29 | "rust-src" 30 | ]; 31 | }; 32 | }; 33 | 34 | env.LIBCLANG_PATH = pkgs.lib.makeLibraryPath [ pkgs.llvmPackages_latest.libclang.lib ]; 35 | 36 | 37 | # https://devenv.sh/packages/ 38 | packages = with pkgs; [ 39 | git 40 | openssl 41 | rust-analyzer 42 | ninja 43 | protobuf 44 | just 45 | 46 | # bun without bugs 47 | (inputs.nixpkgs-working-bun.legacyPackages.${system}.bun) 48 | ]; 49 | 50 | enterShell = '' 51 | echo "Welcome to devshell! Printing info.." 52 | devenv info 53 | 54 | echo "Printing legacy just commands.." 
55 | just 56 | ''; 57 | 58 | # https://devenv.sh/tests/ 59 | enterTest = '' 60 | echo "Running tests" 61 | 62 | # Near localnet 63 | wait_for_port 5888 64 | 65 | # Sidecar 66 | wait_for_port 3030 67 | 68 | 69 | test-rust 70 | test-eth 71 | ''; 72 | 73 | # https://devenv.sh/pre-commit-hooks/ 74 | pre-commit.hooks = { 75 | # execute example shell from Markdown files 76 | mdsh.enable = true; 77 | # format Python code 78 | black.enable = true; 79 | 80 | # shellcheck.enable = true; 81 | check-json.enable = true; 82 | check-toml.enable = true; 83 | check-yaml.enable = true; 84 | clippy.enable = true; 85 | detect-private-keys.enable = true; 86 | flake-checker.enable = true; 87 | gofmt.enable = true; 88 | # FIXME: Doesnt work because we setup sidecar etc gotest.enable = true; 89 | rustfmt.enable = true; 90 | cargo-check.enable = true; 91 | }; 92 | # https://devenv.sh/services/ 93 | # services.postgres.enable = true; 94 | 95 | # https://devenv.sh/processes/ 96 | 97 | scripts = { 98 | # The sidecar used to interact with a live network 99 | sidecar.exec = "RUST_LOG=debug cargo run --bin near-da-sidecar -- -c http-config.json"; 100 | 101 | # Test rust workspace 102 | test-rust.exec = "cargo test --workspace --all-features"; 103 | 104 | # Test near da contract on eth 105 | test-eth.exec = '' 106 | cd eth 107 | bun install 108 | bun run lint 109 | forge build --sizes 110 | forge config 111 | forge test --gas-report 112 | ''; 113 | 114 | # Generate a changelog 115 | changelog.exec = "git-cliff > CHANGELOG.md"; 116 | 117 | # Enrich JSON file with environment variable values 118 | enrich.exec = ''scripts/enrich.sh http-config.template.json http-config.json''; 119 | }; 120 | processes = { 121 | set-key.exec = "docker compose up near-localnet-set-key"; 122 | localnet.exec = "docker compose up --build near-localnet"; 123 | sidecar.exec = "RUST_LOG=debug cargo run --bin near-da-sidecar -- -c test/http-sidecar.json"; 124 | }; 125 | # See full reference at 
https://devenv.sh/reference/options/ 126 | } 127 | -------------------------------------------------------------------------------- /devenv.yaml: -------------------------------------------------------------------------------- 1 | inputs: 2 | nixpkgs: 3 | url: github:cachix/devenv-nixpkgs/rolling 4 | nixpkgs-python: 5 | url: github:cachix/nixpkgs-python 6 | inputs: 7 | nixpkgs: 8 | follows: nixpkgs 9 | nixpkgs-working-bun: 10 | url: github:nixos/nixpkgs/9e58aca561e18f5197029926db8dbde1738a2ff5 11 | fenix: 12 | url: github:nix-community/fenix?rev=d402ae4a5e5676722290470f61a5e8e3155b5487 13 | inputs: 14 | nixpkgs: 15 | follows: nixpkgs 16 | allowUnfree: true 17 | -------------------------------------------------------------------------------- /docker-compose.yml: -------------------------------------------------------------------------------- 1 | 2 | networks: 3 | default: 4 | name: nearda 5 | 6 | services: 7 | near-da-sidecar: 8 | container_name: near-da-sidecar 9 | image: ghcr.io/nuffle-labs/data-availability/sidecar:dev 10 | # build: 11 | # context: . 
12 | # dockerfile: bin/http-api/Dockerfile 13 | restart: unless-stopped 14 | depends_on: 15 | - near-localnet-set-key 16 | environment: 17 | - RUST_LOG=debug 18 | volumes: 19 | - ./test/http-sidecar.json:/app/config.json 20 | command: 21 | - -c 22 | - /app/config.json 23 | ports: 24 | - 5888:5888 25 | 26 | near-localnet-set-key: 27 | container_name: near-localnet-set-key 28 | build: 29 | context: ./test 30 | dockerfile: sandbox.Dockerfile 31 | depends_on: 32 | near-localnet: 33 | condition: service_healthy 34 | volumes: 35 | - near-sandbox-data:/root/.near 36 | - ./test/http-sidecar.json:/config.json 37 | entrypoint: 38 | - bash 39 | - -c 40 | - sed "s/HTTP_API_TEST_SECRET_KEY/`cat /root/.near/validator_key.json | jq -r '.secret_key'`/g" /config.json > /config2.json && cp /config2.json /config.json 41 | 42 | near-localnet: 43 | container_name: near-localnet 44 | build: 45 | context: ./test 46 | dockerfile: sandbox.Dockerfile 47 | healthcheck: 48 | test: curl --fail http://localhost:3030/health || exit 1 49 | interval: 30s 50 | retries: 5 51 | start_period: 10s 52 | timeout: 5s 53 | volumes: 54 | - near-sandbox-data:/root/.near 55 | ports: 56 | - 3030:3030 57 | 58 | volumes: 59 | near-sandbox-data: 60 | 61 | -------------------------------------------------------------------------------- /docs/OP-Alt-DA.md: -------------------------------------------------------------------------------- 1 | # How to use OP Alt-DA with NEAR DA 2 | 3 | ## Introduction 4 | 5 | This is a guide on how to run NEAR DA with OP Alt-DA mode. The instructions will cover localnet and testnet. 6 | 7 | ### Setup DA Server 8 | 9 | Follow the instructions in [Getting Started](https://github.com/Nuffle-Labs/data-availability/blob/main/README.md#getting-started) section of the main repository.
10 | 11 | The main two things to look out for when configuring the NEAR DA Server are: 12 | 13 | While running `docker compose`, `http-sidecar.json` should be set correctly depending on the environment that you would like to run the NEAR DA sidecar for. 14 | 15 | For localnet: 16 | 17 | ``` json 18 | { 19 | "account_id": "test.near", 20 | "secret_key": "ed25519:4dagBsEqCv3Ao5wa4KKFa57xNAH4wuBjh9wdTNYeCqDSeA9zE7fCnHSvWpU8t68jUpcCGqgfYwcH68suPaqmdcgm", 21 | "contract_id": "test.near", 22 | "network": "http://near-localnet:3030", 23 | "namespace": null, 24 | "should_cache": false 25 | } 26 | 27 | ``` 28 | 29 | For testing localnet setup with NEAR DA sidecar, run `docker-compose up` in the root of the repository. 3 containers including `near-localnet`, `near-localnet-set-key` and `near-da-sidecar` will run. 30 | 31 | For testnet: 32 | 33 | ``` json 34 | { 35 | "account_id": "", 36 | "secret_key": "ed25519:4dagBsEqCv3Ao5wa4KKFa57xNAH4wuBjh9wdTNYeCqDSeA9zE7fCnHSvWpU8t68jUpcCGqgfYwcH68suPaqmdcgm", 37 | "contract_id": "", 38 | "network": "https://rpc.testnet.near.org", 39 | "namespace": null, 40 | "should_cache": false 41 | } 42 | 43 | ``` 44 | 45 | ### Configure OP Node 46 | 47 | Follow the instructions [in OP Create L2 Rollup](https://docs.optimism.io/builders/chain-operators/tutorials/create-l2-rollup) to set up your OP Node. 48 | 49 | Check the ports in `docker-compose` file for the DA server 50 | ``` yml 51 | services: 52 | near-da-sidecar: 53 | container_name: near-da-sidecar 54 | image: ghcr.io/nuffle-labs/data-availability/sidecar:dev 55 | # build: 56 | # context: . 57 | # dockerfile: bin/http-api/Dockerfile 58 | restart: unless-stopped 59 | depends_on: 60 | - near-localnet-set-key 61 | environment: 62 | - RUST_LOG=debug 63 | volumes: 64 | - ./test/http-sidecar.json:/app/config.json 65 | command: 66 | - -c 67 | - /app/config.json 68 | ports: 69 | - 5888:5888 70 | ``` 71 | 72 | Set --altda.enabled=true and point both op-batcher and op-node to the DA server. 
73 | No configuration changes are required for op-geth or op-proposer. 74 | Alt-DA (EXPERIMENTAL) 75 | 76 | --altda.da-server value ($OP_NODE_ALTDA_DA_SERVER) 77 | HTTP address of a DA Server 78 | 79 | --altda.enabled (default: false) ($OP_NODE_ALTDA_ENABLED) 80 | Enable Alt-DA mode 81 | 82 | --altda.verify-on-read (default: true) ($OP_NODE_ALTDA_VERIFY_ON_READ) 83 | Verify input data matches the commitments from the DA storage service 84 | 85 | Configure Your Batcher 86 | Set --altda.enabled=true and --altda.da-service=true. 87 | Provide the URL for --altda.da-server=$DA_SERVER_HTTP_URL. 88 | 89 | ``` 90 | --altda.da-server value ($OP_BATCHER_ALTDA_DA_SERVER) 91 | HTTP address of a DA Server 92 | ``` 93 | --altda.da-service (default: false) ($OP_BATCHER_ALTDA_DA_SERVICE) 94 | Use DA service type where commitments are generated by the DA server 95 | 96 | --altda.enabled (default: false) ($OP_BATCHER_ALTDA_ENABLED) 97 | Enable Alt-DA mode 98 | 99 | --altda.verify-on-read (default: true) ($OP_BATCHER_ALTDA_VERIFY_ON_READ) 100 | Verify input data matches the commitments from the DA storage service 101 | -------------------------------------------------------------------------------- /docs/README.md: -------------------------------------------------------------------------------- 1 | # NEAR Data Availability architecture docs 2 | 3 | For our architecture docs, we make use of [mermaid](https://mermaid.js.org/intro/). 4 | It's a simple and easy to use architecture-as-code software, with an online editor and various plugins. 5 | GitHub also supports this out of the box within mermaid code blocks. 6 | 7 | 8 | 9 | -------------------------------------------------------------------------------- /docs/da_rpc_client.md: -------------------------------------------------------------------------------- 1 | # DA RPC Client 2 | 3 | The below diagrams outline how a rollup will interact with DA depending on their architecture. 
4 | 5 | 6 | ## Rust 7 | 8 | ```mermaid 9 | classDiagram 10 | class DaRpcClient 11 | class Blob { 12 | +Namespace namespace 13 | +bytes32 commitment 14 | +bytes data 15 | } 16 | class Namespace { 17 | +u8 version 18 | +u32 id 19 | } 20 | class FrameRef { 21 | +bytes32 tx_id 22 | +bytes32 commitment 23 | } 24 | 25 | class DaRpc { 26 | <<interface>> 27 | +submit(List~Blob~) FrameRef 28 | +get(tx_id) 29 | } 30 | 31 | DaRpc <|-- DaRpcClient : implements 32 | DaRpc >-- Rollup : submit blobs 33 | DaRpc >-- Rollup : get blobs 34 | 35 | class L1 { 36 | postCommitment() 37 | verifySequence() 38 | } 39 | L1 >-- Rollup : post frameRef with commitments 40 | ``` 41 | 42 | ## Golang, or anything CFFI compatible 43 | 44 | This diagram outlines how rollups written in golang would interact with the go rpc client. 45 | 46 | ```mermaid 47 | classDiagram 48 | class Blob{ 49 | +Namespace namespace 50 | +bytes32 commitment 51 | +bytes data 52 | } 53 | 54 | class Namespace { 55 | +u8 version 56 | +u32 id 57 | } 58 | 59 | class FrameRef { 60 | +bytes32 tx_id 61 | +bytes32 commitment 62 | } 63 | 64 | class DaRpcClient 65 | 66 | class DaRpc{ 67 | <<interface>> 68 | +submit(List~Blob~) FrameRef 69 | +get(tx_id) 70 | } 71 | 72 | class DaRpcSys{ 73 | +new_client(account, sk, contract, network, namespace) 74 | +submit(*client, blobs) frame 75 | +get(*client, tx_id) 76 | } 77 | 78 | class DaRpcGo { 79 | +newConfig(account, contract, key, namespaceId) Config 80 | +submit(*Config, candidate, data) FrameRef 81 | +force_submit(*Config, data) FrameRef 82 | +get(*Config, FrameRef frameRef, txIndex) 83 | } 84 | 85 | DaRpc <|-- DaRpcClient : implements 86 | DaRpc >-- DaRpcSys : uses 87 | DaRpcSys >-- DaRpcGo : uses 88 | 89 | DaRpcGo >-- GoRollup : submit blobs 90 | DaRpcGo >-- GoRollup : get blobs 91 | 92 | class L1 { 93 | postCommitment() 94 | verifySequence() 95 | } 96 | L1 >-- GoRollup : post frameRef with commitments 97 | ``` 98 | 99 | -------------------------------------------------------------------------------- 
/docs/optimisim_containers.md: -------------------------------------------------------------------------------- 1 | # Container diagram for optimism 2 | 3 | ```mermaid 4 | C4Container 5 | title NEAR Data Availability System Containers for Optimism 6 | 7 | Enterprise_Boundary(b3, "NEAR") { 8 | System_Ext(SystemNear, "NEAR") 9 | } 10 | 11 | Enterprise_Boundary(b1, "Ethereum") { 12 | Component(L2Output, "L2 Output Oracle") 13 | } 14 | 15 | 16 | Container_Boundary(b2, "Rollup") { 17 | Component(DaClient, "NEAR DA Client", "Submits/Gets blob data, creates commitments") 18 | 19 | Container(Proposer, "Proposer", "Propose L2 outputs and DA commitments") 20 | Container(Batcher, "Batcher", "Create frame channels and send batches") 21 | Container(Sequencer, "Sequencer", "Derives blocks, execute transactions") 22 | 23 | } 24 | 25 | Rel_U(DaClient, SystemNear, "Submit/Get blob data") 26 | Rel(Batcher, DaClient, "Post batches") 27 | Rel(Sequencer, DaClient, "Retrieve Blobs") 28 | BiRel(Batcher, Sequencer, "Write FrameRef") 29 | 30 | Rel(Proposer, Sequencer, "Reads L2 outputs and FrameRef") 31 | Rel_D(Proposer, L2Output, "FrameRef") 32 | 33 | UpdateLayoutConfig($c4ShapeInRow="2", $c4BoundaryInRow="2") 34 | 35 | System_Ext(FraudProofs, "Fraud proving mechanism") 36 | ``` 37 | -------------------------------------------------------------------------------- /docs/system_context.md: -------------------------------------------------------------------------------- 1 | # System context 2 | 3 | This outlines the system components that we build and how it interacts with external components. 4 | 5 | Red lines denote external flow of commitments. 6 | White lines denote flow of blob data. 7 | 8 | 9 | Note: "fisherman" is just an example how a rollup can work with the light client in the initial stage of DA, until we implement a more non-interactive approach, such as KZG. 
10 | 11 | ```mermaid 12 | C4Context 13 | title NEAR Data Availability System Context 14 | 15 | Enterprise_Boundary(b1, "Ethereum") { 16 | System_Ext(SystemEth, "Ethereum") 17 | 18 | System_Boundary(b2, "Rollup") { 19 | System_Ext(SystemRollup, "Rollup", "Derives blocks, execute transactions, posts commitments & sequence data") 20 | System(SystemNearDa, "NEAR DA Client", "Submits/Gets blob data, creates commitments") 21 | } 22 | BiRel(SystemRollup, SystemEth, "Posts sequences, proofs of execution, DA frame references") 23 | BiRel(SystemRollup, SystemNearDa, "Post batches, retrieves commitments") 24 | Rel(fisherman, SystemEth, "Looks for commitments, posts results") 25 | } 26 | 27 | Enterprise_Boundary(b0, "NEAR") { 28 | 29 | System(SystemLc, "Light Client", "Syncs headers, provides inclusion proofs") 30 | System(SystemNear, "NEAR Protocol", "NEAR validators, archival nodes") 31 | 32 | Rel(SystemLc, SystemNear, "Syncs headers") 33 | Rel(SystemNearDa, SystemNear, "Submits/Gets blob") 34 | 35 | %% This doesn't exist yet 36 | %% System(SystemDas, "Data Availability Sampling", "Data redundancy, retrieval, sample responses") 37 | %% BiRel(SystemDas, SystemLc, "Commitments") 38 | } 39 | 40 | Person_Ext(fisherman, "Fisherman") 41 | Rel(fisherman, SystemLc, "Requests inclusion proofs, validates inclusion proofs") 42 | 43 | 44 | UpdateRelStyle(fisherman, SystemEth, $offsetY="-10" $lineColor="red") 45 | UpdateRelStyle(fisherman, SystemLc, $offsetY="-10", $lineColor="red") 46 | UpdateRelStyle(SystemRollup, SystemEth, $offsetY="-30", $lineColor="white") 47 | UpdateElementStyle(fisherman, $bgColor="grey", $borderColor="red") 48 | 49 | UpdateRelStyle(SystemRollup, SystemNearDa, $offsetX="-200", $lineColor="white", $textColor="white") 50 | UpdateRelStyle(SystemNearDa, SystemNear, $textColor="white", $lineColor="white", $offsetY="10") 51 | UpdateRelStyle(SystemNearLc, SystemNear, $offsetX="30") 52 | ``` 53 | 
-------------------------------------------------------------------------------- /docs/test.md: -------------------------------------------------------------------------------- 1 | ```mermaid 2 | classDiagram 3 | Class01 <|-- AveryLongClass : Cool 4 | Class03 *-- Class04 5 | Class05 o-- Class06 6 | Class07 .. Class08 7 | Class09 --> C2 : Where am i? 8 | Class09 --* C3 9 | Class09 --|> Class07 10 | Class07 : equals() 11 | Class07 : Object[] elementData 12 | Class01 : size() 13 | Class01 : int chimp 14 | Class01 : int gorilla 15 | Class08 <--> C2: Cool label 16 | ``` 17 | -------------------------------------------------------------------------------- /eth/.editorconfig: -------------------------------------------------------------------------------- 1 | # EditorConfig http://EditorConfig.org 2 | 3 | # top-most EditorConfig file 4 | root = true 5 | 6 | # All files 7 | [*] 8 | charset = utf-8 9 | end_of_line = lf 10 | indent_size = 2 11 | indent_style = space 12 | insert_final_newline = true 13 | trim_trailing_whitespace = true 14 | 15 | [*.sol] 16 | indent_size = 4 17 | 18 | [*.tree] 19 | indent_size = 1 20 | -------------------------------------------------------------------------------- /eth/.env.example: -------------------------------------------------------------------------------- 1 | export API_KEY_ALCHEMY="YOUR_API_KEY_ALCHEMY" 2 | export API_KEY_ARBISCAN="YOUR_API_KEY_ARBISCAN" 3 | export API_KEY_BSCSCAN="YOUR_API_KEY_BSCSCAN" 4 | export API_KEY_ETHERSCAN="YOUR_API_KEY_ETHERSCAN" 5 | export API_KEY_GNOSISSCAN="YOUR_API_KEY_GNOSISSCAN" 6 | export API_KEY_INFURA="YOUR_API_KEY_INFURA" 7 | export API_KEY_OPTIMISTIC_ETHERSCAN="YOUR_API_KEY_OPTIMISTIC_ETHERSCAN" 8 | export API_KEY_POLYGONSCAN="YOUR_API_KEY_POLYGONSCAN" 9 | export API_KEY_SNOWTRACE="YOUR_API_KEY_SNOWTRACE" 10 | export MNEMONIC="YOUR_MNEMONIC" 11 | export FOUNDRY_PROFILE="default" 12 | -------------------------------------------------------------------------------- /eth/.gitignore: 
-------------------------------------------------------------------------------- 1 | # directories 2 | cache 3 | coverage 4 | node_modules 5 | out 6 | 7 | # files 8 | *.env 9 | *.log 10 | .DS_Store 11 | .pnp.* 12 | lcov.info 13 | package-lock.json 14 | pnpm-lock.yaml 15 | yarn.lock 16 | 17 | # broadcasts 18 | !broadcast 19 | broadcast/* 20 | broadcast/*/31337/ 21 | -------------------------------------------------------------------------------- /eth/.prettierignore: -------------------------------------------------------------------------------- 1 | # directories 2 | broadcast 3 | cache 4 | coverage 5 | node_modules 6 | out 7 | 8 | # files 9 | *.env 10 | *.log 11 | .DS_Store 12 | .pnp.* 13 | bun.lockb 14 | lcov.info 15 | package-lock.json 16 | pnpm-lock.yaml 17 | yarn.lock 18 | -------------------------------------------------------------------------------- /eth/.prettierrc.yml: -------------------------------------------------------------------------------- 1 | bracketSpacing: true 2 | printWidth: 120 3 | proseWrap: "always" 4 | singleQuote: false 5 | tabWidth: 2 6 | trailingComma: "all" 7 | useTabs: false 8 | -------------------------------------------------------------------------------- /eth/.solhint.json: -------------------------------------------------------------------------------- 1 | { 2 | "extends": "solhint:recommended", 3 | "rules": { 4 | "code-complexity": ["error", 8], 5 | "compiler-version": ["error", ">=0.8.25"], 6 | "func-name-mixedcase": "off", 7 | "func-visibility": ["error", { "ignoreConstructors": true }], 8 | "max-line-length": ["error", 120], 9 | "named-parameters-mapping": "warn", 10 | "no-console": "off", 11 | "not-rely-on-time": "off", 12 | "one-contract-per-file": "off" 13 | } 14 | } 15 | -------------------------------------------------------------------------------- /eth/README.md: -------------------------------------------------------------------------------- 1 | If this is your first time with Foundry, check out the 2 | 
[installation](https://github.com/foundry-rs/foundry#installation) instructions. 3 | 4 | ## Features 5 | 6 | This template builds upon the frameworks and libraries mentioned above, so please consult their respective documentation 7 | for details about their specific features. 8 | 9 | For example, if you're interested in exploring Foundry in more detail, you should look at the 10 | [Foundry Book](https://book.getfoundry.sh/). In particular, you may be interested in reading the 11 | [Writing Tests](https://book.getfoundry.sh/forge/writing-tests.html) tutorial. 12 | 13 | ### Sensible Defaults 14 | 15 | This template comes with a set of sensible default configurations for you to use. These defaults can be found in the 16 | following files: 17 | 18 | ```text 19 | ├── .editorconfig 20 | ├── .gitignore 21 | ├── .prettierignore 22 | ├── .prettierrc.yml 23 | ├── .solhint.json 24 | ├── foundry.toml 25 | └── remappings.txt 26 | ``` 27 | 28 | ### VSCode Integration 29 | 30 | This template is IDE agnostic, but for the best user experience, you may want to use it in VSCode alongside Nomic 31 | Foundation's [Solidity extension](https://marketplace.visualstudio.com/items?itemName=NomicFoundation.hardhat-solidity). 32 | 33 | For guidance on how to integrate a Foundry project in VSCode, please refer to this 34 | [guide](https://book.getfoundry.sh/config/vscode). 35 | 36 | ### GitHub Actions 37 | 38 | This template comes with GitHub Actions pre-configured. Your contracts will be linted and tested on every push and pull 39 | request made to the `main` branch. 40 | 41 | You can edit the CI script in [.github/workflows/ci.yml](./.github/workflows/ci.yml). 42 | 43 | ## Installing Dependencies 44 | 45 | Foundry typically uses git submodules to manage dependencies, but this template uses Node.js packages because 46 | [submodules don't scale](https://twitter.com/PaulRBerg/status/1736695487057531328). 47 | 48 | This is how to install dependencies: 49 | 50 | 1. 
Install the dependency using your preferred package manager, e.g. `bun install dependency-name` 51 | - Use this syntax to install from GitHub: `bun install github:username/repo-name` 52 | 2. Add a remapping for the dependency in [remappings.txt](./remappings.txt), e.g. 53 | `dependency-name=node_modules/dependency-name` 54 | 55 | Note that OpenZeppelin Contracts is pre-installed, so you can follow that as an example. 56 | 57 | ## Writing Tests 58 | 59 | To write a new test contract, you start by importing [PRBTest](https://github.com/PaulRBerg/prb-test) and inherit from 60 | it in your test contract. PRBTest comes with a pre-instantiated [cheatcodes](https://book.getfoundry.sh/cheatcodes/) 61 | environment accessible via the `vm` property. If you would like to view the logs in the terminal output you can add the 62 | `-vvv` flag and use [console.log](https://book.getfoundry.sh/faq?highlight=console.log#how-do-i-use-consolelog). 63 | 64 | This template comes with an example test contract [Foo.t.sol](./test/Foo.t.sol) 65 | 66 | ## Usage 67 | 68 | This is a list of the most frequently needed commands. 69 | 70 | ### Build 71 | 72 | Build the contracts: 73 | 74 | ```sh 75 | $ forge build 76 | ``` 77 | 78 | ### Clean 79 | 80 | Delete the build artifacts and cache directories: 81 | 82 | ```sh 83 | $ forge clean 84 | ``` 85 | 86 | ### Compile 87 | 88 | Compile the contracts: 89 | 90 | ```sh 91 | $ forge build 92 | ``` 93 | 94 | ### Coverage 95 | 96 | Get a test coverage report: 97 | 98 | ```sh 99 | $ forge coverage 100 | ``` 101 | 102 | ### Deploy 103 | 104 | Deploy to Anvil: 105 | 106 | ```sh 107 | $ forge script script/Deploy.s.sol --broadcast --fork-url http://localhost:8545 108 | ``` 109 | 110 | For this script to work, you need to have a `MNEMONIC` environment variable set to a valid 111 | [BIP39 mnemonic](https://iancoleman.io/bip39/). 
112 | 113 | For instructions on how to deploy to a testnet or mainnet, check out the 114 | [Solidity Scripting](https://book.getfoundry.sh/tutorials/solidity-scripting.html) tutorial. 115 | 116 | ### Format 117 | 118 | Format the contracts: 119 | 120 | ```sh 121 | $ forge fmt 122 | ``` 123 | 124 | ### Gas Usage 125 | 126 | Get a gas report: 127 | 128 | ```sh 129 | $ forge test --gas-report 130 | ``` 131 | 132 | ### Lint 133 | 134 | Lint the contracts: 135 | 136 | ```sh 137 | $ bun run lint 138 | ``` 139 | 140 | ### Test 141 | 142 | Run the tests: 143 | 144 | ```sh 145 | $ forge test 146 | ``` 147 | 148 | Generate test coverage and output result to the terminal: 149 | 150 | ```sh 151 | $ bun run test:coverage 152 | ``` 153 | 154 | Generate test coverage with lcov report (you'll have to open the `./coverage/index.html` file in your browser, to do so 155 | simply copy paste the path): 156 | 157 | ```sh 158 | $ bun run test:coverage:report 159 | ``` 160 | 161 | ## Related Efforts 162 | 163 | - [abigger87/femplate](https://github.com/abigger87/femplate) 164 | - [cleanunicorn/ethereum-smartcontract-template](https://github.com/cleanunicorn/ethereum-smartcontract-template) 165 | - [foundry-rs/forge-template](https://github.com/foundry-rs/forge-template) 166 | - [FrankieIsLost/forge-template](https://github.com/FrankieIsLost/forge-template) 167 | 168 | ## License 169 | 170 | This project is licensed under MIT. 
171 | -------------------------------------------------------------------------------- /eth/bun.lockb: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Nuffle-Labs/data-availability/28b6b80099a4be5b66959c0903b730451cfc314a/eth/bun.lockb -------------------------------------------------------------------------------- /eth/foundry.toml: -------------------------------------------------------------------------------- 1 | # Full reference https://github.com/foundry-rs/foundry/tree/master/crates/config 2 | 3 | [profile.default] 4 | auto_detect_solc = false 5 | block_timestamp = 1_680_220_800 # March 31, 2023 at 00:00 GMT 6 | bytecode_hash = "none" 7 | evm_version = "paris" # See https://www.evmdiff.com/features?name=PUSH0&kind=opcode 8 | fuzz = { runs = 1_000 } 9 | gas_reports = [ "*" ] 10 | libs = [ "node_modules", "lib" ] 11 | optimizer = true 12 | optimizer_runs = 10_000 13 | out = "out" 14 | script = "script" 15 | solc = "0.8.25" 16 | src = "src" 17 | test = "test" 18 | 19 | [profile.ci] 20 | fuzz = { runs = 10_000 } 21 | verbosity = 4 22 | 23 | # [etherscan] 24 | # arbitrum = { key = "${API_KEY_ARBISCAN}" } 25 | # avalanche = { key = "${API_KEY_SNOWTRACE}" } 26 | # bnb_smart_chain = { key = "${API_KEY_BSCSCAN}" } 27 | # gnosis_chain = { key = "${API_KEY_GNOSISSCAN}" } 28 | # goerli = { key = "${API_KEY_ETHERSCAN}" } 29 | # mainnet = { key = "${API_KEY_ETHERSCAN}" } 30 | # optimism = { key = "${API_KEY_OPTIMISTIC_ETHERSCAN}" } 31 | # polygon = { key = "${API_KEY_POLYGONSCAN}" } 32 | # sepolia = { key = "${API_KEY_ETHERSCAN}" } 33 | 34 | [fmt] 35 | bracket_spacing = true 36 | int_types = "long" 37 | line_length = 120 38 | multiline_func_header = "all" 39 | number_underscore = "thousands" 40 | quote_style = "double" 41 | tab_width = 4 42 | wrap_comments = true 43 | 44 | [rpc_endpoints] 45 | arbitrum = "https://arbitrum-mainnet.infura.io/v3/${API_KEY_INFURA}" 46 | avalanche = 
"https://avalanche-mainnet.infura.io/v3/${API_KEY_INFURA}" 47 | bnb_smart_chain = "https://bsc-dataseed.binance.org" 48 | gnosis_chain = "https://rpc.gnosischain.com" 49 | goerli = "https://goerli.infura.io/v3/${API_KEY_INFURA}" 50 | local = "http://localhost:8545" 51 | locall2 = "http://localhost:8125" 52 | mainnet = "https://eth-mainnet.g.alchemy.com/v2/${API_KEY_ALCHEMY}" 53 | optimism = "https://optimism-mainnet.infura.io/v3/${API_KEY_INFURA}" 54 | polygon = "https://polygon-mainnet.infura.io/v3/${API_KEY_INFURA}" 55 | sepolia = "https://sepolia.infura.io/v3/${API_KEY_INFURA}" 56 | -------------------------------------------------------------------------------- /eth/justfile: -------------------------------------------------------------------------------- 1 | # Change this if you want to use it 2 | CDK_PATH := ~/projects/data-availability/cdk/cdk-validium-node 3 | 4 | cdk-local: 5 | NEWCONTRACT=forge script Deploy --fork-url local --broadcast --legacy --json | jq -R 'fromjson?' | jq -r '.returns.da.value' 6 | docker exec zkevm-sequence-sender /app/zkevm-node set-dap --da-addr $NEWCONTRACT --network custom --custom-network-file /app/genesis.json --key-store-path /pk/sequencer.keystore --pw testonly --cfg /app/config.toml 7 | gen-cdk: 8 | forge inspect NearDataAvailability abi > {{CDK_PATH}}/etherman/smartcontracts/abi/neardataavailability.abi 9 | forge inspect NearDataAvailability bytecode > {{CDK_PATH}}/etherman/smartcontracts/bin/neardataavailabil 10 | -------------------------------------------------------------------------------- /eth/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "@near/rollup-data-availability", 3 | "description": "", 4 | "version": "1.0.0", 5 | "author": { 6 | "name": "dndll", 7 | "url": "https://github.com/dndll" 8 | }, 9 | "dependencies": { 10 | "@0xpolygonhermez/zkevm-contracts": "github.com:0xPolygonHermez/zkevm-contracts", 11 | "@openzeppelin/contracts": "^5.0.2", 12 | 
"@openzeppelin/contracts-upgradeable": "^5.0.1", 13 | "solady": "^0.0.191" 14 | }, 15 | "devDependencies": { 16 | "@prb/test": "^0.6.4", 17 | "forge-std": "github:foundry-rs/forge-std#v1.7.5", 18 | "prettier": "^3.0.0", 19 | "solhint": "^3.6.2" 20 | }, 21 | "keywords": [ 22 | "blockchain", 23 | "ethereum", 24 | "forge", 25 | "foundry", 26 | "smart-contracts", 27 | "solidity", 28 | "template" 29 | ], 30 | "private": true, 31 | "scripts": { 32 | "clean": "rm -rf cache out", 33 | "build": "forge build", 34 | "lint": "bun run lint:sol && bun run prettier:check", 35 | "lint:sol": "forge fmt --check && bun solhint {script,src,test}/**/*.sol", 36 | "fix": "forge fmt . && bun run lint:sol --fix && bun run prettier:write", 37 | "prettier:check": "prettier --check \"**/*.{json,md,yml}\" --ignore-path \".prettierignore\"", 38 | "prettier:write": "prettier --write \"**/*.{json,md,yml}\" --ignore-path \".prettierignore\"", 39 | "test": "forge test", 40 | "test:coverage": "forge coverage", 41 | "test:coverage:report": "forge coverage --report lcov && genhtml lcov.info --branch-coverage --output-dir coverage" 42 | } 43 | } 44 | -------------------------------------------------------------------------------- /eth/remappings.txt: -------------------------------------------------------------------------------- 1 | @openzeppelin/contracts-upgradeable/=node_modules/@openzeppelin/contracts-upgradeable/ 2 | @openzeppelin/contracts/=node_modules/@openzeppelin/contracts/ 3 | @prb/test/=node_modules/@prb/test/ 4 | forge-std/=node_modules/forge-std/ 5 | @polygon/zkevm-contracts/=node_modules/@0xpolygonhermez/zkevm-contracts/contracts/v2 6 | @solady/=node_modules/solady/src/ 7 | -------------------------------------------------------------------------------- /eth/script/Base.s.sol: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: MIT 2 | pragma solidity >=0.8.25 <0.9.0; 3 | 4 | import { Script } from "forge-std/src/Script.sol"; 
5 | 6 | abstract contract BaseScript is Script { 7 | /// @dev Included to enable compilation of the script without a $MNEMONIC environment variable. 8 | string internal constant TEST_MNEMONIC = "test test test test test test test test test test test junk"; 9 | 10 | /// @dev Needed for the deterministic deployments. 11 | bytes32 internal constant ZERO_SALT = bytes32(0); 12 | 13 | /// @dev The address of the transaction broadcaster. 14 | address internal broadcaster; 15 | 16 | /// @dev Used to derive the broadcaster's address if $ETH_FROM is not defined. 17 | string internal mnemonic; 18 | 19 | /// @dev Initializes the transaction broadcaster like this: 20 | /// 21 | /// - If $ETH_FROM is defined, use it. 22 | /// - Otherwise, derive the broadcaster address from $MNEMONIC. 23 | /// - If $MNEMONIC is not defined, default to a test mnemonic. 24 | /// 25 | /// The use case for $ETH_FROM is to specify the broadcaster key and its address via the command line. 26 | constructor() { 27 | address from = vm.envOr({ name: "ETH_FROM", defaultValue: address(0) }); 28 | if (from != address(0)) { 29 | broadcaster = from; 30 | } else { 31 | mnemonic = vm.envOr({ name: "MNEMONIC", defaultValue: TEST_MNEMONIC }); 32 | (broadcaster,) = deriveRememberKey({ mnemonic: mnemonic, index: 0 }); 33 | } 34 | } 35 | 36 | modifier broadcast() { 37 | vm.startBroadcast(broadcaster); 38 | _; 39 | vm.stopBroadcast(); 40 | } 41 | } 42 | -------------------------------------------------------------------------------- /eth/script/Deploy.s.sol: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: UNLICENSED 2 | pragma solidity >=0.8.25 <0.9.0; 3 | 4 | import { NearDataAvailability } from "../src/NearDataAvailability.sol"; 5 | import { BaseScript } from "./Base.s.sol"; 6 | 7 | /// @dev See the Solidity Scripting tutorial: https://book.getfoundry.sh/tutorials/solidity-scripting 8 | contract Deploy is BaseScript { 9 | function run() public 
broadcast returns (NearDataAvailability da) { 10 | da = new NearDataAvailability(); 11 | } 12 | } 13 | -------------------------------------------------------------------------------- /eth/src/NearDataAvailability.sol: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: AGPL-3.0 2 | pragma solidity >=0.8.25; 3 | 4 | import { IDataAvailabilityProtocol } from "@polygon/zkevm-contracts/interfaces/IDataAvailabilityProtocol.sol"; 5 | import { OwnableRoles } from "@solady/auth/OwnableRoles.sol"; 6 | // import { Initializable } from "@solady/utils/Initializable.sol"; 7 | 8 | /** 9 | * @dev Struct to store the data availability batch, transaction verification on ethereum and transaction submission on 10 | * NEAR 11 | * 12 | */ 13 | struct VerifiedBatch { 14 | bytes32 id; 15 | bytes32 verifyTxHash; 16 | bytes32 submitTxId; 17 | } 18 | 19 | /* 20 | * Contract responsible for storing the lookup information for the status of each NEARDA batch 21 | * It is heavily modeled after the requirements from polygon CDK 22 | */ 23 | contract NearDataAvailability is IDataAvailabilityProtocol, OwnableRoles { 24 | // Name of the data availability protocol 25 | string internal constant _PROTOCOL_NAME = "NearProtocol"; 26 | 27 | // The amount of batches that we track is available 28 | // note, they are still available via archival and indexers, just not actively tracked 29 | // in the contract. 30 | uint256 public constant _STORED_BATCH_AMT = 32; 31 | 32 | // The number of transactions we actively track awaiting verification 33 | // this is useful for users who want some immediate notfification on-chain. 34 | uint256 public constant _SUBMITTED_BATCH_AMT = 128; 35 | 36 | // The role allows a client to notify the contract that a batch has been submitted for 37 | // verification. 
38 | uint256 public constant _NOTIFIER = _ROLE_10; 39 | 40 | // The role enables providing verified batches to the contract, this would normally 41 | // be the light client. 42 | uint256 public constant _VERIFIER = _ROLE_11; 43 | 44 | // @dev The batches that have been made available, keyed by bucket id 45 | // @notice this dusts the earliest batch 46 | VerifiedBatch[_STORED_BATCH_AMT] public batchInfo; 47 | uint256 private _verifyBucketIdx; 48 | 49 | /** 50 | * @dev Batches that have been submitted and are awaiting proofs 51 | * @notice this dusts the earliest batch 52 | * @notice this is very inefficient, we are going to modify the way the light client proves batches 53 | * to better utilise the generators in the light client 54 | */ 55 | bytes32[_SUBMITTED_BATCH_AMT] public submittedBatches; 56 | uint256 private _submitBucketIdx; 57 | 58 | // @dev Bypass the verification logic for light client proofs 59 | bool private _bypass = true; 60 | 61 | /** 62 | * @dev Emitted when the DA batch is made available, used to determine if the batch has been proven 63 | * @param batch Batch of data that has been verified 64 | * @param bucketIdx current index of the batch in the store 65 | */ 66 | event IsAvailable(uint256 bucketIdx, VerifiedBatch batch); 67 | 68 | /** 69 | * @dev Emitted when the batch has been submitted for verification 70 | * @param bucketIdx current index of the tx in the store 71 | * @param submitTxId transaction id of the submission on NEAR 72 | */ 73 | event Submitted(uint256 bucketIdx, bytes32 submitTxId); 74 | 75 | /** 76 | * @dev Emitted when the batch has not been verified 77 | * 78 | */ 79 | error InvalidBatch(); 80 | 81 | /// @custom:oz-upgrades-unsafe-allow constructor 82 | constructor() { 83 | // _disableInitializers(); 84 | _initializeOwner(msg.sender); 85 | } 86 | 87 | // function initialize(address initialOwner) public initializer { 88 | // } 89 | 90 | /** 91 | * @notice Verifies that the given signedHash has been signed by 
requiredAmountOfSignatures committee members 92 | * @param dataAvailabilityBatch the message to determine the batch 93 | * @notice For now we essentially always pass since we need to hook up the LC consumer 94 | */ 95 | function verifyMessage(bytes32, /*hash*/ bytes calldata dataAvailabilityBatch) external view { 96 | VerifiedBatch storage item; 97 | // TODO: will fail decoding since not chunked 98 | bytes32 batchId = abi.decode(dataAvailabilityBatch, (bytes32)); 99 | for (uint256 i = 0; i < batchInfo.length; i++) { 100 | item = batchInfo[i]; 101 | if (item.id == batchId) { 102 | return; 103 | } 104 | } 105 | // TODO: when optimise storage layout for NEAR LC, we reenable checking 106 | // ifsubmitted && sender is sequencer, return; 107 | if (!_bypass) { 108 | revert InvalidBatch(); 109 | } 110 | } 111 | 112 | function notifySubmitted(bytes calldata batches) external onlyRolesOrOwner(_NOTIFIER) { 113 | // chunk the batches into blobRefSizes 114 | uint256 blobRefSize = 32; 115 | uint256 numBatches = batches.length / blobRefSize; 116 | for (uint256 i = 0; i < numBatches; i++) { 117 | bytes32 txId = abi.decode(batches[i * blobRefSize:(i + 1) * blobRefSize], (bytes32)); 118 | uint256 bucketIdx = setSubmitted(txId); 119 | emit Submitted(bucketIdx, txId); 120 | } 121 | } 122 | 123 | function setSubmitted(bytes32 txId) private returns (uint256) { 124 | uint256 bucket = _submitBucketIdx; 125 | submittedBatches[bucket] = txId; 126 | 127 | // TODO[Optimisation]: replace with bitmask & assembly 128 | _submitBucketIdx = (bucket + 1) % _SUBMITTED_BATCH_AMT; 129 | return bucket; 130 | } 131 | 132 | function notifyAvailable(VerifiedBatch memory verifiedBatch) external onlyRolesOrOwner(_VERIFIER) { 133 | uint256 bucketIdx = setBatch(verifiedBatch); 134 | emit IsAvailable(bucketIdx, verifiedBatch); 135 | } 136 | 137 | function setBatch(VerifiedBatch memory verifiedBatch) private returns (uint256) { 138 | uint256 bucket = _verifyBucketIdx; 139 | batchInfo[bucket] = verifiedBatch; 140 
| 141 | // TODO[Optimisation]: replace with bitmask & assembly 142 | _verifyBucketIdx = (bucket + 1) % _STORED_BATCH_AMT; 143 | return bucket; 144 | } 145 | 146 | /** 147 | * @notice Return the protocol name 148 | */ 149 | function getProcotolName() external pure override returns (string memory) { 150 | return _PROTOCOL_NAME; 151 | } 152 | 153 | /** 154 | * @notice Disable bypass 155 | * 156 | */ 157 | function switchBypass() external onlyOwner { 158 | _bypass = !_bypass; 159 | } 160 | } 161 | -------------------------------------------------------------------------------- /eth/test/NearDataAvailability.t.sol: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: UNLICENSED 2 | pragma solidity >=0.8.25 <0.9.0; 3 | 4 | import { PRBTest } from "@prb/test/src/PRBTest.sol"; 5 | import { StdCheats } from "forge-std/src/StdCheats.sol"; 6 | 7 | import { NearDataAvailability, VerifiedBatch } from "../src/NearDataAvailability.sol"; 8 | 9 | contract NearDataAvailabilityTest is PRBTest, StdCheats { 10 | NearDataAvailability public nearDataAvailability; 11 | address public owner; 12 | address public nonOwner; 13 | 14 | /// @dev A function invoked before each test case is run. 15 | function setUp() public virtual { 16 | // Instantiate the contract-under-test. 
17 | nearDataAvailability = new NearDataAvailability(); 18 | owner = address(this); 19 | nonOwner = address(0x1); 20 | nearDataAvailability.switchBypass(); 21 | // nearDataAvailability.initialize(owner); 22 | } 23 | 24 | function testVerifyMessageExistingBatch() public { 25 | VerifiedBatch memory batch = VerifiedBatch(bytes32(uint256(1)), bytes32(uint256(2)), bytes32(uint256(3))); 26 | nearDataAvailability.notifyAvailable(batch); 27 | 28 | bytes memory encodedBatch = abi.encode(batch.id); 29 | nearDataAvailability.verifyMessage(bytes32(0), encodedBatch); 30 | } 31 | 32 | function testVerifyMessageNonExistingBatch() public { 33 | VerifiedBatch memory batch = VerifiedBatch(bytes32(uint256(1)), bytes32(uint256(2)), bytes32(uint256(3))); 34 | 35 | bytes memory encodedBatch = abi.encode(batch.id); 36 | vm.expectRevert(); 37 | nearDataAvailability.verifyMessage(bytes32(0), encodedBatch); 38 | } 39 | 40 | function testNotifyAvailable() public { 41 | VerifiedBatch memory batch = VerifiedBatch(bytes32(uint256(1)), bytes32(uint256(2)), bytes32(uint256(3))); 42 | vm.expectEmit(true, true, true, true); 43 | emit NearDataAvailability.IsAvailable(0, batch); 44 | nearDataAvailability.notifyAvailable(batch); 45 | } 46 | 47 | function testNotifyAvailableOverwritesBatch() public { 48 | VerifiedBatch memory batch1 = VerifiedBatch(bytes32(uint256(1)), bytes32(uint256(2)), bytes32(uint256(3))); 49 | nearDataAvailability.notifyAvailable(batch1); 50 | 51 | vm.expectEmit(true, true, true, true); 52 | VerifiedBatch memory batch2 = VerifiedBatch(bytes32(uint256(4)), bytes32(uint256(5)), bytes32(uint256(6))); 53 | emit NearDataAvailability.IsAvailable(1, batch2); 54 | nearDataAvailability.notifyAvailable(batch2); 55 | } 56 | 57 | function testGetProcotolName() public { 58 | assertEq(nearDataAvailability.getProcotolName(), "NearProtocol"); 59 | } 60 | 61 | function testNotifyAvailableLIFO() public { 62 | uint256 numBatches = 100; 63 | 64 | VerifiedBatch[] memory batches = new 
VerifiedBatch[](numBatches); 65 | 66 | for (uint256 i = 0; i < numBatches; i++) { 67 | VerifiedBatch memory batch = 68 | VerifiedBatch(bytes32(uint256(i + 1)), bytes32(uint256(i + 2)), bytes32(uint256(i + 3))); 69 | batches[i] = batch; 70 | vm.expectEmit(true, true, true, true); 71 | emit NearDataAvailability.IsAvailable((i % nearDataAvailability._STORED_BATCH_AMT()), batch); 72 | nearDataAvailability.notifyAvailable(batches[i]); 73 | } 74 | } 75 | } 76 | -------------------------------------------------------------------------------- /flake.lock: -------------------------------------------------------------------------------- 1 | { 2 | "nodes": { 3 | "flake-utils": { 4 | "inputs": { 5 | "systems": "systems" 6 | }, 7 | "locked": { 8 | "lastModified": 1710146030, 9 | "narHash": "sha256-SZ5L6eA7HJ/nmkzGG7/ISclqe6oZdOZTNoesiInkXPQ=", 10 | "owner": "numtide", 11 | "repo": "flake-utils", 12 | "rev": "b1d9ab70662946ef0850d488da1c9019f3a9752a", 13 | "type": "github" 14 | }, 15 | "original": { 16 | "owner": "numtide", 17 | "repo": "flake-utils", 18 | "type": "github" 19 | } 20 | }, 21 | "flake-utils_2": { 22 | "inputs": { 23 | "systems": "systems_2" 24 | }, 25 | "locked": { 26 | "lastModified": 1705309234, 27 | "narHash": "sha256-uNRRNRKmJyCRC/8y1RqBkqWBLM034y4qN7EprSdmgyA=", 28 | "owner": "numtide", 29 | "repo": "flake-utils", 30 | "rev": "1ef2e671c3b0c19053962c07dbda38332dcebf26", 31 | "type": "github" 32 | }, 33 | "original": { 34 | "owner": "numtide", 35 | "repo": "flake-utils", 36 | "type": "github" 37 | } 38 | }, 39 | "nixpkgs": { 40 | "locked": { 41 | "lastModified": 1711703276, 42 | "narHash": "sha256-iMUFArF0WCatKK6RzfUJknjem0H9m4KgorO/p3Dopkk=", 43 | "owner": "nixos", 44 | "repo": "nixpkgs", 45 | "rev": "d8fe5e6c92d0d190646fb9f1056741a229980089", 46 | "type": "github" 47 | }, 48 | "original": { 49 | "owner": "nixos", 50 | "ref": "nixos-unstable", 51 | "repo": "nixpkgs", 52 | "type": "github" 53 | } 54 | }, 55 | "nixpkgs_2": { 56 | "locked": { 57 | "lastModified": 
1706487304, 58 | "narHash": "sha256-LE8lVX28MV2jWJsidW13D2qrHU/RUUONendL2Q/WlJg=", 59 | "owner": "NixOS", 60 | "repo": "nixpkgs", 61 | "rev": "90f456026d284c22b3e3497be980b2e47d0b28ac", 62 | "type": "github" 63 | }, 64 | "original": { 65 | "owner": "NixOS", 66 | "ref": "nixpkgs-unstable", 67 | "repo": "nixpkgs", 68 | "type": "github" 69 | } 70 | }, 71 | "root": { 72 | "inputs": { 73 | "flake-utils": "flake-utils", 74 | "nixpkgs": "nixpkgs", 75 | "rust-overlay": "rust-overlay" 76 | } 77 | }, 78 | "rust-overlay": { 79 | "inputs": { 80 | "flake-utils": "flake-utils_2", 81 | "nixpkgs": "nixpkgs_2" 82 | }, 83 | "locked": { 84 | "lastModified": 1712024007, 85 | "narHash": "sha256-52cf+mHZJbSaDFdsBj6vN1hH52AXsMgEpS/ajzc9yQE=", 86 | "owner": "oxalica", 87 | "repo": "rust-overlay", 88 | "rev": "d45d957dc3c48792af7ce58eec5d84407655e8fa", 89 | "type": "github" 90 | }, 91 | "original": { 92 | "owner": "oxalica", 93 | "repo": "rust-overlay", 94 | "type": "github" 95 | } 96 | }, 97 | "systems": { 98 | "locked": { 99 | "lastModified": 1681028828, 100 | "narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=", 101 | "owner": "nix-systems", 102 | "repo": "default", 103 | "rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e", 104 | "type": "github" 105 | }, 106 | "original": { 107 | "owner": "nix-systems", 108 | "repo": "default", 109 | "type": "github" 110 | } 111 | }, 112 | "systems_2": { 113 | "locked": { 114 | "lastModified": 1681028828, 115 | "narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=", 116 | "owner": "nix-systems", 117 | "repo": "default", 118 | "rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e", 119 | "type": "github" 120 | }, 121 | "original": { 122 | "owner": "nix-systems", 123 | "repo": "default", 124 | "type": "github" 125 | } 126 | } 127 | }, 128 | "root": "root", 129 | "version": 7 130 | } 131 | -------------------------------------------------------------------------------- /flake.nix: 
-------------------------------------------------------------------------------- 1 | { 2 | description = "Development nix flake"; 3 | 4 | inputs = { 5 | nixpkgs.url = "github:nixos/nixpkgs/nixos-unstable"; 6 | flake-utils.url = "github:numtide/flake-utils"; 7 | rust-overlay.url = "github:oxalica/rust-overlay"; 8 | }; 9 | 10 | outputs = { self, nixpkgs, flake-utils, rust-overlay, ... }: 11 | flake-utils.lib.eachDefaultSystem (system: 12 | let 13 | overlays = [ 14 | (import rust-overlay) 15 | (self: prevPkgs: { 16 | nodejs = prevPkgs.nodejs-16_x; 17 | }) 18 | ]; 19 | pkgs = import nixpkgs { inherit system overlays; }; 20 | rustVersion = (pkgs.rust-bin.fromRustupToolchainFile ./rust-toolchain.toml); 21 | rustPlatform = pkgs.makeRustPlatform { 22 | cargo = rustVersion; 23 | rustc = rustVersion; 24 | }; 25 | in 26 | { 27 | stdenv = pkgs.fastStdenv; 28 | devShell = pkgs.mkShell { 29 | LIBCLANG_PATH = pkgs.libclang.lib + "/lib/"; 30 | LD_LIBRARY_PATH = "${pkgs.stdenv.cc.cc.lib}/lib/:/usr/local/lib"; 31 | PROTOC = pkgs.protobuf + "/bin/protoc"; 32 | 33 | NIXPKGS_ALLOW_INSECURE = 1; 34 | 35 | nativeBuildInputs = with pkgs; [ 36 | bashInteractive 37 | taplo 38 | clang 39 | just 40 | cmake 41 | openssl 42 | protobuf 43 | pkg-config 44 | # clang 45 | llvmPackages.bintools 46 | llvmPackages.libclang 47 | protobuf 48 | rust-cbindgen 49 | 50 | # Should be go 1.19 51 | go 52 | gopls 53 | python3Full 54 | nodejs_20 55 | bun 56 | solc 57 | slither-analyzer 58 | # Note: needs impure flake to build contracts, ignore for now 59 | # nodejs_16 60 | # yarn 61 | 62 | ]; 63 | buildInputs = with pkgs; [ 64 | (rustVersion.override { extensions = [ "rust-src" ]; }) 65 | ]; 66 | permittedInsecurePackages = [ 67 | "nodejs-16.20.1" 68 | ]; 69 | 70 | }; 71 | }); 72 | } 73 | -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module github.com/nuffle-labs/data-availability 2 | 
3 | go 1.21 4 | 5 | require ( 6 | github.com/sirupsen/logrus v1.9.3 7 | github.com/stretchr/testify v1.9.0 8 | ) 9 | 10 | require ( 11 | github.com/kr/pretty v0.3.1 // indirect 12 | github.com/rogpeppe/go-internal v1.10.0 // indirect 13 | ) 14 | 15 | require ( 16 | github.com/davecgh/go-spew v1.1.1 // indirect 17 | github.com/pmezard/go-difflib v1.0.0 // indirect 18 | golang.org/x/sys v0.21.0 // indirect 19 | gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect 20 | gopkg.in/yaml.v3 v3.0.1 // indirect 21 | ) 22 | -------------------------------------------------------------------------------- /go.sum: -------------------------------------------------------------------------------- 1 | github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= 2 | github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 3 | github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= 4 | github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 5 | github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= 6 | github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= 7 | github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= 8 | github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= 9 | github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= 10 | github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= 11 | github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= 12 | github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= 13 | github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= 14 | github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= 15 | github.com/rogpeppe/go-internal 
v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= 16 | github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= 17 | github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= 18 | github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= 19 | github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= 20 | github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= 21 | github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= 22 | github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= 23 | github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= 24 | golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 25 | golang.org/x/sys v0.21.0 h1:rF+pYz3DAGSQAxAu1CbC7catZg4ebC4UIeIhKxBZvws= 26 | golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= 27 | gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= 28 | gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= 29 | gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= 30 | gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= 31 | gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= 32 | gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= 33 | -------------------------------------------------------------------------------- /gopkg/da-rpc/README.md: -------------------------------------------------------------------------------- 1 | # go-near 2 | 3 | This library exposes a thin go module around `near-op-sys`. 
This enables DA capabilities with minimal intrusion. 4 | 5 | -------------------------------------------------------------------------------- /gopkg/da-rpc/lib/libnear_da_rpc_sys.h: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | #include 6 | #include 7 | 8 | #define VERSION 3 9 | 10 | typedef struct Client Client; 11 | 12 | typedef struct BlobSafe { 13 | const uint8_t *data; 14 | size_t len; 15 | } BlobSafe; 16 | 17 | typedef struct RustSafeArray { 18 | const uint8_t *data; 19 | size_t len; 20 | } RustSafeArray; 21 | 22 | char *get_error(void); 23 | 24 | void clear_error(void); 25 | 26 | /** 27 | * # Safety 28 | * null check the ptr 29 | */ 30 | void set_error(char *err); 31 | 32 | /** 33 | * # Safety 34 | * we check if the pointer is null before attempting to free it 35 | */ 36 | void free_error(char *error); 37 | 38 | /** 39 | * # Safety 40 | * We check if the pointers are null 41 | */ 42 | const struct Client *new_client_file(const char *key_path, 43 | const char *contract, 44 | const char *network, 45 | uint8_t namespace_version, 46 | uint32_t namespace_); 47 | 48 | /** 49 | * # Safety 50 | * We check if the pointers are null 51 | */ 52 | const struct Client *new_client(const char *account_id, 53 | const char *secret_key, 54 | const char *contract, 55 | const char *network, 56 | uint8_t namespace_version, 57 | uint32_t namespace_); 58 | 59 | /** 60 | * # Safety 61 | * We check if the client is null 62 | */ 63 | void free_client(struct Client *client); 64 | 65 | /** 66 | * # Safety 67 | * We check if the slices are null 68 | */ 69 | char *submit(const struct Client *client, const struct BlobSafe *blobs, size_t len); 70 | 71 | /** 72 | * # Safety 73 | * We check if the slices are null and they should always be 32 bytes 74 | */ 75 | const struct BlobSafe *get(const struct Client *client, const uint8_t *transaction_id); 76 | 77 | /** 78 | * # Safety 79 | * We check if the 
slices are null 80 | */ 81 | void free_blob(struct BlobSafe *blob); 82 | 83 | /** 84 | * # Safety 85 | * We check if the slices are null 86 | */ 87 | const struct RustSafeArray *submit_batch(const struct Client *client, 88 | const char *candidate_hex, 89 | const uint8_t *tx_data, 90 | size_t tx_data_len); 91 | -------------------------------------------------------------------------------- /gopkg/da-rpc/near.go: -------------------------------------------------------------------------------- 1 | package near 2 | 3 | /* 4 | #include "./lib/libnear_da_rpc_sys.h" 5 | #include 6 | */ 7 | import "C" 8 | 9 | import ( 10 | "encoding" 11 | "errors" 12 | "fmt" 13 | "unsafe" 14 | 15 | sidecar "github.com/nuffle-labs/data-availability/gopkg/sidecar" 16 | 17 | log "github.com/sirupsen/logrus" 18 | ) 19 | 20 | type Namespace struct { 21 | Version uint8 22 | Id uint32 23 | } 24 | 25 | type Config struct { 26 | Namespace Namespace 27 | Client *C.Client 28 | } 29 | 30 | var ( 31 | ErrInvalidSize = errors.New("NEAR DA unmarshal blob: invalid size") 32 | ErrInvalidNetwork = errors.New("NEAR DA client relative URL without a base") 33 | ) 34 | 35 | // Framer defines a way to encode/decode a FrameRef. 36 | type Framer interface { 37 | encoding.BinaryMarshaler 38 | encoding.BinaryUnmarshaler 39 | } 40 | 41 | // BlobRef contains the reference to the specific blob on near and 42 | // satisfies the Framer interface. 43 | type BlobRef struct { 44 | TxId []byte 45 | } 46 | 47 | var _ Framer = &BlobRef{} 48 | 49 | // MarshalBinary encodes the Ref into a format that can be 50 | // serialized. 
51 | func (f *BlobRef) MarshalBinary() ([]byte, error) { 52 | ref := make([]byte, sidecar.EncodedBlobRefSize) 53 | 54 | copy(ref[:sidecar.EncodedBlobRefSize], f.TxId) 55 | 56 | return ref, nil 57 | } 58 | 59 | func (f *BlobRef) UnmarshalBinary(ref []byte) error { 60 | if len(ref) != sidecar.EncodedBlobRefSize { 61 | log.Warn("invalid size ", len(ref), " expected ", sidecar.EncodedBlobRefSize) 62 | return ErrInvalidSize 63 | } 64 | f.TxId = ref[:sidecar.EncodedBlobRefSize] 65 | return nil 66 | } 67 | 68 | // Note, networkN value can be either Mainnet, Testnet 69 | // or loopback address in [ip]:[port] format. 70 | func NewConfig(accountN, contractN, keyN, networkN string, ns uint32) (*Config, error) { 71 | log.Info("creating NEAR client ", "\ncontract: ", contractN, "\nnetwork: ", networkN, "\nnamespace ", ns, "\naccount ", accountN) 72 | 73 | account := C.CString(accountN) 74 | defer C.free(unsafe.Pointer(account)) 75 | 76 | key := C.CString(keyN) 77 | defer C.free(unsafe.Pointer(key)) 78 | 79 | contract := C.CString(contractN) 80 | defer C.free(unsafe.Pointer(contract)) 81 | 82 | network := C.CString(networkN) 83 | defer C.free(unsafe.Pointer(network)) 84 | 85 | // Numbers don't need to be dellocated 86 | namespaceId := C.uint(ns) 87 | namespaceVersion := C.uint8_t(0) 88 | 89 | daClient := C.new_client(account, key, contract, network, namespaceVersion, namespaceId) 90 | if daClient == nil { 91 | err := GetDAError() 92 | if err != nil { 93 | return nil, err 94 | } 95 | return nil, errors.New("unable to create NEAR DA client") 96 | } 97 | 98 | return &Config{ 99 | Namespace: Namespace{Version: 0, Id: ns}, 100 | Client: daClient, 101 | }, nil 102 | } 103 | 104 | // Note, networkN value can be either Mainnet, Testnet 105 | // or loopback address in [ip]:[port] format. 
106 | func NewConfigFile(keyPathN, contractN, networkN string, ns uint32) (*Config, error) { 107 | keyPath := C.CString(keyPathN) 108 | defer C.free(unsafe.Pointer(keyPath)) 109 | 110 | contract := C.CString(contractN) 111 | defer C.free(unsafe.Pointer(contract)) 112 | 113 | network := C.CString(networkN) 114 | defer C.free(unsafe.Pointer(network)) 115 | 116 | namespaceId := C.uint(ns) 117 | namespaceVersion := C.uint8_t(0) 118 | 119 | daClient := C.new_client_file(keyPath, contract, network, namespaceVersion, namespaceId) 120 | if daClient == nil { 121 | err := GetDAError() 122 | if err != nil { 123 | return nil, err 124 | } 125 | return nil, errors.New("unable to create NEAR DA client") 126 | } 127 | 128 | return &Config{ 129 | Namespace: Namespace{Version: 0, Id: ns}, 130 | Client: daClient, 131 | }, nil 132 | } 133 | 134 | // Note, candidateHex has to be "0xfF00000000000000000000000000000000000000" for the 135 | // data to be submitted in the case of other Rollups. If concerned, use ForceSubmit 136 | func (config *Config) Submit(candidateHex string, data []byte) ([]byte, error) { 137 | 138 | candidateHexPtr := C.CString(candidateHex) 139 | defer C.free(unsafe.Pointer(candidateHexPtr)) 140 | 141 | txBytes := C.CBytes(data) 142 | defer C.free(unsafe.Pointer(txBytes)) 143 | 144 | maybeFrameRef := C.submit_batch(config.Client, candidateHexPtr, (*C.uint8_t)(txBytes), C.size_t(len(data))) 145 | 146 | err := GetDAError() 147 | if err != nil { 148 | return nil, err 149 | } 150 | 151 | log.Info("Submitting to NEAR", 152 | "maybeFrameData", maybeFrameRef, 153 | "candidate", candidateHex, 154 | "namespace", config.Namespace, 155 | "txLen", C.size_t(len(data)), 156 | ) 157 | 158 | if maybeFrameRef.len > 1 { 159 | // Set the tx data to a frame reference 160 | frameData := C.GoBytes(unsafe.Pointer(maybeFrameRef.data), C.int(maybeFrameRef.len)) 161 | log.Debug("NEAR frame data", frameData) 162 | return frameData, nil 163 | } else { 164 | log.Warn("no frame reference returned 
from NEAR, falling back to ethereum") 165 | return data, nil 166 | } 167 | } 168 | 169 | // Used by other rollups without candidate semantics, if you know for sure you want to submit the 170 | // data to NEAR 171 | func (config *Config) ForceSubmit(data []byte) ([]byte, error) { 172 | candidateHex := "0xfF00000000000000000000000000000000000000" 173 | return config.Submit(candidateHex, data) 174 | } 175 | 176 | func (config *Config) Get(frameRefBytes []byte, txIndex uint32) ([]byte, error) { 177 | frameRef := BlobRef{} 178 | err := frameRef.UnmarshalBinary(frameRefBytes) 179 | if err != nil { 180 | log.Warn("unable to decode frame reference", "index", txIndex, "err", err) 181 | return nil, err 182 | } 183 | 184 | log.Info("NEAR frame ref request", "frameRef", frameRef) 185 | 186 | txId := C.CBytes(frameRef.TxId) 187 | defer C.free(unsafe.Pointer(txId)) 188 | 189 | blob := C.get((*C.Client)(config.Client), (*C.uint8_t)(txId)) 190 | defer C.free(unsafe.Pointer(blob)) 191 | 192 | if blob == nil { 193 | err := GetDAError() 194 | if err != nil { 195 | log.Warn("no data returned from near", "namespace", config.Namespace, "height", frameRef.TxId) 196 | return nil, err 197 | } 198 | } else { 199 | log.Info("NEAR data retrieved", "namespace", config.Namespace, "height", frameRef.TxId) 200 | } 201 | 202 | return ToBytes(blob), nil 203 | } 204 | 205 | func (config *Config) FreeClient() { 206 | C.free_client((*C.Client)(config.Client)) 207 | config.Client = nil 208 | } 209 | 210 | func NewBlobSafe(data []byte) *C.BlobSafe { 211 | blob := C.BlobSafe{ 212 | data: (*C.uint8_t)(C.CBytes(data)), 213 | len: C.size_t(len(data)), 214 | } 215 | return &blob 216 | } 217 | 218 | func ToBytes(b *C.BlobSafe) []byte { 219 | return C.GoBytes(unsafe.Pointer(b.data), C.int(b.len)) 220 | } 221 | 222 | func To32Bytes(ptr unsafe.Pointer) []byte { 223 | bytes := make([]byte, 32) 224 | copy(bytes, C.GoBytes(ptr, 32)) 225 | 226 | return bytes 227 | } 228 | 229 | func GetDAError() (err error) { 230 | 
defer func() { 231 | if rErr := recover(); rErr != nil { 232 | err = fmt.Errorf("critical error from NEAR DA GetDAError: %v", rErr) 233 | } 234 | }() 235 | 236 | errData := C.get_error() 237 | 238 | if errData != nil { 239 | defer C.free(unsafe.Pointer(errData)) 240 | 241 | errStr := C.GoString(errData) 242 | return fmt.Errorf("NEAR DA client %s", errStr) 243 | } else { 244 | return nil 245 | } 246 | } 247 | 248 | func TestSetError(msg string) { 249 | cmsg := C.CString(msg) 250 | defer C.free(unsafe.Pointer(cmsg)) 251 | C.set_error(cmsg) 252 | } 253 | -------------------------------------------------------------------------------- /gopkg/da-rpc/near_darwin.go: -------------------------------------------------------------------------------- 1 | //go:build darwin 2 | 3 | package near 4 | 5 | //#cgo LDFLAGS: -L/usr/local/lib -lnear_da_rpc_sys -lm -framework SystemConfiguration -framework Security 6 | import "C" 7 | -------------------------------------------------------------------------------- /gopkg/da-rpc/near_test.go: -------------------------------------------------------------------------------- 1 | package near_test 2 | 3 | import ( 4 | "testing" 5 | "unsafe" 6 | 7 | near "github.com/nuffle-labs/data-availability/gopkg/da-rpc" 8 | sidecar "github.com/nuffle-labs/data-availability/gopkg/sidecar" 9 | "github.com/stretchr/testify/assert" 10 | "github.com/stretchr/testify/require" 11 | ) 12 | 13 | var ( 14 | stubKey string = "ed25519:4dagBsEqCv3Ao5wa4KKFa57xNAH4wuBjh9wdTNYeCqDSeA9zE7fCnHSvWpU8t68jUpcCGqgfYwcH68suPaqmdcgm" 15 | localNet string = "http://127.0.0.1:3030" 16 | account string = "test.near" 17 | contract string = "test.near" 18 | ) 19 | 20 | func TestFrameRefMarshalBinary(t *testing.T) { 21 | id := make([]byte, 32) 22 | copy(id, []byte("11111111111111111111111111111111")) 23 | frameRef := near.BlobRef{ 24 | TxId: id, 25 | } 26 | binary, err := frameRef.MarshalBinary() 27 | assert.NoError(t, err) 28 | 29 | if len(binary) != sidecar.EncodedBlobRefSize { 30 
| t.Error("Expected binary length to be 64") 31 | } 32 | if string(binary[:sidecar.EncodedBlobRefSize]) != string(id) { 33 | t.Error("Expected id to be equal") 34 | } 35 | } 36 | 37 | func TestFrameRefUnmarshalBinary(t *testing.T) { 38 | bytes := make([]byte, sidecar.EncodedBlobRefSize) 39 | copy(bytes, []byte("1111111111111111111111111111111122222222222222222222222222222222")) 40 | blobRef := near.BlobRef{} 41 | err := blobRef.UnmarshalBinary(bytes) 42 | if err != nil { 43 | t.Error(err) 44 | } 45 | if string(blobRef.TxId) != "11111111111111111111111111111111" { 46 | t.Error("Expected id to be equal") 47 | } 48 | } 49 | 50 | func TestNewConfig(t *testing.T) { 51 | accountN := "testaccount" 52 | contractN := "testcontract" 53 | keyN := "testkey" 54 | networkN := "Testnet" 55 | ns := uint32(123) 56 | 57 | config, err := near.NewConfig(accountN, contractN, keyN, networkN, ns) 58 | 59 | assert.NoError(t, err) 60 | assert.NotNil(t, config) 61 | assert.Equal(t, near.Namespace{Version: 0, Id: ns}, config.Namespace) 62 | assert.NotNil(t, config.Client) 63 | 64 | println(config) 65 | if config.Namespace.Id != ns { 66 | t.Error("Expected namespace id to be equal") 67 | } 68 | if config.Namespace.Version != 0 { 69 | t.Error("Expected namespace version to be equal") 70 | } 71 | 72 | // Test error cases 73 | _, err = near.NewConfig(accountN, contractN, keyN, "InvalidNetwork", ns) 74 | assert.Error(t, err) 75 | assert.Equal(t, near.ErrInvalidNetwork, err) 76 | } 77 | 78 | func TestNewConfigFile(t *testing.T) { 79 | keyPathN := "testkey.json" 80 | contractN := "testcontract" 81 | networkN := "http://127.0.0.1:3030" 82 | ns := uint32(1) 83 | 84 | config, err := near.NewConfigFile(keyPathN, contractN, networkN, ns) 85 | assert.NoError(t, err) 86 | assert.NotNil(t, config) 87 | assert.Equal(t, near.Namespace{Version: 0, Id: ns}, config.Namespace) 88 | assert.NotNil(t, config.Client) 89 | 90 | // Test error cases 91 | _, err = near.NewConfigFile(keyPathN, contractN, 
"InvalidNetwork", ns) 92 | require.Error(t, err) 93 | require.Equal(t, near.ErrInvalidNetwork, err) 94 | 95 | println(config) 96 | if config.Namespace.Id != 1 { 97 | t.Error("Expected namespace id to be equal") 98 | } 99 | if config.Namespace.Version != 0 { 100 | t.Error("Expected namespace version to be equal") 101 | } 102 | } 103 | 104 | func liveConfig(t *testing.T) *near.Config { 105 | config, err := near.NewConfig(account, contract, stubKey, localNet, 0) 106 | require.NotNil(t, config) 107 | require.NoError(t, err) 108 | return config 109 | } 110 | 111 | func TestFreeClient(t *testing.T) { 112 | config, _ := near.NewConfig(account, contract, stubKey, "Testnet", 1) 113 | config.FreeClient() 114 | assert.Nil(t, config.Client) 115 | } 116 | 117 | func TestLiveSubmitRetrieve(t *testing.T) { 118 | candidateHex := "0xfF00000000000000000000000000000000000000" 119 | data := []byte("test data") 120 | 121 | config := liveConfig(t) 122 | 123 | blobRef, err := config.Submit(candidateHex, data) 124 | require.NoError(t, err) 125 | require.NotEmpty(t, blobRef) 126 | 127 | txIndex := uint32(0) 128 | 129 | data, err = config.Get(blobRef, txIndex) 130 | assert.NoError(t, err) 131 | assert.NotEmpty(t, data) 132 | } 133 | 134 | func TestLiveForceSubmit(t *testing.T) { 135 | data := []byte("test data") 136 | 137 | config := liveConfig(t) 138 | 139 | frameData, err := config.ForceSubmit(data) 140 | assert.NoError(t, err) 141 | assert.NotEmpty(t, frameData) 142 | 143 | // Test error cases 144 | // TODO: Add test cases for error scenarios 145 | } 146 | 147 | func TestToBytes(t *testing.T) { 148 | b := []byte{1, 2, 3} 149 | blob := near.NewBlobSafe(b) 150 | bytes := near.ToBytes(blob) 151 | assert.Equal(t, b, bytes) 152 | } 153 | 154 | func TestTo32Bytes(t *testing.T) { 155 | data := []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32} 156 | ptr := unsafe.Pointer(&data[0]) 157 | 158 | bytes := near.To32Bytes(ptr) 
159 | assert.Equal(t, data, bytes) 160 | } 161 | 162 | func TestGetDAError(t *testing.T) { 163 | // Test error case 164 | near.TestSetError("test error") 165 | err := near.GetDAError() 166 | assert.Error(t, err) 167 | assert.Contains(t, err.Error(), "test error") 168 | 169 | // // Test no error case 170 | err = near.GetDAError() 171 | assert.NoError(t, err) 172 | } 173 | -------------------------------------------------------------------------------- /gopkg/da-rpc/near_unix.go: -------------------------------------------------------------------------------- 1 | //go:build linux 2 | 3 | package near 4 | 5 | //#cgo LDFLAGS: -L/usr/local/lib -lnear_da_rpc_sys -lm 6 | import "C" 7 | -------------------------------------------------------------------------------- /gopkg/sidecar/near.go: -------------------------------------------------------------------------------- 1 | // Package sidecar provides a client for interacting with the Near Protocol Sidecar service. 2 | // 3 | // The sidecar service is responsible for submitting and retrieving data blobs to and from the Near blockchain. 4 | // It acts as an intermediary between the application and the Near blockchain, abstracting away the complexities 5 | // of interacting with the blockchain directly. 6 | // 7 | // Security Considerations: 8 | // - The sidecar service should be running on a trusted host and port. 9 | // - The host and port should be configurable and not hardcoded. 10 | // - The client should verify the identity of the sidecar service using TLS certificates. 11 | // - The client should validate and sanitize all input parameters to prevent injection attacks. 12 | // - The client should handle errors gracefully and not leak sensitive information in error messages. 13 | // - The client should use secure communication channels (e.g., HTTPS) to prevent eavesdropping and tampering. 14 | // - The client should have proper authentication and authorization mechanisms to prevent unauthorized access. 
15 | // 16 | // Usage: 17 | // 18 | // 1. Create a new client instance using the `NewClient` function, providing the host and configuration. 19 | // 20 | // client, err := sidecar.NewClient("http://localhost:5888", &sidecar.ConfigureClientRequest{...}) 21 | // if err != nil { 22 | // // Handle error 23 | // } 24 | // 25 | // 2. Use the client to interact with the sidecar service. 26 | // 27 | // // Submit a blob 28 | // blob := sidecar.Blob{Data: []byte("test_data")} 29 | // blobRef, err := client.SubmitBlob(blob) 30 | // if err != nil { 31 | // // Handle error 32 | // } 33 | // 34 | // // Get a blob 35 | // retrievedBlob, err := client.GetBlob(*blobRef) 36 | // if err != nil { 37 | // // Handle error 38 | // } 39 | // 40 | // 3. Close the client when done. 41 | // 42 | // client.Close() 43 | package sidecar 44 | 45 | import ( 46 | "bytes" 47 | "encoding/hex" 48 | "encoding/json" 49 | "errors" 50 | "fmt" 51 | "net/http" 52 | 53 | log "github.com/sirupsen/logrus" 54 | ) 55 | 56 | // Client represents a client for interacting with the Near Protocol Sidecar service. 57 | type Client struct { 58 | client *http.Client 59 | host string 60 | config *ConfigureClientRequest 61 | } 62 | 63 | // NewClient creates a new instance of the Near Protocol Sidecar client. 64 | // It takes the host and configuration as parameters and returns a pointer to the client. 65 | // If the host is empty, it defaults to "http://localhost:5888". 66 | // The configuration can be nil, assuming the sidecar is set up outside of this package. 67 | func NewClient(host string, config *ConfigureClientRequest) (*Client, error) { 68 | if host == "" { 69 | host = "http://localhost:5888" 70 | } 71 | client := &Client{ 72 | client: &http.Client{}, 73 | host: host, 74 | config: config, 75 | } 76 | return client, client.Health() 77 | } 78 | 79 | func (c *Client) GetHost() string { 80 | return c.host 81 | } 82 | 83 | // ConfigureClient configures the Near Protocol Sidecar client with the provided configuration. 
84 | // It sends a PUT request to the "/configure" endpoint with the configuration as JSON payload. 85 | func (c *Client) ConfigureClient(req *ConfigureClientRequest) error { 86 | if req == nil { 87 | req = c.config 88 | } 89 | jsonData, err := json.Marshal(req) 90 | if err != nil { 91 | return fmt.Errorf("failed to marshal configure client request: %v", err) 92 | } 93 | 94 | httpReq, err := http.NewRequest(http.MethodPut, c.host+"/configure", bytes.NewBuffer(jsonData)) 95 | if err != nil { 96 | return fmt.Errorf("failed to create configure client request: %v", err) 97 | } 98 | httpReq.Header.Set("Content-Type", "application/json") 99 | 100 | resp, err := c.client.Do(httpReq) 101 | if err != nil { 102 | return fmt.Errorf("failed to send configure client request: %v", err) 103 | } 104 | defer resp.Body.Close() 105 | 106 | if resp.StatusCode != http.StatusOK { 107 | return fmt.Errorf("failed to configure client, status code: %d", resp.StatusCode) 108 | } 109 | 110 | return nil 111 | } 112 | 113 | // GetBlob retrieves a blob from the Near blockchain using the provided BlobRef. 114 | // It sends a GET request to the "/blob" endpoint with the transaction ID as a query parameter. 115 | func (c *Client) GetBlob(b BlobRef) (*Blob, error) { 116 | resp, err := c.client.Get(c.host + "/blob?transaction_id=" + b.ID()) 117 | if err != nil { 118 | return nil, fmt.Errorf("failed to send get blob request: %v", err) 119 | } 120 | defer resp.Body.Close() 121 | 122 | if resp.StatusCode != http.StatusOK { 123 | return nil, fmt.Errorf("failed to get blob, status code: %d", resp.StatusCode) 124 | } 125 | 126 | var blob Blob 127 | err = json.NewDecoder(resp.Body).Decode(&blob) 128 | if err != nil { 129 | return nil, fmt.Errorf("failed to decode blob response: %v", err) 130 | } 131 | 132 | return &blob, nil 133 | } 134 | 135 | // SubmitBlob submits a blob to the Near blockchain. 136 | // It sends a POST request to the "/blob" endpoint with the blob data as JSON payload. 
137 | // The response contains the transaction ID of the submitted blob. 138 | func (c *Client) SubmitBlob(b Blob) (*BlobRef, error) { 139 | if b.Data == nil { 140 | return nil, errors.New("blob data cannot be nil") 141 | } 142 | 143 | jsonData, err := b.MarshalJSON() 144 | if err != nil { 145 | return nil, fmt.Errorf("failed to marshal blob: %v", err) 146 | } 147 | log.Debug("near-sidecar: SubmitBlob json: ", jsonData) 148 | 149 | resp, err := c.client.Post(c.host+"/blob", "application/json", bytes.NewBuffer(jsonData)) 150 | if err != nil { 151 | return nil, fmt.Errorf("failed to send submit blob request: %v", err) 152 | } 153 | defer resp.Body.Close() 154 | 155 | if resp.StatusCode != http.StatusOK { 156 | return nil, fmt.Errorf("failed to submit blob, status code: %d", resp.StatusCode) 157 | } 158 | 159 | var blobRef BlobRef 160 | err = json.NewDecoder(resp.Body).Decode(&blobRef) 161 | if err != nil { 162 | return nil, fmt.Errorf("failed to decode transaction ID: %v", err) 163 | } 164 | 165 | return &blobRef, nil 166 | } 167 | 168 | // Health checks the health of the Near Protocol Sidecar service. 169 | // It sends a GET request to the "/health" endpoint and expects a successful response. 170 | func (c *Client) Health() error { 171 | resp, err := c.client.Get(c.host + "/health") 172 | if err != nil { 173 | return fmt.Errorf("failed to send health check request: %v", err) 174 | } 175 | defer resp.Body.Close() 176 | 177 | if resp.StatusCode != http.StatusOK { 178 | return fmt.Errorf("health check failed, status code: %d", resp.StatusCode) 179 | } 180 | 181 | return nil 182 | } 183 | 184 | // Close closes the Near Protocol Sidecar client. 185 | // It should be called when the client is no longer needed. 186 | func (c *Client) Close() { 187 | // Perform any necessary cleanup or resource release 188 | } 189 | 190 | // BlobRef represents a reference to a blob on the Near blockchain. 
191 | type BlobRef struct { 192 | transactionID [EncodedBlobRefSize]byte 193 | } 194 | 195 | // EncodedBlobRefSize is the size of an encoded BlobRef in bytes. 196 | const EncodedBlobRefSize = 32 197 | 198 | // NewBlobRef creates a new BlobRef from the provided transaction ID. 199 | // It returns an error if the transaction ID is not exactly 32 bytes. 200 | func NewBlobRef(transactionID []byte) (*BlobRef, error) { 201 | if len(transactionID) != EncodedBlobRefSize { 202 | return nil, errors.New("invalid transaction ID length") 203 | } 204 | var ref BlobRef 205 | copy(ref.transactionID[:], transactionID) 206 | return &ref, nil 207 | } 208 | 209 | // Deref returns the transaction ID of the BlobRef. 210 | func (r *BlobRef) Deref() []byte { 211 | return r.transactionID[:] 212 | } 213 | 214 | // ID returns the transaction ID of the BlobRef as a hex-encoded string. 215 | func (r *BlobRef) ID() string { 216 | return hex.EncodeToString(r.transactionID[:]) 217 | } 218 | 219 | // MarshalJSON marshals the BlobRef to JSON format. 220 | // It encodes the transaction ID as a hex string. 221 | func (r *BlobRef) MarshalJSON() ([]byte, error) { 222 | json, err := json.Marshal(struct { 223 | TransactionID string `json:"transaction_id"` 224 | }{ 225 | TransactionID: r.ID(), 226 | }) 227 | return json, err 228 | } 229 | 230 | // UnmarshalJSON unmarshals the BlobRef from JSON format. 231 | // It decodes the transaction ID from a hex string. 232 | func (r *BlobRef) UnmarshalJSON(data []byte) error { 233 | var aux struct { 234 | TransactionID string `json:"transaction_id"` 235 | } 236 | if err := json.Unmarshal(data, &aux); err != nil { 237 | return err 238 | } 239 | transactionID, err := hex.DecodeString(aux.TransactionID) 240 | if err != nil { 241 | return err 242 | } 243 | copy(r.transactionID[:], transactionID) 244 | return nil 245 | } 246 | 247 | // Blob represents a blob of data stored on the Near blockchain. 
248 | type Blob struct { 249 | Data []byte `json:"data"` 250 | } 251 | 252 | // MarshalJSON marshals the Blob to JSON format. 253 | // It encodes the data as a hex string. 254 | func (b *Blob) MarshalJSON() ([]byte, error) { 255 | return json.Marshal(struct { 256 | Data string `json:"data"` 257 | }{ 258 | Data: hex.EncodeToString(b.Data), 259 | }) 260 | } 261 | 262 | // UnmarshalJSON unmarshals the Blob from JSON format. 263 | // It decodes the data from a hex string. 264 | func (b *Blob) UnmarshalJSON(data []byte) error { 265 | var aux struct { 266 | Data string `json:"data"` 267 | } 268 | if err := json.Unmarshal(data, &aux); err != nil { 269 | return err 270 | } 271 | decodedData, err := hex.DecodeString(aux.Data) 272 | if err != nil { 273 | return err 274 | } 275 | b.Data = decodedData 276 | return nil 277 | } 278 | 279 | // Network represents a Near network. 280 | type Network string 281 | 282 | const ( 283 | Mainnet Network = "mainnet" 284 | Testnet Network = "testnet" 285 | Localnet Network = "localnet" 286 | ) 287 | 288 | // ConfigureClientRequest represents a request to configure the Near Protocol Sidecar client. 289 | type ConfigureClientRequest struct { 290 | AccountID string `json:"account_id"` 291 | SecretKey string `json:"secret_key"` 292 | ContractID string `json:"contract_id"` 293 | Network Network `json:"network"` 294 | Namespace *Namespace `json:"namespace"` 295 | } 296 | 297 | // Namespace represents a namespace on the Near blockchain. 
298 | type Namespace struct { 299 | ID int `json:"id"` 300 | Version int `json:"version"` 301 | } 302 | -------------------------------------------------------------------------------- /gopkg/sidecar/near_test.go: -------------------------------------------------------------------------------- 1 | package sidecar 2 | 3 | import ( 4 | "bytes" 5 | "encoding/hex" 6 | "encoding/json" 7 | "fmt" 8 | "io" 9 | "net/http" 10 | "os" 11 | "testing" 12 | 13 | "github.com/ethereum/go-ethereum/log" 14 | "github.com/stretchr/testify/assert" 15 | ) 16 | 17 | func initClient(t *testing.T) *Client { 18 | return InitLocalClient(t, "../../test/http-sidecar.json") 19 | } 20 | 21 | func InitLocalClient(t *testing.T, path string) *Client { 22 | configData, err := os.ReadFile(path) 23 | assert.NoError(t, err) 24 | log.Debug("initClient configData ", string(configData)) 25 | 26 | // Unmarshal the JSON data into a ConfigureClientRequest struct 27 | var conf ConfigureClientRequest 28 | err = json.Unmarshal(configData, &conf) 29 | assert.NoError(t, err) 30 | 31 | client, err := NewClient("http://localhost:5888", &conf) 32 | assert.NoError(t, err) 33 | 34 | err = client.ConfigureClient(&conf) 35 | assert.NoError(t, err) 36 | 37 | return client 38 | } 39 | 40 | func TestGetInvalidBlob(t *testing.T) { 41 | client := initClient(t) 42 | defer client.Close() 43 | 44 | invalidTransactionID := []byte("invalid_transaction_id") 45 | log.Info("TestGetInvalidBlob invalidTransactionID ", invalidTransactionID) 46 | 47 | invalidBlobRef := &BlobRef{} 48 | log.Info("TestGetInvalidBlob invalidBlobRef ", invalidBlobRef) 49 | 50 | copy(invalidBlobRef.transactionID[:], invalidTransactionID) 51 | blob, err := client.GetBlob(*invalidBlobRef) 52 | log.Info("TestGetInvalidBlob invalidBlob ", blob) 53 | 54 | assert.Error(t, err, "failed to get blob, status code: 500") 55 | assert.Nil(t, blob) 56 | } 57 | 58 | func TestSubmitGetBlob(t *testing.T) { 59 | testName := "TestSubmitGetBlob " 60 | client := initClient(t) 61 | 
defer client.Close() 62 | 63 | // Test submitting a blob 64 | data := []byte("test_data") 65 | blob := &Blob{Data: data} 66 | log.Info(testName, "blob ", blob) 67 | 68 | blobRef, err := client.SubmitBlob(*blob) 69 | log.Info(testName, "blobRef ", blobRef) 70 | assert.NoError(t, err) 71 | assert.NotNil(t, blobRef) 72 | 73 | // Test getting the submitted blob 74 | blob, err = client.GetBlob(*blobRef) 75 | assert.NoError(t, err) 76 | 77 | log.Info("TestSubmitGetBlob blob ", blob) 78 | if !bytes.Equal(blob.Data, data) { 79 | t.Fatalf("expected blob data %v but got %v", data, blob.Data) 80 | } 81 | 82 | // Test submitting an empty blob 83 | emptyBlob := Blob{} 84 | blobRef, err = client.SubmitBlob(emptyBlob) 85 | log.Info("TestSubmitBlob emptyBlob ", emptyBlob) 86 | assert.Errorf(t, err, "blob data cannot be nil") 87 | assert.Nil(t, blobRef) 88 | } 89 | 90 | func TestHealth(t *testing.T) { 91 | client := initClient(t) 92 | defer client.Close() 93 | 94 | // Test checking the health of the service 95 | err := client.Health() 96 | assert.NoError(t, err) 97 | } 98 | 99 | func TestBlobMarshalUnmarshal(t *testing.T) { 100 | data := []byte("test_data") 101 | blob := Blob{Data: data} 102 | 103 | // Test marshaling the blob 104 | jsonData, err := blob.MarshalJSON() 105 | assert.NoError(t, err) 106 | 107 | // Test unmarshaling the blob 108 | var unmarshaled Blob 109 | err = unmarshaled.UnmarshalJSON(jsonData) 110 | assert.NoError(t, err) 111 | 112 | if !bytes.Equal(unmarshaled.Data, data) { 113 | t.Fatalf("unmarshaled blob data does not match original data") 114 | } 115 | } 116 | 117 | func TestNewBlobRefInvalidTransactionID(t *testing.T) { 118 | invalidTransactionID := []byte("invalid_transaction_id") 119 | _, err := NewBlobRef(invalidTransactionID) 120 | assert.Error(t, err, "invalid transaction ID length") 121 | } 122 | 123 | func generateTransactionID(t *testing.T) []byte { 124 | 125 | hex, err := 
hex.DecodeString("5d0472abe8eef76f9a44a3695d584af4de6e2ddde82dabfa5c8f29e5aec1270d") 126 | log.Info("generateTransactionID hex ", hex) 127 | assert.NoError(t, err) 128 | 129 | blobRef, err := NewBlobRef(hex) 130 | log.Info("generateTransactionID blobRef", blobRef) 131 | assert.NoError(t, err) 132 | 133 | return blobRef.transactionID[:] 134 | } 135 | 136 | func TestAltDA(t *testing.T) { 137 | client := initClient(t) 138 | defer client.Close() 139 | 140 | baseUrl := fmt.Sprintf("%s/plasma", client.host) 141 | img := generateTransactionID(t) 142 | 143 | body := bytes.NewReader(img) 144 | url := fmt.Sprintf("%s/put", baseUrl) 145 | req, err := http.NewRequest(http.MethodPost, url, body) 146 | assert.NoError(t, err) 147 | 148 | req.Header.Set("Content-Type", "application/octet-stream") 149 | resp, err := http.DefaultClient.Do(req) 150 | assert.NoError(t, err) 151 | defer resp.Body.Close() 152 | 153 | assert.Equal(t, resp.StatusCode, http.StatusOK) 154 | 155 | b, err := io.ReadAll(resp.Body) 156 | assert.NoError(t, err) 157 | 158 | fmt.Println(b) 159 | 160 | comm := DecodeCommitmentData(b) 161 | assert.NotNil(t, comm) 162 | 163 | encoded := EncodeCommitment(comm) 164 | fmt.Println("encoded comm", encoded) 165 | 166 | req, err = http.NewRequest(http.MethodGet, fmt.Sprintf("%s/get/0x%x", baseUrl, encoded), nil) 167 | assert.NoError(t, err) 168 | 169 | resp, err = http.DefaultClient.Do(req) 170 | assert.NoError(t, err) 171 | 172 | assert.Equal(t, resp.StatusCode, http.StatusOK) 173 | defer resp.Body.Close() 174 | 175 | input, err := io.ReadAll(resp.Body) 176 | assert.NoError(t, err) 177 | assert.Equal(t, img, input) 178 | } 179 | 180 | // Encode adds a commitment type prefix self describing the commitment. 181 | func EncodeCommitment(c []byte) []byte { 182 | return append([]byte{byte(1)}, c...) 183 | } 184 | 185 | // DecodeCommitmentData parses the commitment into a known commitment type. 186 | // The input type is determined by the first byte of the raw data. 
187 | // The input type is discarded and the commitment is passed to the appropriate constructor. 188 | func DecodeCommitmentData(input []byte) []byte { 189 | if len(input) == 0 { 190 | fmt.Println(("input is empty")) 191 | return nil 192 | } 193 | t := input[0] 194 | data := input[1:] 195 | switch t { 196 | case 0: 197 | fmt.Println("gave keccak commitment") 198 | return nil 199 | case 1: 200 | fmt.Println("gave generic commitment") 201 | return data 202 | default: 203 | fmt.Println("gave bad commitment") 204 | return nil 205 | } 206 | } 207 | -------------------------------------------------------------------------------- /http-config.template.json: -------------------------------------------------------------------------------- 1 | { 2 | "account_id": "HTTP_API_TEST_ACCOUNT_ID", 3 | "secret_key": "HTTP_API_TEST_SECRET_KEY", 4 | "contract_id": "HTTP_API_TEST_ACCOUNT_ID", 5 | "network": "testnet", 6 | "namespace": HTTP_API_TEST_NAMESPACE 7 | } 8 | -------------------------------------------------------------------------------- /justfile: -------------------------------------------------------------------------------- 1 | # Default recipe 2 | default: 3 | just --list 4 | 5 | GHCR_BASE := "ghcr.io/nuffle-labs/data-availability" 6 | 7 | docker-sidecar: 8 | docker build -t {{GHCR_BASE}}/sidecar:dev -f bin/sidecar/Dockerfile . 
# For backwards compat
	docker tag {{GHCR_BASE}}/sidecar:dev {{GHCR_BASE}}/http-api:dev

docker-sidecar-push:
	docker push {{GHCR_BASE}}/sidecar:dev
	# Backwards compat
	docker push {{GHCR_BASE}}/http-api:dev

devnet:
	docker compose up -d --build near-localnet
	docker compose up -d near-localnet-set-key

changelog:
	git-cliff > CHANGELOG.md
--------------------------------------------------------------------------------
/rust-toolchain.toml:
--------------------------------------------------------------------------------
[toolchain]
# This specifies the version of Rust we use to build.
# Individual crates in the workspace may support a lower version, as indicated by `rust-version` field in each crate's `Cargo.toml`.
# The version specified below, should be at least as high as the maximum `rust-version` within the workspace.
channel = "1.75.0"
components = [ "rustfmt", "rust-src" ]
targets = [ "wasm32-unknown-unknown" ]
--------------------------------------------------------------------------------
/scripts/enrich.sh:
--------------------------------------------------------------------------------
#!/usr/bin/env bash
# Enrich the HTTP config template by substituting placeholder tokens with
# values taken from the environment.
#
# Usage: enrich.sh [template-file] [output-file]
set -euo pipefail

JSON_FILE=${1:-"http-config.template.json"}
ENRICHED_JSON_FILE=${2:-"http-config.json"}

# Read the JSON file
JSON_CONTENT=$(cat "$JSON_FILE")

# Replace the placeholders with environment variable values.
# Bash parameter expansion (${var//pattern/replacement}) replaces every
# occurrence and, unlike the previous sed pipeline, cannot be corrupted by
# values containing sed metacharacters such as '/', '&' or '\'.
JSON_CONTENT=${JSON_CONTENT//HTTP_API_TEST_ACCOUNT_ID/${HTTP_API_TEST_ACCOUNT_ID}}
JSON_CONTENT=${JSON_CONTENT//HTTP_API_TEST_SECRET_KEY/${HTTP_API_TEST_SECRET_KEY}}
JSON_CONTENT=${JSON_CONTENT//HTTP_API_TEST_NAMESPACE/${HTTP_API_TEST_NAMESPACE}}

# Write the updated JSON content back to the file
echo "$JSON_CONTENT" > "$ENRICHED_JSON_FILE"
-------------------------------------------------------------------------------- /taplo.toml: -------------------------------------------------------------------------------- 1 | exclude = [ ".direnv/**", "**/node_modules/**", "cdk-stack/**", "op-stack/**", "bin/light-client/**" ] 2 | include = [ "**/*.toml" ] 3 | 4 | [formatting] 5 | align_comments = false 6 | align_entries = true 7 | allowed_blank_lines = 1 8 | array_auto_collapse = true 9 | array_auto_expand = true 10 | column_width = 170 11 | compact_arrays = false 12 | reorder_keys = true 13 | -------------------------------------------------------------------------------- /test/http-sidecar.json: -------------------------------------------------------------------------------- 1 | { 2 | "account_id": "test.near", 3 | "secret_key": "ed25519:4dagBsEqCv3Ao5wa4KKFa57xNAH4wuBjh9wdTNYeCqDSeA9zE7fCnHSvWpU8t68jUpcCGqgfYwcH68suPaqmdcgm", 4 | "contract_id": "test.near", 5 | "network": "http://near-localnet:3030", 6 | "namespace": null, 7 | "should_cache": false 8 | } 9 | -------------------------------------------------------------------------------- /test/node_key.json: -------------------------------------------------------------------------------- 1 | { 2 | "account_id": "node", 3 | "public_key": "ed25519:7cPtaw4Q4Nn2d5SYZTz3JuHYojwxEPgRDeb5BRs5Buva", 4 | "secret_key": "ed25519:3iu8Pb1ogb7eNEeS2iJth5SZqnz7y35VQ7HBpzqa7LPdUEuXXjoeT5DyHmR5ygmhY4z8i9eYBotHZUBTZb2Viubx" 5 | } 6 | -------------------------------------------------------------------------------- /test/sandbox.Dockerfile: -------------------------------------------------------------------------------- 1 | FROM debian:bookworm as builder 2 | WORKDIR /usr/src/app 3 | RUN apt-get update && apt-get install --assume-yes curl 4 | RUN curl -LJO https://s3-us-west-1.amazonaws.com/build.nearprotocol.com/nearcore/Linux-x86_64/1.38.0/aac5e42fe8975e27faca53e31f53f9c67a5b4e35/near-sandbox.tar.gz 5 | RUN tar -xf near-sandbox.tar.gz 6 | 7 | FROM debian:bookworm-slim as 
runtime 8 | 9 | LABEL org.opencontainers.image.source https://github.com/nuffle-labs/data-availability 10 | 11 | WORKDIR /usr/local/bin 12 | COPY --from=builder /usr/src/app/Linux-x86_64/near-sandbox /usr/local/bin/near-sandbox 13 | RUN apt-get update && apt-get install --assume-yes curl jq 14 | RUN near-sandbox --home /root/.near init 15 | 16 | COPY * /root/.near 17 | 18 | RUN cat /root/.near/validator_key.json 19 | 20 | ENTRYPOINT [ "near-sandbox", "--home", "/root/.near", "run" ] 21 | -------------------------------------------------------------------------------- /test/validator_key.json: -------------------------------------------------------------------------------- 1 | { 2 | "account_id": "test.near", 3 | "public_key": "ed25519:5qJULsTNSQT1R5FAacVKNk3R3sgh8rfpwnJcqLnC5u1F", 4 | "secret_key": "ed25519:4dagBsEqCv3Ao5wa4KKFa57xNAH4wuBjh9wdTNYeCqDSeA9zE7fCnHSvWpU8t68jUpcCGqgfYwcH68suPaqmdcgm" 5 | } 6 | -------------------------------------------------------------------------------- /ws.code-workspace: -------------------------------------------------------------------------------- 1 | { 2 | "folders": [ 3 | { 4 | "path": "." 5 | } 6 | ], 7 | "settings": {} 8 | } --------------------------------------------------------------------------------