├── .github ├── dependabot.yml ├── hooks │ └── pre-commit ├── pull_request_template.md └── workflows │ ├── deb_packager.yml │ ├── lint.yml │ ├── push-docker.yml │ ├── regression-tests.yml │ ├── release.yml │ ├── test-autoclaiml2l2.yml │ ├── test-e2e.yml │ ├── test-e2ecompress.yml │ ├── test-edge.yml │ ├── test-l2l2.yml │ ├── test-multiplerollups.yml │ ├── test-sovereignchain.yml │ ├── test.yml │ └── updatedeps.yml ├── .gitignore ├── .goreleaser.yml ├── Dockerfile ├── DockerfileE2ETest ├── LICENSE ├── Makefile ├── README.md ├── SECURITY.md ├── autoclaimservice ├── README.md ├── autoclaim │ ├── autoclaim.go │ └── config.go ├── blockchainmanager │ ├── config.go │ └── manager.go ├── config │ ├── config.go │ ├── config.local.toml │ └── default.go ├── main.go └── test.keystore.autoclaim ├── bridgectrl ├── bridgectrl.go ├── bridgectrl_test.go ├── config.go ├── hash.go ├── interfaces.go ├── merkletree.go ├── merkletree_test.go └── pb │ ├── query.pb.go │ ├── query.pb.gw.go │ └── query_grpc.pb.go ├── claimtxman ├── claimtxman.go ├── claimtxman_test.go ├── compose_compress_claim.go ├── compose_compress_claim_test.go ├── config.go ├── groups_trigger.go ├── interfaces.go ├── mocks │ ├── bridge_service_interface.go │ ├── etherman_i.go │ ├── storage_compressed_interface.go │ ├── storage_interface.go │ └── tx_monitorer.go ├── monitor_compressed_txs.go ├── monitor_compressed_txs_test.go ├── monitortxs.go ├── nonce_cache.go ├── pending_txs.go ├── pending_txs_test.go ├── store_changes.go └── types │ ├── monitoredtx.go │ └── monitoredtx_test.go ├── cmd ├── main.go ├── run.go └── version.go ├── config ├── config.debug.toml ├── config.go ├── config.local.toml ├── default.go ├── network.go └── types │ ├── duration.go │ └── keystore.go ├── db ├── config.go ├── pgstorage │ ├── config.go │ ├── interfaces.go │ ├── migrations │ │ ├── 0001.sql │ │ ├── 0002.sql │ │ ├── 0002_test.go │ │ ├── 0003.sql │ │ ├── 0003_test.go │ │ ├── 0004.sql │ │ ├── 0004_test.go │ │ ├── 0005.sql │ │ ├── 0005_test.go │ │ ├── 0006.sql │ │ ├── 0006_test.go │ │ ├── 0007.sql │ │ ├── 0007_test.go │ │ ├── 0008.sql │ │ ├── 0008_test.go │ │ ├── 0009.sql │ │ ├── 0009_test.go │ │ ├── 0010.sql │ │ ├── 0010_test.go │ │ ├── 0011.sql │ │ ├── 0011_test.go │ │ ├── 0012.sql │ │ ├── 0012_test.go │ │ ├── 0013.sql │ │ ├── 0013_test.go │ │ ├── 0014.sql │ │ ├── 0014_test.go │ │ ├── 0015.sql │ │ ├── 0015_test.go │ │ ├── 0016.sql │ │ ├── 0016_test.go │ │ ├── 0017.sql │ │ ├── 0017_test.go │ │ ├── 0018.sql │ │ ├── 0018_test.go │ │ └── utils_test.go │ ├── pgstorage.go │ ├── pgstorage_monitored_txs_group.go │ ├── pgstorage_test.go │ └── utils.go ├── storage.go └── storage_test.go ├── docker-compose.yml ├── docs ├── architecture.drawio.png ├── e2e-realnetwork-test.md └── running_local.md ├── etherman ├── config.go ├── etherman.go ├── etherman_test.go ├── metrics │ └── metrics.go ├── simulated.go ├── smartcontracts │ ├── ERC20 │ │ ├── ERC20.abi │ │ ├── ERC20.bin │ │ ├── ERC20.go │ │ ├── IERC20.abi │ │ └── IERC20.bin │ ├── abi │ │ ├── bridgel2sovereignchain.abi │ │ ├── claimcompressor.abi │ │ ├── globalexitrootmanagerl2sovereignchain.abi │ │ ├── mockpolygonrollupmanager.abi │ │ ├── mockverifier.abi │ │ ├── oldglobalexitrootmanagerl2sovereignchain.abi │ │ ├── oldpolygonzkevmbridge.abi │ │ ├── pol.abi │ │ ├── polygonrollupmanager.abi │ │ ├── polygonzkevm.abi │ │ ├── polygonzkevmbridgev2.abi │ │ ├── polygonzkevmglobalexitroot.abi │ │ └── proxy.abi │ ├── bin │ │ ├── bridgel2sovereignchain.bin │ │ ├── claimcompressor.bin │ │ ├── globalexitrootmanagerl2sovereignchain.bin │ │ ├── 
mockpolygonrollupmanager.bin │ │ ├── mockverifier.bin │ │ ├── oldglobalexitrootmanagerl2sovereignchain.bin │ │ ├── oldpolygonzkevmbridge.bin │ │ ├── pol.bin │ │ ├── polygonrollupmanager.bin │ │ ├── polygonzkevm.bin │ │ ├── polygonzkevmbridgev2.bin │ │ ├── polygonzkevmglobalexitroot.bin │ │ └── proxy.bin │ ├── bridgel2sovereignchain │ │ └── bridgel2sovereignchain.go │ ├── claimcompressor │ │ └── claimcompressor.go │ ├── globalexitrootmanagerl2sovereignchain │ │ └── globalexitrootmanagerl2sovereignchain.go │ ├── mockpolygonrollupmanager │ │ └── mockpolygonrollupmanager.go │ ├── mockverifier │ │ └── mockverifier.go │ ├── oldglobalexitrootmanagerl2sovereignchain │ │ └── oldglobalexitrootmanagerl2sovereignchain.go │ ├── oldpolygonzkevmbridge │ │ └── oldpolygonzkevmbridge.go │ ├── pol │ │ └── pol.go │ ├── polygonrollupmanager │ │ └── polygonrollupmanager.go │ ├── polygonzkevm │ │ └── polygonzkevm.go │ ├── polygonzkevmbridgev2 │ │ └── polygonzkevmbridgev2.go │ ├── polygonzkevmglobalexitroot │ │ └── polygonzkevmglobalexitroot.go │ ├── proxy │ │ └── proxy.go │ ├── readme.md │ └── script.sh └── types.go ├── go.mod ├── go.sum ├── hex ├── hex.go └── hex_test.go ├── jsonrpcclient ├── client.go ├── types │ ├── codec.go │ ├── codec_test.go │ ├── errors.go │ └── types.go └── zkevm.go ├── log ├── config.go ├── log.go └── log_test.go ├── metrics ├── api.go ├── config.go ├── prometheus.go └── prometheus_test.go ├── packaging ├── deb │ └── zkevm-bridge │ │ └── DEBIAN │ │ ├── postinst │ │ └── postrm └── systemd │ └── zkevm-bridge.service ├── proto └── src │ └── proto │ └── bridge │ └── v1 │ └── query.proto ├── scripts ├── cmd │ ├── dependencies.go │ ├── dependencies │ │ ├── files.go │ │ ├── files_test.go │ │ ├── github.go │ │ ├── github_test.go │ │ ├── images.go │ │ ├── images_test.go │ │ ├── manager.go │ │ ├── protobuffers.go │ │ └── testvectors.go │ └── main.go └── generate-smartcontracts-bindings.sh ├── server ├── config.go ├── interfaces.go ├── metrics │ └── metrics.go ├── mock_bridgeServiceStorage.go ├── server.go ├── service.go └── service_test.go ├── synchronizer ├── config.go ├── interfaces.go ├── metrics │ └── metrics.go ├── mock_bridgectrl.go ├── mock_dbtx.go ├── mock_etherman.go ├── mock_storage.go ├── mock_zkevmclient.go ├── synchronizer.go └── synchronizer_test.go ├── test ├── benchmark │ └── api_test.go ├── client │ ├── client.go │ └── config.go ├── config │ ├── aggoracle │ │ └── config.toml │ ├── bridge_network_e2e │ │ └── cardona.toml │ ├── node │ │ ├── config.zkevm.node.toml │ │ ├── genesis.local-1.json │ │ ├── genesis.local-2.json │ │ ├── genesis.local-v1tov2.json │ │ └── genesis.local.json │ └── prover │ │ ├── config.prover.json │ │ └── initproverdb.sql ├── e2e │ ├── autoclaim_l2_l2_test.go │ ├── bridge_network_bridge_msg_test.go │ ├── bridge_network_erc20_test.go │ ├── bridge_network_eth_test.go │ ├── bridge_network_shared.go │ ├── bridge_sovereign_chain_test.go │ ├── bridge_test.go │ ├── compress_test.go │ ├── edge_test.go │ ├── l2_l2_test.go │ └── multiplerollups_test.go ├── mocksmartcontracts │ ├── BridgeMessageReceiver.sol │ ├── BridgeMessageReceiver │ │ ├── BridgeMessageReceiver.abi │ │ ├── BridgeMessageReceiver.bin │ │ ├── BridgeMessageReceiver.go │ │ ├── IBridgeMessageReceiver.abi │ │ └── IBridgeMessageReceiver.bin │ ├── PingReceiver.sol │ ├── PingReceiver │ │ └── PingReceiver.go │ ├── abi │ │ ├── PingReceiver.abi │ │ ├── erc20permitmock.abi │ │ └── polygonzkevmbridge.abi │ ├── bin │ │ ├── PingReceiver.bin │ │ ├── erc20permitmock.bin │ │ └── polygonzkevmbridge.bin │ ├── 
erc20permitmock │ │ └── erc20permitmock.go │ ├── json │ │ └── PingReceiver.json │ ├── polygonzkevmbridge.sol │ ├── polygonzkevmbridge │ │ └── polygonzkevmbridge.go │ ├── readme.md │ └── script.sh ├── operations │ ├── interfaces.go │ ├── manager.go │ ├── mockserver.go │ ├── token.go │ └── wait.go ├── scripts │ ├── claim │ │ └── main.go │ ├── deploytool │ │ ├── README.MD │ │ └── main.go │ ├── deposit │ │ └── main.go │ ├── forcebatchproposal │ │ └── main.go │ ├── initialClaim │ │ ├── Readme.md │ │ └── main.go │ ├── isClaimed │ │ └── main.go │ └── readLatestLER │ │ └── main.go ├── test.keystore.aggregator ├── test.keystore.aggregator-2 ├── test.keystore.claimtx ├── test.keystore.sequencer ├── test.keystore.sequencer-2 └── vectors │ ├── src │ ├── block-raw.json │ ├── claim-raw.json │ ├── deposit-raw.json │ ├── mt-bridge │ │ ├── claim-vectors.json │ │ ├── leaf-vectors.json │ │ ├── postgres-fullmt-vector.sql │ │ └── root-vectors.json │ └── smc-txevents-sendbatch-test-vector.json │ └── vectors.go ├── third_party └── google │ └── api │ ├── annotations.proto │ └── http.proto ├── utils ├── client.go ├── gerror │ └── error.go ├── helpers.go └── time_provider.go ├── version.go └── version.mk /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | --- 2 | version: 2 3 | updates: 4 | - package-ecosystem: "gomod" 5 | directory: "/" 6 | schedule: 7 | interval: "daily" 8 | target-branch: "develop" 9 | -------------------------------------------------------------------------------- /.github/hooks/pre-commit: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Only run lint if .go files changed 4 | git diff --cached --name-only | if grep --quiet ".*go$" 5 | then 6 | make lint 7 | fi 8 | -------------------------------------------------------------------------------- /.github/pull_request_template.md: -------------------------------------------------------------------------------- 1 | Closes #. 2 | 3 | ### What does this PR do? 
4 | 5 | 7 | 8 | ### Reviewers 9 | 10 | Main reviewers: 11 | 12 | 13 | 14 | - @John 15 | - @Doe 16 | 17 | Codeowner reviewers: 18 | 19 | 20 | 21 | - @-Alice 22 | - @-Bob 23 | -------------------------------------------------------------------------------- /.github/workflows/deb_packager.yml: -------------------------------------------------------------------------------- 1 | name: deb_packager 2 | # test 3 | 4 | on: 5 | push: 6 | branches: 7 | - 'main' 8 | paths: 9 | - '**' 10 | tags: 11 | - 'v*.*.*' 12 | - 'v*.*.*-*' 13 | 14 | jobs: 15 | build: 16 | permissions: 17 | id-token: write 18 | contents: write 19 | runs-on: ubuntu-20.04 20 | steps: 21 | - name: Checkout 22 | uses: actions/checkout@v2 23 | with: 24 | fetch-depth: 0 25 | - name: Set up Go 26 | uses: actions/setup-go@master 27 | with: 28 | go-version: 1.22.x 29 | # Variables 30 | - name: Adding TAG to ENV 31 | run: echo "GIT_TAG=`echo $(git describe --tags --abbrev=0)`" >> $GITHUB_ENV 32 | - name: adding version 33 | run: | 34 | NUMERIC_VERSION=$( echo ${{ env.GIT_TAG }} | sed 's/[^0-9.]//g' ) 35 | echo "VERSION=$NUMERIC_VERSION" >> $GITHUB_ENV 36 | 37 | - name: build the binary 38 | run: make build 39 | 40 | - name: making directory structure 41 | run: mkdir -p packaging/deb/zkevm-bridge/usr/bin/ 42 | - name: copying necessary binary for amd64 43 | run: cp -rp dist/zkevm-bridge packaging/deb/zkevm-bridge/usr/bin/zkevm-bridge 44 | - name: create directory for service file 45 | run: mkdir -p packaging/deb/zkevm-bridge/lib/systemd/system 46 | - name: copy the service file 47 | run: cp -rp packaging/systemd/zkevm-bridge.service packaging/deb/zkevm-bridge/lib/systemd/system/ 48 | 49 | 50 | # Control file creation 51 | - name: create control file 52 | run: | 53 | echo "Package: zkevm-bridge" >> packaging/deb/zkevm-bridge/DEBIAN/control 54 | echo "Version: ${{ env.VERSION }}" >> packaging/deb/zkevm-bridge/DEBIAN/control 55 | echo "Section: base" >> packaging/deb/zkevm-bridge/DEBIAN/control 56 | echo "Priority: optional" >> packaging/deb/zkevm-bridge/DEBIAN/control 57 | echo "Architecture: amd64" >> packaging/deb/zkevm-bridge/DEBIAN/control 58 | echo "Maintainer: devops@polygon.technology" >> packaging/deb/zkevm-bridge/DEBIAN/control 59 | echo "Description: zkevm-bridge binary package" >> packaging/deb/zkevm-bridge/DEBIAN/control 60 | 61 | - name: Creating package for binary for zkevm-bridge ${{ env.ARCH }} 62 | run: cp -rp packaging/deb/zkevm-bridge packaging/deb/zkevm-bridge-${{ env.GIT_TAG }}-${{ env.ARCH }} 63 | env: 64 | ARCH: amd64 65 | 66 | - name: Running package build 67 | run: dpkg-deb --build --root-owner-group packaging/deb/zkevm-bridge-${{ env.GIT_TAG }}-${{ env.ARCH }} 68 | env: 69 | ARCH: amd64 70 | 71 | - name: create checksum for the amd64 package 72 | run: cd packaging/deb/ && sha256sum zkevm-bridge-${{ env.GIT_TAG }}-${{ env.ARCH }}.deb > zkevm-bridge-${{ env.GIT_TAG }}-${{ env.ARCH }}.deb.checksum 73 | env: 74 | ARCH: amd64 75 | 76 | 77 | - name: Release zkevm-bridge Packages 78 | uses: softprops/action-gh-release@v1 79 | with: 80 | tag_name: ${{ env.GIT_TAG }} 81 | prerelease: true 82 | files: | 83 | packaging/deb/zkevm-bridge**.deb 84 | packaging/deb/zkevm-bridge**.deb.checksum -------------------------------------------------------------------------------- /.github/workflows/lint.yml: -------------------------------------------------------------------------------- 1 | name: Lint 2 | on: 3 | push: 4 | branches: 5 | - main 6 | - master 7 | - develop 8 | pull_request: 9 | 10 | jobs: 11 | lint: 12 | runs-on: 
ubuntu-latest 13 | steps: 14 | - name: Install Go 15 | uses: actions/setup-go@v1 16 | with: 17 | go-version: 1.24.x 18 | - name: Checkout code 19 | uses: actions/checkout@v2 20 | - name: Lint 21 | run: | 22 | make install-linter 23 | make lint 24 | -------------------------------------------------------------------------------- /.github/workflows/push-docker.yml: -------------------------------------------------------------------------------- 1 | on: 2 | push: 3 | tags: 4 | - 'v[0-9]*.[0-9]*.[0-9]*' # GitHub uses glob patterns, not regex 5 | workflow_dispatch: {} 6 | 7 | permissions: 8 | contents: read 9 | packages: write # needed to push to GHCR with GITHUB_TOKEN 10 | 11 | jobs: 12 | build: 13 | runs-on: ubuntu-latest 14 | steps: 15 | - name: Checkout 16 | uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744 # v3 17 | 18 | - name: Set up QEMU 19 | uses: docker/setup-qemu-action@v3 20 | 21 | - name: Set up Docker Buildx 22 | uses: docker/setup-buildx-action@v3 23 | 24 | - name: Login to GHCR 25 | uses: docker/login-action@v3 26 | with: 27 | registry: ghcr.io 28 | username: ${{ github.actor }} 29 | password: ${{ secrets.GITHUB_TOKEN }} 30 | 31 | - name: Normalize GHCR image name (lowercase owner) 32 | # e.g., ghcr.io/0xpolygon/zkevm-bridge-service 33 | run: | 34 | GHCR_OWNER_LC=$(echo "${{ github.repository_owner }}" | tr '[:upper:]' '[:lower:]') 35 | echo "GHCR_IMAGE_NAME=ghcr.io/${GHCR_OWNER_LC}/zkevm-bridge-service" >> $GITHUB_ENV 36 | 37 | - name: Build and push 38 | id: docker_build 39 | uses: docker/build-push-action@v5 40 | with: 41 | context: . 42 | platforms: linux/amd64,linux/arm64 43 | push: true 44 | tags: | 45 | ${{ env.GHCR_IMAGE_NAME }}:${{ github.ref_name }} 46 | build-args: | 47 | PRIVATE_TOKEN=${{ secrets.GIT_TOKEN }} 48 | -------------------------------------------------------------------------------- /.github/workflows/regression-tests.yml: -------------------------------------------------------------------------------- 1 | name: Regression Tests 2 | 3 | on: 4 | pull_request: 5 | push: 6 | branches: [develop] 7 | 8 | concurrency: 9 | group: regression-tests-${{ github.event.pull_request.number || github.ref }} 10 | cancel-in-progress: true 11 | 12 | env: 13 | ENCLAVE_NAME: cdk 14 | 15 | jobs: 16 | build-docker-image: 17 | runs-on: ubuntu-latest 18 | timeout-minutes: 10 19 | steps: 20 | - uses: actions/checkout@v4 21 | 22 | - name: Build docker image 23 | run: make build-docker 24 | 25 | - name: Save docker image to archive 26 | run: docker save --output /tmp/zkevm-bridge-service.tar zkevm-bridge-service 27 | 28 | - name: Upload archive 29 | uses: actions/upload-artifact@v4 30 | with: 31 | name: zkevm-bridge-service 32 | path: /tmp/zkevm-bridge-service.tar 33 | 34 | deploy-cdk-stack: 35 | runs-on: ubuntu-latest 36 | timeout-minutes: 30 37 | needs: build-docker-image 38 | steps: 39 | - uses: actions/checkout@v4 40 | 41 | - name: Checkout kurtosis-cdk repository 42 | uses: actions/checkout@v4 43 | with: 44 | repository: 0xPolygon/kurtosis-cdk 45 | path: kurtosis-cdk 46 | ref: v0.4.18 47 | 48 | - name: Install Kurtosis CDK tools 49 | uses: .github/actions/kurtosis-pre-run 50 | 51 | - name: Download archive 52 | uses: actions/download-artifact@v4 53 | with: 54 | name: zkevm-bridge-service 55 | path: /tmp 56 | 57 | - name: Load docker image 58 | run: | 59 | docker load --input /tmp/zkevm-bridge-service.tar 60 | docker image ls -a 61 | 62 | - name: Deploy kurtosis enclave 63 | working-directory: kurtosis-cdk 64 | run: | 65 | kurtosis run --enclave ${{ 
env.ENCLAVE_NAME }} . '{"args": {"zkevm_bridge_service_image": "zkevm-bridge-service:latest"}}' 66 | 67 | - name: Monitor verified batches 68 | working-directory: ./kurtosis-cdk/.github/scripts 69 | run: | 70 | ./monitor-cdk-chain.sh \ 71 | --enclave ${{ env.ENCLAVE_NAME }} \ 72 | --rpc-url $(kurtosis port print ${{ env.ENCLAVE_NAME }} cdk-erigon-rpc-001 rpc) 73 | 74 | - name: Dump enclave 75 | if: ${{ !cancelled() }} 76 | run: kurtosis enclave dump ${{ env.ENCLAVE_NAME }} ./dump 77 | 78 | - name: Upload enclave dump 79 | if: ${{ !cancelled() }} 80 | uses: actions/upload-artifact@v4 81 | with: 82 | name: dump_regression_tests_${{ github.run_id }} 83 | path: ./dump 84 | -------------------------------------------------------------------------------- /.github/workflows/release.yml: -------------------------------------------------------------------------------- 1 | name: goreleaser 2 | 3 | on: 4 | push: 5 | tags: 6 | - 'v[0-9]+.[0-9]+.[0-9]+' # this action will only run on tags that follow semver 7 | 8 | jobs: 9 | goreleaser: 10 | runs-on: ubuntu-latest 11 | steps: 12 | - name: Checkout 13 | uses: actions/checkout@v4 14 | with: 15 | fetch-depth: 0 16 | 17 | - name: Set up Go 18 | uses: actions/setup-go@v5 19 | with: 20 | go-version: 1.24 21 | 22 | - name: Get packr 23 | run: go install github.com/gobuffalo/packr/v2/packr2@v2.8.3 24 | 25 | - name: Prepare 26 | run: git reset --hard 27 | 28 | - name: Run GoReleaser 29 | uses: goreleaser/goreleaser-action@v4 30 | with: 31 | version: latest 32 | args: release --clean 33 | env: 34 | GITHUB_TOKEN: ${{ secrets.GIT_TOKEN }} -------------------------------------------------------------------------------- /.github/workflows/test-autoclaiml2l2.yml: -------------------------------------------------------------------------------- 1 | name: Test 2 | on: 3 | push: 4 | branches: 5 | - main 6 | - master 7 | - develop 8 | - update-external-dependencies 9 | pull_request: 10 | jobs: 11 | test-autoclaiml2l2: 12 | strategy: 13 | matrix: 14 | go-version: [ 1.24.x ] 15 | goarch: [ "amd64" ] 16 | runs-on: ubuntu-latest 17 | steps: 18 | - name: Checkout code 19 | uses: actions/checkout@v2 20 | - name: Install Go 21 | uses: actions/setup-go@v1 22 | with: 23 | go-version: ${{ matrix.go-version }} 24 | env: 25 | GOARCH: ${{ matrix.goarch }} 26 | - name: Test 27 | run: make test-autoclaiml2l2 28 | -------------------------------------------------------------------------------- /.github/workflows/test-e2e.yml: -------------------------------------------------------------------------------- 1 | name: Test-e2e 2 | on: 3 | push: 4 | branches: 5 | - main 6 | - master 7 | - develop 8 | - update-external-dependencies 9 | pull_request: 10 | jobs: 11 | test-e2e: 12 | strategy: 13 | matrix: 14 | go-version: [ 1.24.x ] 15 | goarch: [ "amd64" ] 16 | runs-on: ubuntu-latest 17 | steps: 18 | - name: Checkout code 19 | uses: actions/checkout@v2 20 | - name: Install Go 21 | uses: actions/setup-go@v1 22 | with: 23 | go-version: ${{ matrix.go-version }} 24 | env: 25 | GOARCH: ${{ matrix.goarch }} 26 | - name: Test 27 | run: make test-full 28 | -------------------------------------------------------------------------------- /.github/workflows/test-e2ecompress.yml: -------------------------------------------------------------------------------- 1 | name: Test-e2compress 2 | on: 3 | push: 4 | branches: 5 | - main 6 | - master 7 | - develop 8 | - update-external-dependencies 9 | pull_request: 10 | jobs: 11 | test-e2ecompress: 12 | strategy: 13 | matrix: 14 | go-version: [ 1.24.x ] 15 | goarch: 
[ "amd64" ] 16 | runs-on: ubuntu-latest 17 | steps: 18 | - name: Checkout code 19 | uses: actions/checkout@v2 20 | - name: Install Go 21 | uses: actions/setup-go@v1 22 | with: 23 | go-version: ${{ matrix.go-version }} 24 | env: 25 | GOARCH: ${{ matrix.goarch }} 26 | - name: Test 27 | run: make test-e2ecompress 28 | -------------------------------------------------------------------------------- /.github/workflows/test-edge.yml: -------------------------------------------------------------------------------- 1 | name: Test-edge 2 | on: 3 | push: 4 | branches: 5 | - main 6 | - master 7 | - develop 8 | - update-external-dependencies 9 | pull_request: 10 | jobs: 11 | test-edge: 12 | strategy: 13 | matrix: 14 | go-version: [ 1.24.x ] 15 | goarch: [ "amd64" ] 16 | runs-on: ubuntu-latest 17 | steps: 18 | - name: Checkout code 19 | uses: actions/checkout@v2 20 | - name: Install Go 21 | uses: actions/setup-go@v1 22 | with: 23 | go-version: ${{ matrix.go-version }} 24 | env: 25 | GOARCH: ${{ matrix.goarch }} 26 | - name: Test 27 | run: make test-edge 28 | -------------------------------------------------------------------------------- /.github/workflows/test-l2l2.yml: -------------------------------------------------------------------------------- 1 | name: Test 2 | on: 3 | push: 4 | branches: 5 | - main 6 | - master 7 | - develop 8 | - update-external-dependencies 9 | pull_request: 10 | jobs: 11 | test-l2l2: 12 | strategy: 13 | matrix: 14 | go-version: [ 1.24.x ] 15 | goarch: [ "amd64" ] 16 | runs-on: ubuntu-latest 17 | steps: 18 | - name: Checkout code 19 | uses: actions/checkout@v2 20 | - name: Install Go 21 | uses: actions/setup-go@v1 22 | with: 23 | go-version: ${{ matrix.go-version }} 24 | env: 25 | GOARCH: ${{ matrix.goarch }} 26 | - name: Test 27 | run: make test-l2l2 28 | -------------------------------------------------------------------------------- /.github/workflows/test-multiplerollups.yml: -------------------------------------------------------------------------------- 1 | name: Test 2 | on: 3 | push: 4 | branches: 5 | - main 6 | - master 7 | - develop 8 | - update-external-dependencies 9 | pull_request: 10 | jobs: 11 | test-multiplerollups: 12 | strategy: 13 | matrix: 14 | go-version: [ 1.24.x ] 15 | goarch: [ "amd64" ] 16 | runs-on: ubuntu-latest 17 | steps: 18 | - name: Checkout code 19 | uses: actions/checkout@v2 20 | - name: Install Go 21 | uses: actions/setup-go@v1 22 | with: 23 | go-version: ${{ matrix.go-version }} 24 | env: 25 | GOARCH: ${{ matrix.goarch }} 26 | - name: Test 27 | run: make test-multiplerollups 28 | -------------------------------------------------------------------------------- /.github/workflows/test-sovereignchain.yml: -------------------------------------------------------------------------------- 1 | name: Test-sovereignchain 2 | on: 3 | push: 4 | branches: 5 | - main 6 | - master 7 | - develop 8 | - update-external-dependencies 9 | pull_request: 10 | jobs: 11 | test-sovereignchain: 12 | strategy: 13 | matrix: 14 | go-version: [ 1.24.x ] 15 | goarch: [ "amd64" ] 16 | runs-on: ubuntu-latest 17 | steps: 18 | - name: Checkout code 19 | uses: actions/checkout@v2 20 | - name: Install Go 21 | uses: actions/setup-go@v1 22 | with: 23 | go-version: ${{ matrix.go-version }} 24 | env: 25 | GOARCH: ${{ matrix.goarch }} 26 | - name: Test 27 | run: make test-sovereignchain 28 | -------------------------------------------------------------------------------- /.github/workflows/test.yml: -------------------------------------------------------------------------------- 1 | 
name: Test 2 | on: 3 | push: 4 | branches: 5 | - main 6 | - master 7 | - develop 8 | - update-external-dependencies 9 | pull_request: 10 | jobs: 11 | test: 12 | strategy: 13 | matrix: 14 | go-version: [ 1.24.x ] 15 | goarch: [ "amd64" ] 16 | runs-on: ubuntu-latest 17 | steps: 18 | - name: Checkout code 19 | uses: actions/checkout@v2 20 | - name: Install Go 21 | uses: actions/setup-go@v1 22 | with: 23 | go-version: ${{ matrix.go-version }} 24 | env: 25 | GOARCH: ${{ matrix.goarch }} 26 | - name: Test 27 | run: make test 28 | - name: Benchmark Test 29 | run: make bench 30 | -------------------------------------------------------------------------------- /.github/workflows/updatedeps.yml: -------------------------------------------------------------------------------- 1 | --- 2 | name: Update dependencies 3 | on: 4 | schedule: 5 | - cron: '0 */8 * * *' 6 | workflow_dispatch: 7 | jobs: 8 | updatedeps: 9 | runs-on: ubuntu-latest 10 | steps: 11 | - name: Checkout code 12 | uses: actions/checkout@v2 13 | - name: Install Go 14 | uses: actions/setup-go@v1 15 | with: 16 | go-version: "1.24.x" 17 | env: 18 | GOARCH: "amd64" 19 | - name: Install Protoc 20 | uses: arduino/setup-protoc@v1 21 | - name: Install protoc-gen-go 22 | run: | 23 | go install github.com/golang/protobuf/protoc-gen-go@latest 24 | go install google.golang.org/grpc/cmd/protoc-gen-go-grpc@latest 25 | go install github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-grpc-gateway@latest 26 | - name: Update deps 27 | env: 28 | DOCKERHUB_USERNAME: ${{ secrets.DOCKERHUB_USERNAME }} 29 | DOCKERHUB_PASSWORD: ${{ secrets.DOCKERHUB_TOKEN }} 30 | GITHUB_TOKEN: ${{ secrets.PERSONAL_ACCESS_TOKEN }} 31 | run: make update-external-dependencies 32 | - name: Create Pull Request 33 | uses: peter-evans/create-pull-request@v3 34 | with: 35 | token: ${{ secrets.PERSONAL_ACCESS_TOKEN }} 36 | commit-message: update external dependencies 37 | title: Update external dependencies 38 | body: Check for new images, test vectors and proto files and update the code to use them. 39 | branch: update-external-dependencies 40 | base: develop 41 | author: int-bot 42 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Binaries for programs and plugins 2 | *.exe 3 | *.exe~ 4 | *.dll 5 | *.so 6 | *.dylib 7 | 8 | # Test binary, built with `go test -c` 9 | *.test 10 | 11 | # Output of the go coverage tool, specifically when used with LiteIDE 12 | *.out 13 | 14 | # Dependency directories (remove the comment below to include it) 15 | # vendor/ 16 | /dist/ 17 | .vscode 18 | 19 | config/config.mainnet.toml 20 | config/config.testnet.toml 21 | -------------------------------------------------------------------------------- /.goreleaser.yml: -------------------------------------------------------------------------------- 1 | # .goreleaser.yaml 2 | builds: 3 | - main: ./cmd/ 4 | goos: 5 | - linux 6 | - darwin 7 | goarch: 8 | - amd64 9 | - arm64 10 | env: 11 | - CGO_ENABLED=0 12 | ldflags: 13 | - -X github.com/0xPolygonHermez/zkevm-bridge-service.Version={{.Version}} 14 | - -X github.com/0xPolygonHermez/zkevm-bridge-service.GitRev={{.Commit}} 15 | - -X github.com/0xPolygonHermez/zkevm-bridge-service.BuildDate={{.Date}} 16 | - -X github.com/0xPolygonHermez/zkevm-bridge-service.GitBranch={{.Branch}} 17 | release: 18 | # If set to auto, will mark the release as not ready for production 19 | # in case there is an indicator for this in the tag e.g. 
v1.0.0-rc1 20 | # If set to true, will mark the release as not ready for production. 21 | # Default is false. 22 | prerelease: true 23 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | # CONTAINER FOR BUILDING BINARY 2 | FROM golang:1.24 AS build 3 | 4 | ENV CGO_ENABLED=0 5 | # INSTALL DEPENDENCIES 6 | RUN go install github.com/gobuffalo/packr/v2/packr2@v2.8.3 7 | COPY go.mod go.sum /src/ 8 | RUN cd /src && go mod download 9 | 10 | # BUILD BINARY 11 | COPY . /src 12 | RUN cd /src/db && packr2 13 | RUN cd /src && make build 14 | 15 | # CONTAINER FOR RUNNING BINARY 16 | FROM postgres:latest 17 | RUN apt-get update 18 | RUN apt-get install ca-certificates -y 19 | COPY --from=build /src/dist/zkevm-bridge /app/zkevm-bridge 20 | COPY --from=build /src/dist/test-deploy-tool /app/test-deploy-tool 21 | COPY --from=build /src/dist/zkevm-autoclaimer /app/zkevm-autoclaimer 22 | COPY --from=build /src/test/vectors /app/test/vectors 23 | EXPOSE 8080 24 | EXPOSE 9090 25 | CMD ["/bin/sh", "-c", "/app/zkevm-bridge run"] 26 | -------------------------------------------------------------------------------- /DockerfileE2ETest: -------------------------------------------------------------------------------- 1 | # CONTAINER FOR BUILDING BINARY 2 | FROM golang:1.24 AS build 3 | 4 | ENV CGO_ENABLED=0 5 | 6 | # BUILD BINARY 7 | COPY . /src 8 | RUN cd /src && make build-test-e2e-real_network 9 | 10 | # CONTAINER FOR RUNNING BINARY 11 | FROM alpine:3.16.0 AS ERC20 12 | COPY --from=build /src/dist/zkevm-bridge-e2e-real_network-erc20 /app/zkevm-bridge-e2e-real_network-erc20 13 | CMD ["/bin/sh", "-c", "/app/zkevm-bridge-e2e-real_network-erc20 -test.failfast -test.v"] 14 | 15 | # CONTAINER FOR RUNNING BINARY 16 | FROM alpine:3.16.0 AS MSG 17 | COPY --from=build /src/dist/zkevm-bridge-e2e-real_network-bridgemsg /app/zkevm-bridge-e2e-real_network-bridgemsg 18 | CMD ["/bin/sh", "-c", "/app/zkevm-bridge-e2e-real_network-bridgemsg -test.failfast -test.v"] 19 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # zkEVM Bridge service 2 | 3 | This repo implements a backend service written in Go, that enables clients, like the [web UI](https://github.com/0xPolygonHermez/zkevm-bridge-ui), 4 | to interact with the [bridge smart contract](https://github.com/0xPolygonHermez/zkevm-contracts) by providing Merkleproofs. 5 | 6 | ## Architecture 7 | 8 |

 9 | ![Architecture diagram](docs/architecture.drawio.png) 10 |

 11 | 12 | ## Running the bridge service 13 | 14 | - [Running locally](docs/running_local.md) 15 | 16 | ## Running e2e tests for real networks 17 | There is a test for ERC20 bridging L1->L2 and L2->L1. This test is meant to be run externally. 18 | For this reason, you can build the execution Docker image: 19 | ``` 20 | make build-docker-e2e-real_network 21 | ``` 22 | 23 | - To run it, you need to pass a configuration file such as `test/config/bridge_network_e2e/cardona.toml` 24 | - Example of usage: 25 | 26 | ``` 27 | #!/bin/bash 28 | make build-docker-e2e-real_network 29 | mkdir tmp 30 | cat <<EOF > ./tmp/test.toml 31 | TestAddrPrivate= "${{ SECRET_PRIVATE_ADDR }}" 32 | [ConnectionConfig] 33 | L1NodeURL="${{ SECRET_L1URL }}" 34 | L2NodeURL="${{ L2URL }}" 35 | BridgeURL="${{ BRIDGEURL }}" 36 | L1BridgeAddr="${{ BRIDGE_ADDR_L1 }}" 37 | L2BridgeAddr="${{ BRIDGE_ADDR_L2 }}" 38 | EOF 39 | docker run --volume "./tmp/:/config/" --env BRIDGE_TEST_CONFIG_FILE=/config/test.toml bridge-e2e-realnetwork-erc20 40 | ``` 41 | 42 | ## Contact 43 | 44 | For more discussions, please head to the [R&D Discord](https://discord.gg/0xPolygonRnD) 45 | -------------------------------------------------------------------------------- /SECURITY.md: -------------------------------------------------------------------------------- 1 | # Polygon Technology Security Information 2 | 3 | ## Link to vulnerability disclosure details (Bug Bounty). 4 | - Websites and Applications: https://hackerone.com/polygon-technology 5 | - Smart Contracts: https://immunefi.com/bounty/polygon 6 | 7 | ## Languages that our team speaks and understands. 8 | Preferred-Languages: en 9 | 10 | ## Security-related job openings at Polygon. 11 | https://polygon.technology/careers 12 | 13 | ## Polygon security contact details. 14 | security@polygon.technology 15 | 16 | ## The URL for accessing the security.txt file. 17 | Canonical: https://polygon.technology/security.txt 18 | -------------------------------------------------------------------------------- /autoclaimservice/README.md: -------------------------------------------------------------------------------- 1 | # AutoClaim service 2 | This service allows claiming deposits whose destination is the network specified by the bridge address and the RPC network. 3 | 4 | ## Configuration file: 5 | This is an example of the configuration file. 6 | ``` 7 | [Log] 8 | Level = "debug" 9 | Outputs = ["stdout"] 10 | 11 | [AutoClaim] 12 | AuthorizedClaimMessageAddresses = [] 13 | AutoClaimInterval = "10m" 14 | MaxNumberOfClaimsPerGroup = 10 15 | BridgeURL = "http://localhost:8080" 16 | 17 | [BlockchainManager] 18 | PrivateKey = {Path = "./test.keystore.autoclaim", Password = "testonly"} 19 | L2RPC = "http://localhost:8123" 20 | PolygonBridgeAddress = "0xFe12ABaa190Ef0c8638Ee0ba9F828BF41368Ca0E" 21 | ClaimCompressorAddress = "0x2279B7A0a67DB372996a5FaB50D91eAA73d2eBe6" 22 | ``` 23 | - The Log section allows you to modify the log level. By default: debug mode 24 | - The AutoClaim section allows you to configure the following parameters: 25 | - `AuthorizedClaimMessageAddresses` These are the addresses that can use the autoclaim feature for bridge messages. By default: none 26 | - `AutoClaimInterval` This is the param that controls the interval to check for new bridges. By default: 10m 27 | - `MaxNumberOfClaimsPerGroup` This param allows you to control the maximum number of claims that can be grouped in a single tx. By default: 10 28 | - `BridgeURL` This is the bridge service URL used to get the bridges that can be claimed. 
By default: localhost:8080 29 | - The BlockchainManager section allows you to modify network parameters. 30 | - `PrivateKey` is the wallet used to send the claim txs. By default: 0xf39fd6e51aad88f6f4ce6ab8827279cfffb92266 31 | - `L2RPC` is the URL of the L2 node to send claim txs. By default: localhost:8123 32 | - `PolygonBridgeAddress` is the L2 bridge address. By default: 0xFe12ABaa190Ef0c8638Ee0ba9F828BF41368Ca0E 33 | - `ClaimCompressorAddress` is the compressor smc address. By default: 0x2279B7A0a67DB372996a5FaB50D91eAA73d2eBe6 34 | 35 | ***Note: If ClaimCompressorAddress is not defined or MaxNumberOfClaimsPerGroup is 0, then the claim compressor feature is disabled and claim txs will be sent one by one. 36 | 37 | ## How to run: 38 | This is an example command to run this service: 39 | ``` 40 | go run main.go run --cfg ./config/config.local.toml 41 | ``` -------------------------------------------------------------------------------- /autoclaimservice/autoclaim/config.go: -------------------------------------------------------------------------------- 1 | package autoclaim 2 | 3 | import ( 4 | "github.com/0xPolygonHermez/zkevm-bridge-service/config/types" 5 | "github.com/ethereum/go-ethereum/common" 6 | ) 7 | 8 | // Config represents the configuration of the AutoClaim package 9 | type Config struct { 10 | // AuthorizedClaimMessageAddresses are the addresses allowed to bridge messages with autoClaim 11 | AuthorizedClaimMessageAddresses []common.Address `mapstructure:"AuthorizedClaimMessageAddresses"` 12 | // AutoClaimInterval is the time between each iteration 13 | AutoClaimInterval types.Duration `mapstructure:"AutoClaimInterval"` 14 | // MaxNumberOfClaimsPerGroup is the maximum number of claims per group. 0 means group claims is disabled 15 | MaxNumberOfClaimsPerGroup int `mapstructure:"MaxNumberOfClaimsPerGroup"` 16 | // BridgeURL is the URL of the bridge service 17 | BridgeURL string `mapstructure:"BridgeURL"` 18 | } 19 | -------------------------------------------------------------------------------- /autoclaimservice/blockchainmanager/config.go: -------------------------------------------------------------------------------- 1 | package blockchainmanager 2 | 3 | import ( 4 | "github.com/0xPolygonHermez/zkevm-bridge-service/config/types" 5 | "github.com/ethereum/go-ethereum/common" 6 | ) 7 | 8 | // Config is the configuration struct for the different environments. 9 | type Config struct { 10 | // L2RPC is the URL of the L2 node 11 | L2RPC string `mapstructure:"L2RPC"` 12 | // PrivateKey defines the key store file that is going 13 | // to be read in order to provide the private key to sign the claim txs 14 | PrivateKey types.KeystoreFileConfig `mapstructure:"PrivateKey"` 15 | // PolygonBridgeAddress is the l2 bridge smc address 16 | PolygonBridgeAddress common.Address `mapstructure:"PolygonBridgeAddress"` 17 | // ClaimCompressorAddress is the l2 claim compressor smc address. 
If it's not set, then group claims is disabled 18 | ClaimCompressorAddress common.Address `mapstructure:"ClaimCompressorAddress"` 19 | } 20 | -------------------------------------------------------------------------------- /autoclaimservice/config/config.go: -------------------------------------------------------------------------------- 1 | package config 2 | 3 | import ( 4 | "path/filepath" 5 | "strings" 6 | 7 | "github.com/0xPolygonHermez/zkevm-bridge-service/autoclaimservice/autoclaim" 8 | "github.com/0xPolygonHermez/zkevm-bridge-service/autoclaimservice/blockchainmanager" 9 | "github.com/0xPolygonHermez/zkevm-bridge-service/log" 10 | "github.com/mitchellh/mapstructure" 11 | "github.com/spf13/viper" 12 | ) 13 | 14 | // Config struct 15 | type Config struct { 16 | Log log.Config 17 | AutoClaim autoclaim.Config 18 | BlockchainManager blockchainmanager.Config 19 | } 20 | 21 | // Load loads the configuration 22 | func Load(configFilePath string) (*Config, error) { 23 | cfg, err := Default() 24 | if err != nil { 25 | return nil, err 26 | } 27 | 28 | if configFilePath != "" { 29 | dirName, fileName := filepath.Split(configFilePath) 30 | 31 | fileExtension := strings.TrimPrefix(filepath.Ext(fileName), ".") 32 | fileNameWithoutExtension := strings.TrimSuffix(fileName, "."+fileExtension) 33 | 34 | viper.AddConfigPath(dirName) 35 | viper.SetConfigName(fileNameWithoutExtension) 36 | viper.SetConfigType(fileExtension) 37 | } 38 | 39 | viper.AutomaticEnv() 40 | replacer := strings.NewReplacer(".", "_") 41 | viper.SetEnvKeyReplacer(replacer) 42 | viper.SetEnvPrefix("ZKEVM_AUTOCLAIM") 43 | 44 | if err = viper.ReadInConfig(); err != nil { 45 | if _, ok := err.(viper.ConfigFileNotFoundError); ok { 46 | log.Infof("config file not found") 47 | } else { 48 | log.Infof("error reading config file: ", err) 49 | return nil, err 50 | } 51 | } 52 | 53 | decodeHooks := []viper.DecoderConfigOption{ 54 | // this allows arrays to be decoded from env var separated by ",", example: MY_VAR="value1,value2,value3" 55 | viper.DecodeHook(mapstructure.ComposeDecodeHookFunc(mapstructure.TextUnmarshallerHookFunc(), mapstructure.StringToSliceHookFunc(","))), 56 | } 57 | err = viper.Unmarshal(&cfg, decodeHooks...) 
58 | if err != nil { 59 | return nil, err 60 | } 61 | 62 | return cfg, nil 63 | } 64 | -------------------------------------------------------------------------------- /autoclaimservice/config/config.local.toml: -------------------------------------------------------------------------------- 1 | [Log] 2 | Level = "debug" 3 | Outputs = ["stdout"] 4 | 5 | [AutoClaim] 6 | AuthorizedClaimMessageAddresses = [] 7 | AutoClaimInterval = "10m" 8 | MaxNumberOfClaimsPerGroup = 10 9 | BridgeURL = "http://localhost:8080" 10 | 11 | [BlockchainManager] 12 | PrivateKey = {Path = "./test.keystore.autoclaim", Password = "testonly"} 13 | L2RPC = "http://localhost:8123" 14 | PolygonBridgeAddress = "0xFe12ABaa190Ef0c8638Ee0ba9F828BF41368Ca0E" 15 | ClaimCompressorAddress = "0x2279B7A0a67DB372996a5FaB50D91eAA73d2eBe6" 16 | -------------------------------------------------------------------------------- /autoclaimservice/config/default.go: -------------------------------------------------------------------------------- 1 | package config 2 | 3 | import ( 4 | "bytes" 5 | 6 | "github.com/mitchellh/mapstructure" 7 | "github.com/spf13/viper" 8 | ) 9 | 10 | // DefaultValues is the default configuration 11 | const DefaultValues = ` 12 | [Log] 13 | Level = "debug" 14 | Outputs = ["stdout"] 15 | 16 | [AutoClaim] 17 | AuthorizedClaimMessageAddresses = [] 18 | AutoClaimInterval = "10m" 19 | MaxNumberOfClaimsPerGroup = 10 20 | BridgeURL = "http://localhost:8080" 21 | 22 | [BlockchainManager] 23 | PrivateKey = {Path = "./test/test.keystore", Password = "testonly"} 24 | L2RPC = "http://localhost:8123" 25 | PolygonBridgeAddress = "0xFe12ABaa190Ef0c8638Ee0ba9F828BF41368Ca0E" 26 | ClaimCompressorAddress = "0x0000000000000000000000000000000000000000" 27 | 28 | ` 29 | 30 | // Default parses the default configuration values. 
31 | func Default() (*Config, error) { 32 | var cfg Config 33 | viper.SetConfigType("toml") 34 | 35 | err := viper.ReadConfig(bytes.NewBuffer([]byte(DefaultValues))) 36 | if err != nil { 37 | return nil, err 38 | } 39 | err = viper.Unmarshal(&cfg, viper.DecodeHook(mapstructure.TextUnmarshallerHookFunc())) 40 | if err != nil { 41 | return nil, err 42 | } 43 | return &cfg, nil 44 | } 45 | -------------------------------------------------------------------------------- /autoclaimservice/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "fmt" 5 | "os" 6 | "os/signal" 7 | 8 | zkevmbridgeservice "github.com/0xPolygonHermez/zkevm-bridge-service" 9 | "github.com/0xPolygonHermez/zkevm-bridge-service/autoclaimservice/autoclaim" 10 | "github.com/0xPolygonHermez/zkevm-bridge-service/autoclaimservice/blockchainmanager" 11 | "github.com/0xPolygonHermez/zkevm-bridge-service/autoclaimservice/config" 12 | "github.com/0xPolygonHermez/zkevm-bridge-service/log" 13 | cli "github.com/urfave/cli/v2" 14 | ) 15 | 16 | const ( 17 | flagCfg = "cfg" 18 | flagNetwork = "network" 19 | ) 20 | 21 | const ( 22 | // App name 23 | appName = "zkevm-bridge" 24 | ) 25 | 26 | func main() { 27 | app := cli.NewApp() 28 | app.Name = appName 29 | app.Version = zkevmbridgeservice.Version 30 | flags := []cli.Flag{ 31 | &cli.StringFlag{ 32 | Name: flagCfg, 33 | Aliases: []string{"c"}, 34 | Usage: "Configuration `FILE`", 35 | Required: false, 36 | }, 37 | } 38 | 39 | app.Commands = []*cli.Command{ 40 | { 41 | Name: "version", 42 | Aliases: []string{}, 43 | Usage: "Application version and build", 44 | Action: version, 45 | }, 46 | { 47 | Name: "run", 48 | Aliases: []string{}, 49 | Usage: "Run the zkevm bridge", 50 | Action: run, 51 | Flags: flags, 52 | }, 53 | } 54 | 55 | err := app.Run(os.Args) 56 | if err != nil { 57 | fmt.Printf("\nError: %v\n", err) 58 | os.Exit(1) 59 | } 60 | } 61 | 62 | func version(*cli.Context) error { 63 | zkevmbridgeservice.PrintVersion(os.Stdout) 64 | return nil 65 | } 66 | 67 | func run(ctx *cli.Context) error { 68 | configFilePath := ctx.String(flagCfg) 69 | c, err := config.Load(configFilePath) 70 | if err != nil { 71 | return err 72 | } 73 | log.Init(c.Log) 74 | bm, err := blockchainmanager.NewClient(ctx.Context, &c.BlockchainManager) 75 | if err != nil { 76 | return err 77 | } 78 | ac, err := autoclaim.NewAutoClaim(ctx.Context, &c.AutoClaim, bm) 79 | if err != nil { 80 | return err 81 | } 82 | go ac.Start() 83 | 84 | // Wait for an in interrupt. 
85 | ch := make(chan os.Signal, 1) 86 | signal.Notify(ch, os.Interrupt) 87 | <-ch 88 | 89 | return nil 90 | } 91 | -------------------------------------------------------------------------------- /autoclaimservice/test.keystore.autoclaim: -------------------------------------------------------------------------------- 1 | {"address":"f39fd6e51aad88f6f4ce6ab8827279cfffb92266","crypto":{"cipher":"aes-128-ctr","ciphertext":"d005030a7684f3adad2447cbb27f63039eec2224c451eaa445de0d90502b9f3d","cipherparams":{"iv":"dc07a54bc7e388efa89c34d42f2ebdb4"},"kdf":"scrypt","kdfparams":{"dklen":32,"n":262144,"p":1,"r":8,"salt":"cf2ec55ecae11171de575112cfb16963570533a9c46fb774473ceb11519eb24a"},"mac":"3eb180d405a5da6e462b2adc00091c14856c91d574bf27348714506357d6e177"},"id":"035454db-6b6d-477f-8a79-ce24c10b185f","version":3} -------------------------------------------------------------------------------- /bridgectrl/bridgectrl.go: -------------------------------------------------------------------------------- 1 | package bridgectrl 2 | 3 | import ( 4 | "context" 5 | "math" 6 | 7 | "github.com/0xPolygonHermez/zkevm-bridge-service/etherman" 8 | "github.com/0xPolygonHermez/zkevm-bridge-service/log" 9 | "github.com/0xPolygonHermez/zkevm-bridge-service/utils/gerror" 10 | ) 11 | 12 | const ( 13 | // KeyLen is the length of key and value in the Merkle Tree 14 | KeyLen = 32 15 | ) 16 | 17 | // BridgeController struct 18 | type BridgeController struct { 19 | exitTrees []*MerkleTree 20 | rollupsTree *MerkleTree 21 | merkleTreeIDs map[uint32]uint8 22 | } 23 | 24 | // NewBridgeController creates new BridgeController. 25 | func NewBridgeController(ctx context.Context, cfg Config, networkIDs []uint32, mtStore interface{}) (*BridgeController, error) { 26 | var ( 27 | merkleTreeIDs = make(map[uint32]uint8) 28 | exitTrees []*MerkleTree 29 | ) 30 | 31 | for i, networkID := range networkIDs { 32 | merkleTreeIDs[networkID] = uint8(i) // nolint:gosec 33 | mt, err := NewMerkleTree(ctx, mtStore.(merkleTreeStore), cfg.Height, networkID) 34 | if err != nil { 35 | return nil, err 36 | } 37 | exitTrees = append(exitTrees, mt) 38 | } 39 | rollupsTree, err := NewMerkleTree(ctx, mtStore.(merkleTreeStore), cfg.Height, math.MaxInt32) 40 | if err != nil { 41 | log.Error("error creating rollupsTree. Error: ", err) 42 | return nil, err 43 | } 44 | 45 | return &BridgeController{ 46 | exitTrees: exitTrees, 47 | rollupsTree: rollupsTree, 48 | merkleTreeIDs: merkleTreeIDs, 49 | }, nil 50 | } 51 | 52 | func (bt *BridgeController) GetMerkleTreeID(networkID uint32) (uint8, error) { 53 | tID, found := bt.merkleTreeIDs[networkID] 54 | if !found { 55 | return 0, gerror.ErrNetworkNotRegister 56 | } 57 | return tID, nil 58 | } 59 | 60 | // AddDeposit adds deposit information to the bridge tree. 61 | func (bt *BridgeController) AddDeposit(ctx context.Context, deposit *etherman.Deposit, dbTx interface{}) error { 62 | leaf := hashDeposit(deposit) 63 | tID, err := bt.GetMerkleTreeID(deposit.NetworkID) 64 | if err != nil { 65 | return err 66 | } 67 | return bt.exitTrees[tID].addLeaf(ctx, deposit.Id, leaf, deposit.DepositCount, dbTx) 68 | } 69 | 70 | // ReorgMT reorg the specific merkle tree. 71 | func (bt *BridgeController) ReorgMT(ctx context.Context, depositCount uint32, networkID uint32, dbTx interface{}) error { 72 | tID, err := bt.GetMerkleTreeID(networkID) 73 | if err != nil { 74 | return err 75 | } 76 | return bt.exitTrees[tID].resetLeaf(ctx, depositCount, dbTx) 77 | } 78 | 79 | // RollbackMT resets the specific merkle tree. 
80 | func (bt *BridgeController) RollbackMT(ctx context.Context, networkID uint32, dbTx interface{}) error { 81 | tID, err := bt.GetMerkleTreeID(networkID) 82 | if err != nil { 83 | return err 84 | } 85 | return bt.exitTrees[tID].rollbackMT(ctx, networkID, dbTx) 86 | } 87 | 88 | // GetExitRoot returns the dedicated merkle tree's root. 89 | // only use for the test purpose 90 | func (bt *BridgeController) GetExitRoot(ctx context.Context, tID uint8, dbTx interface{}) ([]byte, error) { 91 | return bt.exitTrees[tID].getRoot(ctx, dbTx) 92 | } 93 | 94 | func (bt *BridgeController) AddRollupExitLeaf(ctx context.Context, rollupLeaf etherman.RollupExitLeaf, dbTx interface{}) error { 95 | err := bt.rollupsTree.addRollupExitLeaf(ctx, rollupLeaf, dbTx) 96 | if err != nil { 97 | log.Error("error adding rollupleaf. Error: ", err) 98 | return err 99 | } 100 | return nil 101 | } 102 | -------------------------------------------------------------------------------- /bridgectrl/config.go: -------------------------------------------------------------------------------- 1 | package bridgectrl 2 | 3 | // Config is state config 4 | type Config struct { 5 | // Height is the depth of the merkle tree 6 | Height uint8 7 | } 8 | -------------------------------------------------------------------------------- /bridgectrl/hash.go: -------------------------------------------------------------------------------- 1 | package bridgectrl 2 | 3 | import ( 4 | "encoding/binary" 5 | 6 | "github.com/0xPolygonHermez/zkevm-bridge-service/etherman" 7 | "github.com/iden3/go-iden3-crypto/keccak256" 8 | "golang.org/x/crypto/sha3" 9 | ) 10 | 11 | // Hash calculates the keccak hash of elements. 12 | func Hash(data ...[KeyLen]byte) [KeyLen]byte { 13 | var res [KeyLen]byte 14 | hash := sha3.NewLegacyKeccak256() 15 | for _, d := range data { 16 | hash.Write(d[:]) //nolint:errcheck,gosec 17 | } 18 | copy(res[:], hash.Sum(nil)) 19 | return res 20 | } 21 | 22 | // HashZero is an empty hash 23 | var HashZero = [KeyLen]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} 24 | 25 | func generateZeroHashes(height uint8) [][KeyLen]byte { 26 | var zeroHashes = [][KeyLen]byte{ 27 | HashZero, 28 | } 29 | // This generates a leaf = HashZero in position 0. In the rest of the positions that are equivalent to the ascending levels, 30 | // we set the hashes of the nodes. So all nodes from level i=5 will have the same value and same children nodes. 
31 | for i := 1; i <= int(height); i++ { 32 | zeroHashes = append(zeroHashes, Hash(zeroHashes[i-1], zeroHashes[i-1])) 33 | } 34 | return zeroHashes 35 | } 36 | 37 | func hashDeposit(deposit *etherman.Deposit) [KeyLen]byte { 38 | var res [KeyLen]byte 39 | origNet := make([]byte, 4) //nolint:mnd 40 | binary.BigEndian.PutUint32(origNet, uint32(deposit.OriginalNetwork)) 41 | destNet := make([]byte, 4) //nolint:mnd 42 | binary.BigEndian.PutUint32(destNet, uint32(deposit.DestinationNetwork)) 43 | var buf [KeyLen]byte 44 | metaHash := keccak256.Hash(deposit.Metadata) 45 | copy(res[:], keccak256.Hash([]byte{deposit.LeafType}, origNet, deposit.OriginalAddress[:], destNet, deposit.DestinationAddress[:], deposit.Amount.FillBytes(buf[:]), metaHash)) 46 | return res 47 | } 48 | -------------------------------------------------------------------------------- /bridgectrl/interfaces.go: -------------------------------------------------------------------------------- 1 | package bridgectrl 2 | 3 | import ( 4 | "context" 5 | 6 | "github.com/0xPolygonHermez/zkevm-bridge-service/etherman" 7 | "github.com/ethereum/go-ethereum/common" 8 | ) 9 | 10 | // merkleTreeStore interface for the Merkle Tree 11 | type merkleTreeStore interface { 12 | Get(ctx context.Context, key []byte, dbTx interface{}) ([][]byte, error) 13 | BulkSet(ctx context.Context, rows [][]interface{}, dbTx interface{}) error 14 | GetRoot(ctx context.Context, depositCount uint32, network uint32, dbTx interface{}) ([]byte, error) 15 | SetRoot(ctx context.Context, root []byte, depositID uint64, network uint32, dbTx interface{}) error 16 | GetLastDepositCount(ctx context.Context, networkID uint32, dbTx interface{}) (uint32, error) 17 | AddRollupExitLeaves(ctx context.Context, rows [][]interface{}, dbTx interface{}) error 18 | GetRollupExitLeavesByRoot(ctx context.Context, root common.Hash, dbTx interface{}) ([]etherman.RollupExitLeaf, error) 19 | GetLatestRollupExitLeaves(ctx context.Context, dbTx interface{}) ([]etherman.RollupExitLeaf, error) 20 | IsRollupExitRoot(ctx context.Context, root common.Hash, dbTx interface{}) (bool, error) 21 | } 22 | -------------------------------------------------------------------------------- /claimtxman/config.go: -------------------------------------------------------------------------------- 1 | package claimtxman 2 | 3 | import ( 4 | "github.com/0xPolygonHermez/zkevm-bridge-service/config/types" 5 | "github.com/ethereum/go-ethereum/common" 6 | ) 7 | 8 | // Config is configuration for L2 claim transaction manager 9 | type Config struct { 10 | //Enabled whether to enable this module 11 | Enabled bool `mapstructure:"Enabled"` 12 | // FrequencyToMonitorTxs frequency of the resending failed txs 13 | FrequencyToMonitorTxs types.Duration `mapstructure:"FrequencyToMonitorTxs"` 14 | // PrivateKey defines the key store file that is going 15 | // to be read in order to provide the private key to sign the claim txs 16 | PrivateKey types.KeystoreFileConfig `mapstructure:"PrivateKey"` 17 | // RetryInterval is time between each retry 18 | RetryInterval types.Duration `mapstructure:"RetryInterval"` 19 | // RetryNumber is the number of retries before giving up 20 | RetryNumber int `mapstructure:"RetryNumber"` 21 | // AuthorizedClaimMessageAddresses are the allowed address to bridge message with autoClaim 22 | AuthorizedClaimMessageAddresses []common.Address `mapstructure:"AuthorizedClaimMessageAddresses"` 23 | // Enables the ability to Claim bridges between L2s automatically 24 | AreClaimsBetweenL2sEnabled bool 
`mapstructure:"AreClaimsBetweenL2sEnabled"` 25 | // Claim tx manager will track the deposit during this time in number of blocks 26 | IgnoreDepositAfterXL1Blocks uint64 `mapstructure:"IgnoreDepositAfterXL1Blocks"` 27 | 28 | // GroupingClaims is the configuration for grouping claims 29 | GroupingClaims ConfigGroupingClaims `mapstructure:"GroupingClaims"` 30 | } 31 | 32 | type ConfigGroupingClaims struct { 33 | //Enabled whether to enable this module 34 | Enabled bool `mapstructure:"Enabled"` 35 | //FrequencyToProcessCompressedClaims wait time to process compressed claims 36 | FrequencyToProcessCompressedClaims types.Duration `mapstructure:"FrequencyToProcessCompressedClaims"` 37 | // TriggerNumberOfClaims is the number of claims to trigger sending the grouped claim tx 38 | TriggerNumberOfClaims int `mapstructure:"TriggerNumberOfClaims"` 39 | // MaxNumberOfClaimsPerGroup is the maximum number of claims per group 40 | MaxNumberOfClaimsPerGroup int `mapstructure:"MaxNumberOfClaimsPerGroup"` 41 | // TriggerRetainedClaimPeriod is maximum time that a claim can be retainer before creating a group 42 | TriggerRetainedClaimPeriod types.Duration `mapstructure:"TriggerRetainedClaimPeriod"` 43 | // MaxRetries is the maximum number of retries to send a compressed claim tx 44 | MaxRetries int32 `mapstructure:"MaxRetries"` 45 | // RetryInterval is time between each retry 46 | RetryInterval types.Duration `mapstructure:"RetryInterval"` 47 | // RetryTimeout is the maximum time to wait for a claim tx to be mined 48 | RetryTimeout types.Duration `mapstructure:"RetryTimeout"` 49 | // GasOffset is the offset for the gas estimation 50 | GasOffset uint64 `mapstructure:"GasOffset"` 51 | } 52 | -------------------------------------------------------------------------------- /claimtxman/groups_trigger.go: -------------------------------------------------------------------------------- 1 | package claimtxman 2 | 3 | import ( 4 | "time" 5 | 6 | ctmtypes "github.com/0xPolygonHermez/zkevm-bridge-service/claimtxman/types" 7 | "github.com/0xPolygonHermez/zkevm-bridge-service/log" 8 | ) 9 | 10 | type GroupsTrigger struct { 11 | Cfg ConfigGroupingClaims 12 | } 13 | 14 | func NewGroupsTrigger(cfg ConfigGroupingClaims) *GroupsTrigger { 15 | return &GroupsTrigger{Cfg: cfg} 16 | } 17 | 18 | func (t *GroupsTrigger) ChooseTxs(now time.Time, TxCandidatesForGroup []ctmtypes.MonitoredTx) []ctmtypes.MonitoredTx { 19 | if t.isRetainedPeriodSurpassed(now, TxCandidatesForGroup) { 20 | return t.chooseGroupTx(TxCandidatesForGroup) 21 | } 22 | if len(TxCandidatesForGroup) >= t.Cfg.TriggerNumberOfClaims { 23 | return t.chooseGroupTx(TxCandidatesForGroup) 24 | } 25 | return nil 26 | } 27 | 28 | func (t *GroupsTrigger) isRetainedPeriodSurpassed(now time.Time, TxCandidatesForGroup []ctmtypes.MonitoredTx) bool { 29 | for _, tx := range TxCandidatesForGroup { 30 | elapsed := now.Sub(tx.CreatedAt) 31 | if elapsed > t.Cfg.TriggerRetainedClaimPeriod.Duration { 32 | log.Debugf("Claim deposit_id: %d has surpassed the retained period, elapsed: %s", tx.DepositID, elapsed) 33 | return true 34 | } 35 | } 36 | return false 37 | } 38 | 39 | func (t *GroupsTrigger) chooseGroupTx(TxCandidatesForGroup []ctmtypes.MonitoredTx) []ctmtypes.MonitoredTx { 40 | group := []ctmtypes.MonitoredTx{} 41 | for _, tx := range TxCandidatesForGroup { 42 | group = append(group, tx) 43 | if len(group) == t.Cfg.MaxNumberOfClaimsPerGroup { 44 | break 45 | } 46 | } 47 | return group 48 | } 49 | -------------------------------------------------------------------------------- 
/claimtxman/interfaces.go: -------------------------------------------------------------------------------- 1 | package claimtxman 2 | 3 | import ( 4 | "context" 5 | 6 | "github.com/0xPolygonHermez/zkevm-bridge-service/bridgectrl" 7 | "github.com/0xPolygonHermez/zkevm-bridge-service/claimtxman/types" 8 | "github.com/0xPolygonHermez/zkevm-bridge-service/etherman" 9 | "github.com/ethereum/go-ethereum/common" 10 | ) 11 | 12 | type StorageInterface interface { 13 | AddBlock(ctx context.Context, block *etherman.Block, dbTx interface{}) (uint64, error) 14 | UpdateL1DepositsStatus(ctx context.Context, exitRoot []byte, destinationNetwork uint32, dbTx interface{}) error 15 | UpdateL2DepositsStatus(ctx context.Context, exitRoot []byte, rollupID, networkID uint32, dbTx interface{}) error 16 | GetDepositsFromOtherL2ToClaim(ctx context.Context, destinationNetwork uint32, dbTx interface{}) ([]*etherman.Deposit, error) 17 | GetPendingDepositsToClaim(ctx context.Context, destAddress common.Address, destNetwork, leafType, limit, offset uint32, fromNetwork int8, dbTx interface{}) ([]*etherman.Deposit, uint64, error) 18 | GetLatestTrustedGERByDeposit(ctx context.Context, depositCnt, networkID, destinationNetwork uint32, dbTx interface{}) (common.Hash, error) 19 | AddClaimTx(ctx context.Context, mTx types.MonitoredTx, dbTx interface{}) error 20 | UpdateClaimTx(ctx context.Context, mTx types.MonitoredTx, dbTx interface{}) error 21 | GetClaimTxsByStatus(ctx context.Context, statuses []types.MonitoredTxStatus, rollupID uint32, dbTx interface{}) ([]types.MonitoredTx, error) 22 | GetDepositByDepositID(ctx context.Context, depositID uint64, dbTx interface{}) (*etherman.Deposit, error) 23 | IgnoreDeposit(ctx context.Context, depositID uint64, dbTx interface{}) error 24 | // atomic 25 | Rollback(ctx context.Context, dbTx interface{}) error 26 | BeginDBTransaction(ctx context.Context) (interface{}, error) 27 | Commit(ctx context.Context, dbTx interface{}) error 28 | } 29 | 30 | type bridgeServiceInterface interface { 31 | GetClaimProofForCompressed(ctx context.Context, ger common.Hash, depositCnt, networkID uint32, dbTx interface{}) (*etherman.GlobalExitRoot, [][bridgectrl.KeyLen]byte, [][bridgectrl.KeyLen]byte, error) 32 | GetDepositStatus(ctx context.Context, depositCount, networkID, destNetworkID uint32) (string, error) 33 | } 34 | -------------------------------------------------------------------------------- /claimtxman/mocks/tx_monitorer.go: -------------------------------------------------------------------------------- 1 | // Code generated by mockery. DO NOT EDIT. 
2 | 3 | package mock_txcompressor 4 | 5 | import mock "github.com/stretchr/testify/mock" 6 | 7 | // TxMonitorer is an autogenerated mock type for the TxMonitorer type 8 | type TxMonitorer struct { 9 | mock.Mock 10 | } 11 | 12 | type TxMonitorer_Expecter struct { 13 | mock *mock.Mock 14 | } 15 | 16 | func (_m *TxMonitorer) EXPECT() *TxMonitorer_Expecter { 17 | return &TxMonitorer_Expecter{mock: &_m.Mock} 18 | } 19 | 20 | // MonitorTxs provides a mock function with no fields 21 | func (_m *TxMonitorer) MonitorTxs() error { 22 | ret := _m.Called() 23 | 24 | if len(ret) == 0 { 25 | panic("no return value specified for MonitorTxs") 26 | } 27 | 28 | var r0 error 29 | if rf, ok := ret.Get(0).(func() error); ok { 30 | r0 = rf() 31 | } else { 32 | r0 = ret.Error(0) 33 | } 34 | 35 | return r0 36 | } 37 | 38 | // TxMonitorer_MonitorTxs_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'MonitorTxs' 39 | type TxMonitorer_MonitorTxs_Call struct { 40 | *mock.Call 41 | } 42 | 43 | // MonitorTxs is a helper method to define mock.On call 44 | func (_e *TxMonitorer_Expecter) MonitorTxs() *TxMonitorer_MonitorTxs_Call { 45 | return &TxMonitorer_MonitorTxs_Call{Call: _e.mock.On("MonitorTxs")} 46 | } 47 | 48 | func (_c *TxMonitorer_MonitorTxs_Call) Run(run func()) *TxMonitorer_MonitorTxs_Call { 49 | _c.Call.Run(func(args mock.Arguments) { 50 | run() 51 | }) 52 | return _c 53 | } 54 | 55 | func (_c *TxMonitorer_MonitorTxs_Call) Return(_a0 error) *TxMonitorer_MonitorTxs_Call { 56 | _c.Call.Return(_a0) 57 | return _c 58 | } 59 | 60 | func (_c *TxMonitorer_MonitorTxs_Call) RunAndReturn(run func() error) *TxMonitorer_MonitorTxs_Call { 61 | _c.Call.Return(run) 62 | return _c 63 | } 64 | 65 | // NewTxMonitorer creates a new instance of TxMonitorer. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. 66 | // The first argument is typically a *testing.T value. 
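//
// Illustrative usage (editorial sketch, not generated code): in a test the mock is
// typically constructed with the *testing.T and programmed through EXPECT, e.g.
//
//	m := mock_txcompressor.NewTxMonitorer(t)
//	m.EXPECT().MonitorTxs().Return(nil)
//	err := m.MonitorTxs() // err is nil; expectations are asserted on test cleanup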
67 | func NewTxMonitorer(t interface { 68 | mock.TestingT 69 | Cleanup(func()) 70 | }) *TxMonitorer { 71 | mock := &TxMonitorer{} 72 | mock.Mock.Test(t) 73 | 74 | t.Cleanup(func() { mock.AssertExpectations(t) }) 75 | 76 | return mock 77 | } 78 | -------------------------------------------------------------------------------- /claimtxman/monitor_compressed_txs_test.go: -------------------------------------------------------------------------------- 1 | package claimtxman_test 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/0xPolygonHermez/zkevm-bridge-service/claimtxman" 7 | ctmtypes "github.com/0xPolygonHermez/zkevm-bridge-service/claimtxman/types" 8 | deepcopy "github.com/barkimedes/go-deepcopy" 9 | "github.com/stretchr/testify/require" 10 | ) 11 | 12 | func TestDeepCopy(t *testing.T) { 13 | pendingTx := &claimtxman.PendingTxs{ 14 | GroupTx: make(map[uint64]*ctmtypes.MonitoredTxGroup), 15 | TxCandidatesForGroup: make([]ctmtypes.MonitoredTx, 0), 16 | LastGroupTxID: 0, 17 | } 18 | pendingTx.AddGroup(ctmtypes.MonitoredTxGroup{ 19 | DbEntry: ctmtypes.MonitoredTxGroupDBEntry{ 20 | CompressedTxData: []byte{}, 21 | DepositIDs: []uint64{}, 22 | }, 23 | Txs: []ctmtypes.MonitoredTx{}, 24 | }) 25 | 26 | initialStatus, err := deepcopy.Anything(pendingTx) 27 | require.NoError(t, err) 28 | copied := *initialStatus.(*claimtxman.PendingTxs) 29 | require.Equal(t, pendingTx, &copied) 30 | require.Equal(t, len(pendingTx.GroupTx), len(copied.GroupTx)) 31 | require.Equal(t, pendingTx.GroupTx[0].DbEntry, copied.GroupTx[0].DbEntry) 32 | require.Equal(t, pendingTx.GroupTx[0].Txs, copied.GroupTx[0].Txs) 33 | require.Equal(t, pendingTx.LastGroupTxID, copied.LastGroupTxID) 34 | require.Equal(t, pendingTx.TxCandidatesForGroup, copied.TxCandidatesForGroup) 35 | } 36 | 37 | func TestDeepCopy2(t *testing.T) { 38 | mTxs := make([]ctmtypes.MonitoredTx, 0) 39 | groups := make(map[uint64]ctmtypes.MonitoredTxGroupDBEntry) 40 | groups[uint64(1)] = ctmtypes.MonitoredTxGroupDBEntry{} 41 | 42 | lastGroupID := uint64(0) 43 | 44 | pendingTx, err := claimtxman.NewPendingTxs(mTxs, groups, lastGroupID) 45 | require.NoError(t, err) 46 | 47 | initialStatus, err := deepcopy.Anything(&pendingTx) 48 | require.NoError(t, err) 49 | copied := *initialStatus.(*claimtxman.PendingTxs) 50 | require.Equal(t, pendingTx, copied) 51 | } 52 | -------------------------------------------------------------------------------- /claimtxman/nonce_cache.go: -------------------------------------------------------------------------------- 1 | package claimtxman 2 | 3 | import ( 4 | "context" 5 | 6 | "github.com/0xPolygonHermez/zkevm-bridge-service/utils" 7 | "github.com/ethereum/go-ethereum/common" 8 | lru "github.com/hashicorp/golang-lru/v2" 9 | ) 10 | 11 | const ( 12 | cacheSize = 1000 13 | ) 14 | 15 | type NonceCache struct { 16 | ctx context.Context 17 | // client is the ethereum client 18 | l2Node *utils.Client 19 | nonceCache *lru.Cache[string, uint64] 20 | } 21 | 22 | func NewNonceCache(ctx context.Context, l2Node *utils.Client) (*NonceCache, error) { 23 | cache, err := lru.New[string, uint64](int(cacheSize)) 24 | if err != nil { 25 | return nil, err 26 | } 27 | return &NonceCache{ 28 | ctx: ctx, 29 | l2Node: l2Node, 30 | nonceCache: cache, 31 | }, nil 32 | } 33 | 34 | func (tm *NonceCache) GetNextNonce(from common.Address) (uint64, error) { 35 | nonce, err := tm.l2Node.NonceAt(tm.ctx, from, nil) 36 | if err != nil { 37 | return 0, err 38 | } 39 | if tempNonce, found := tm.nonceCache.Get(from.Hex()); found { 40 | if tempNonce >= nonce { 41 | nonce = 
tempNonce + 1 42 | } 43 | } 44 | tm.nonceCache.Add(from.Hex(), nonce) 45 | return nonce, nil 46 | } 47 | 48 | func (tm *NonceCache) Remove(from string) { 49 | tm.nonceCache.Remove(from) 50 | } 51 | -------------------------------------------------------------------------------- /claimtxman/store_changes.go: -------------------------------------------------------------------------------- 1 | package claimtxman 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "time" 7 | 8 | ctmtypes "github.com/0xPolygonHermez/zkevm-bridge-service/claimtxman/types" 9 | "github.com/0xPolygonHermez/zkevm-bridge-service/log" 10 | ) 11 | 12 | type StoreChanges struct { 13 | AddGroups []ctmtypes.MonitoredTxGroupDBEntry 14 | UpdateGroups []ctmtypes.MonitoredTxGroupDBEntry 15 | UpdateTxs []ctmtypes.MonitoredTx 16 | } 17 | 18 | func NewStoreChanges() *StoreChanges { 19 | return &StoreChanges{} 20 | } 21 | func (sc *StoreChanges) AddGroup(group ctmtypes.MonitoredTxGroupDBEntry) { 22 | sc.AddGroups = append(sc.AddGroups, group) 23 | } 24 | func (sc *StoreChanges) UpdateGroup(group ctmtypes.MonitoredTxGroupDBEntry) { 25 | sc.UpdateGroups = append(sc.UpdateGroups, group) 26 | } 27 | func (sc *StoreChanges) UpdateTx(tx ctmtypes.MonitoredTx) { 28 | sc.UpdateTxs = append(sc.UpdateTxs, tx) 29 | } 30 | 31 | func (sc *StoreChanges) Execute(ctx context.Context, storage StorageCompressedInterface, dbTx interface{}) error { 32 | for i := range sc.AddGroups { 33 | log.Debugf("Adding group %d ", sc.AddGroups[i].GroupID) 34 | err := storage.AddMonitoredTxsGroup(ctx, &sc.AddGroups[i], dbTx) 35 | if err != nil { 36 | return fmt.Errorf("storeChanges.Execute error adding MonitoresTxGroup. Err: %w", err) 37 | } 38 | log.Infof("Added group %d", sc.AddGroups[i].GroupID) 39 | } 40 | 41 | for i := range sc.UpdateGroups { 42 | sc.UpdateGroups[i].UpdatedAt = time.Now() 43 | err := storage.UpdateMonitoredTxsGroup(ctx, &sc.UpdateGroups[i], dbTx) 44 | if err != nil { 45 | return err 46 | } 47 | log.Infof("Updated group %d", sc.UpdateGroups[i].GroupID) 48 | } 49 | 50 | for i := range sc.UpdateTxs { 51 | if sc.UpdateTxs[i].GroupID != nil { 52 | log.Debugf("Updating tx deposit_id: %d. Group_id:%d", sc.UpdateTxs[i].DepositID, *sc.UpdateTxs[i].GroupID) 53 | } else { 54 | log.Warnf("Updating tx deposit_id: %d. Group_id is nil", sc.UpdateTxs[i].DepositID) 55 | } 56 | err := storage.UpdateClaimTx(ctx, sc.UpdateTxs[i], dbTx) 57 | if err != nil { 58 | return fmt.Errorf("storeChanges.Execute error UpdateClaimTx. 
Err: %w", err) 59 | } 60 | } 61 | return nil 62 | } 63 | -------------------------------------------------------------------------------- /claimtxman/types/monitoredtx_test.go: -------------------------------------------------------------------------------- 1 | package types 2 | 3 | import ( 4 | "math/big" 5 | "testing" 6 | 7 | "github.com/ethereum/go-ethereum/common" 8 | "github.com/ethereum/go-ethereum/core/types" 9 | "github.com/stretchr/testify/assert" 10 | "github.com/stretchr/testify/require" 11 | ) 12 | 13 | func TestHistoryHashSlice(t *testing.T) { 14 | mTx := MonitoredTx{ 15 | History: make(map[common.Hash]bool), 16 | } 17 | tx1 := types.NewTransaction(0, common.HexToAddress("0xf39fd6e51aad88f6f4ce6ab8827279cfffb92266"), big.NewInt(10), 100000, big.NewInt(1000000000), []byte{}) 18 | tx2 := types.NewTransaction(1, common.HexToAddress("0xf39fd6e51aad88f6f4ce6ab8827279cfffb92266"), big.NewInt(11), 100001, big.NewInt(1000000010), []byte{}) 19 | txs := []*types.Transaction{tx1, tx2} 20 | err := mTx.AddHistory(tx1) 21 | require.NoError(t, err) 22 | history := mTx.HistoryHashSlice() 23 | t.Logf("history: %s", common.Bytes2Hex(history[0])) 24 | assert.Equal(t, txs[0].Hash(), common.BytesToHash(history[0])) 25 | t.Log("TEST1: ", txs[0].Hash(), common.BytesToHash(history[0])) 26 | 27 | err = mTx.AddHistory(tx2) 28 | require.NoError(t, err) 29 | history = mTx.HistoryHashSlice() 30 | var eq []bool 31 | for i := range history { 32 | t.Logf("history %d: %s", i, common.Bytes2Hex(history[i])) 33 | for j := range txs { 34 | if common.BytesToHash(history[i]) == txs[j].Hash() { 35 | eq = append(eq, true) 36 | t.Log("TEST2: ", txs[j].Hash(), common.BytesToHash(history[i])) 37 | } 38 | } 39 | } 40 | assert.Equal(t, 2, len(eq)) 41 | 42 | mTx.RemoveHistory(tx1) 43 | history = mTx.HistoryHashSlice() 44 | t.Logf("history %d: %s", 0, common.Bytes2Hex(history[0])) 45 | assert.Equal(t, txs[1].Hash(), common.BytesToHash(history[0])) 46 | t.Log("TEST3: ", txs[1].Hash(), common.BytesToHash(history[0])) 47 | } 48 | 49 | func TestHistoryHashesString(t *testing.T) { 50 | tx := MonitoredTx{ 51 | History: make(map[common.Hash]bool), 52 | } 53 | tx.History[common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000001")] = true 54 | tx.History[common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000002")] = true 55 | result := tx.HistoryHashesString() 56 | expectedResults := []string{"0x0000000000000000000000000000000000000000000000000000000000000001,0x0000000000000000000000000000000000000000000000000000000000000002", "0x0000000000000000000000000000000000000000000000000000000000000002,0x0000000000000000000000000000000000000000000000000000000000000001"} 57 | assert.Contains(t, expectedResults, result, "Result should be one of the expected solutions") 58 | } 59 | -------------------------------------------------------------------------------- /cmd/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "fmt" 5 | "os" 6 | 7 | zkevmbridgeservice "github.com/0xPolygonHermez/zkevm-bridge-service" 8 | cli "github.com/urfave/cli/v2" 9 | ) 10 | 11 | const ( 12 | flagCfg = "cfg" 13 | flagNetwork = "network" 14 | ) 15 | 16 | const ( 17 | // App name 18 | appName = "zkevm-bridge" 19 | ) 20 | 21 | func main() { 22 | app := cli.NewApp() 23 | app.Name = appName 24 | app.Version = zkevmbridgeservice.Version 25 | flags := []cli.Flag{ 26 | &cli.StringFlag{ 27 | Name: flagCfg, 28 | Aliases: []string{"c"}, 29 | Usage: 
"Configuration `FILE`", 30 | Required: false, 31 | }, 32 | &cli.StringFlag{ 33 | Name: flagNetwork, 34 | Aliases: []string{"n"}, 35 | Usage: "Network: mainnet, testnet, internaltestnet, local. By default it uses mainnet", 36 | Required: false, 37 | }, 38 | } 39 | 40 | app.Commands = []*cli.Command{ 41 | { 42 | Name: "version", 43 | Aliases: []string{}, 44 | Usage: "Application version and build", 45 | Action: versionCmd, 46 | }, 47 | { 48 | Name: "run", 49 | Aliases: []string{}, 50 | Usage: "Run the zkevm bridge", 51 | Action: start, 52 | Flags: flags, 53 | }, 54 | } 55 | 56 | err := app.Run(os.Args) 57 | if err != nil { 58 | fmt.Printf("\nError: %v\n", err) 59 | os.Exit(1) 60 | } 61 | } 62 | -------------------------------------------------------------------------------- /cmd/version.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "os" 5 | 6 | zkevmbridgeservice "github.com/0xPolygonHermez/zkevm-bridge-service" 7 | cli "github.com/urfave/cli/v2" 8 | ) 9 | 10 | func versionCmd(*cli.Context) error { 11 | zkevmbridgeservice.PrintVersion(os.Stdout) 12 | return nil 13 | } 14 | -------------------------------------------------------------------------------- /config/config.debug.toml: -------------------------------------------------------------------------------- 1 | [Log] 2 | Level = "debug" 3 | Outputs = ["stdout"] 4 | 5 | [SyncDB] 6 | Database = "postgres" 7 | [SyncDB.PgStorage] 8 | User = "test_user" 9 | Password = "test_password" 10 | Name = "test_db" 11 | Host = "localhost" 12 | Port = "5435" 13 | MaxConns = 20 14 | 15 | [ClaimTxManager] 16 | Enabled = true 17 | FrequencyToMonitorTxs = "1s" 18 | PrivateKey = {Path = "../test/test.keystore.claimtx", Password = "testonly"} 19 | RetryInterval = "1s" 20 | RetryNumber = 10 21 | AuthorizedClaimMessageAddresses = ["0x90F79bf6EB2c4f870365E785982E1f101E93b906"] 22 | AreClaimsBetweenL2sEnabled = false 23 | IgnoreDepositAfterXL1Blocks = 300 #L1 blocks 24 | [ClaimTxManager.GroupingClaims] 25 | Enabled = false 26 | TriggerNumberOfClaims = 20 27 | MaxNumberOfClaimsPerGroup = 25 28 | TriggerRetainedClaimPeriod = "1m" 29 | MaxRetries = 2 30 | RetryInterval = "10s" 31 | RetryTimeout = "30s" 32 | FrequencyToProcessCompressedClaims = "1m" 33 | GasOffset = 100000 34 | 35 | [Etherman] 36 | L1URL = "http://localhost:8545" 37 | L2URLs = ["http://localhost:8123"] 38 | 39 | [Synchronizer] 40 | SyncInterval = "1s" 41 | SyncChunkSize = 100 42 | ForceL2SyncChunk = false 43 | 44 | [BridgeController] 45 | Height = 32 46 | 47 | [BridgeServer] 48 | GRPCPort = "9090" 49 | HTTPPort = "8080" 50 | CacheSize = 100000 51 | DefaultPageLimit = 25 52 | MaxPageLimit = 100 53 | FinalizedGEREnabled = true 54 | [BridgeServer.DB] 55 | Database = "postgres" 56 | [BridgeServer.DB.PgStorage] 57 | User = "test_user" 58 | Password = "test_password" 59 | Name = "test_db" 60 | Host = "localhost" 61 | Port = "5435" 62 | MaxConns = 20 63 | 64 | [NetworkConfig] 65 | GenBlockNumber = 0 66 | PolygonBridgeAddress = "0xFe12ABaa190Ef0c8638Ee0ba9F828BF41368Ca0E" 67 | PolygonZkEVMGlobalExitRootAddress = "0x8A791620dd6260079BF849Dc5567aDC3F2FdC318" 68 | PolygonRollupManagerAddress = "0xB7f8BC63BbcaD18155201308C8f3540b07f84F5e" 69 | L2ClaimCompressorAddress = "0x2279B7A0a67DB372996a5FaB50D91eAA73d2eBe6" 70 | L2PolygonBridgeAddresses = ["0xFe12ABaa190Ef0c8638Ee0ba9F828BF41368Ca0E"] 71 | RequireSovereignChainSmcs = [false] 72 | L2PolygonZkEVMGlobalExitRootAddresses = ["0xa40d5f56745a118d0906a34e69aec8c0db1cb8fa"] 73 | 74 | 
[Metrics] 75 | Enabled = false 76 | Host = "0.0.0.0" 77 | Port = 8090 -------------------------------------------------------------------------------- /config/config.go: -------------------------------------------------------------------------------- 1 | package config 2 | 3 | import ( 4 | "errors" 5 | "path/filepath" 6 | "strings" 7 | 8 | "github.com/0xPolygonHermez/zkevm-bridge-service/bridgectrl" 9 | "github.com/0xPolygonHermez/zkevm-bridge-service/claimtxman" 10 | "github.com/0xPolygonHermez/zkevm-bridge-service/db" 11 | "github.com/0xPolygonHermez/zkevm-bridge-service/etherman" 12 | "github.com/0xPolygonHermez/zkevm-bridge-service/log" 13 | "github.com/0xPolygonHermez/zkevm-bridge-service/metrics" 14 | "github.com/0xPolygonHermez/zkevm-bridge-service/server" 15 | "github.com/0xPolygonHermez/zkevm-bridge-service/synchronizer" 16 | "github.com/mitchellh/mapstructure" 17 | "github.com/spf13/viper" 18 | ) 19 | 20 | // Config struct 21 | type Config struct { 22 | Log log.Config 23 | SyncDB db.Config 24 | ClaimTxManager claimtxman.Config 25 | Etherman etherman.Config 26 | Synchronizer synchronizer.Config 27 | BridgeController bridgectrl.Config 28 | BridgeServer server.Config 29 | // Configuration of the metrics service where metrics will be published 30 | Metrics metrics.Config 31 | NetworkConfig 32 | } 33 | 34 | // Load loads the configuration 35 | func Load(configFilePath string, network string) (*Config, error) { 36 | cfg, err := Default() 37 | if err != nil { 38 | return nil, err 39 | } 40 | 41 | if configFilePath != "" { 42 | dirName, fileName := filepath.Split(configFilePath) 43 | 44 | fileExtension := strings.TrimPrefix(filepath.Ext(fileName), ".") 45 | fileNameWithoutExtension := strings.TrimSuffix(fileName, "."+fileExtension) 46 | 47 | viper.AddConfigPath(dirName) 48 | viper.SetConfigName(fileNameWithoutExtension) 49 | viper.SetConfigType(fileExtension) 50 | } 51 | 52 | viper.AutomaticEnv() 53 | replacer := strings.NewReplacer(".", "_") 54 | viper.SetEnvKeyReplacer(replacer) 55 | viper.SetEnvPrefix("ZKEVM_BRIDGE") 56 | 57 | if err = viper.ReadInConfig(); err != nil { 58 | if _, ok := err.(viper.ConfigFileNotFoundError); ok { 59 | log.Infof("config file not found") 60 | } else { 61 | log.Infof("error reading config file: ", err) 62 | return nil, err 63 | } 64 | } 65 | 66 | decodeHooks := []viper.DecoderConfigOption{ 67 | // this allows arrays to be decoded from env var separated by ",", example: MY_VAR="value1,value2,value3" 68 | viper.DecodeHook(mapstructure.ComposeDecodeHookFunc(mapstructure.TextUnmarshallerHookFunc(), mapstructure.StringToSliceHookFunc(","))), 69 | } 70 | err = viper.Unmarshal(&cfg, decodeHooks...) 71 | if err != nil { 72 | return nil, err 73 | } 74 | 75 | if viper.IsSet("NetworkConfig") && network != "" { 76 | return nil, errors.New("network details are provided in the config file (the [NetworkConfig] section) and as a flag (the --network or -n). Configure it only once and try again please") 77 | } 78 | if !viper.IsSet("NetworkConfig") && network == "" { 79 | return nil, errors.New("network details are not provided. 
Please configure the [NetworkConfig] section in your config file, or provide a --network flag") 80 | } 81 | if !viper.IsSet("NetworkConfig") && network != "" { 82 | cfg.loadNetworkConfig(network) 83 | } 84 | if len(cfg.L2PolygonZkEVMGlobalExitRootAddresses) != len(cfg.RequireSovereignChainSmcs) || 85 | len(cfg.RequireSovereignChainSmcs) != len(cfg.L2PolygonBridgeAddresses) || 86 | len(cfg.L2PolygonBridgeAddresses) != len(cfg.Etherman.L2URLs) { 87 | return nil, errors.New("the length of RequireSovereignChainSmcs, L2PolygonZkEVMGlobalExitRootAddresses, L2PolygonBridgeAddresses and L2URLs must be the same") 88 | } 89 | 90 | return cfg, nil 91 | } 92 | -------------------------------------------------------------------------------- /config/config.local.toml: -------------------------------------------------------------------------------- 1 | [Log] 2 | Level = "debug" 3 | Outputs = ["stdout"] 4 | 5 | [SyncDB] 6 | Database = "postgres" 7 | [SyncDB.PgStorage] 8 | User = "test_user" 9 | Password = "test_password" 10 | Name = "test_db" 11 | Host = "zkevm-bridge-db" 12 | Port = "5432" 13 | MaxConns = 20 14 | 15 | [ClaimTxManager] 16 | Enabled = true 17 | FrequencyToMonitorTxs = "1s" 18 | PrivateKey = {Path = "/pk/keystore.claimtxmanager", Password = "testonly"} 19 | RetryInterval = "1s" 20 | RetryNumber = 10 21 | AuthorizedClaimMessageAddresses = ["0x90F79bf6EB2c4f870365E785982E1f101E93b906"] 22 | AreClaimsBetweenL2sEnabled = false 23 | IgnoreDepositAfterXL1Blocks = 300 #L1 blocks 24 | [ClaimTxManager.GroupingClaims] 25 | Enabled = false 26 | TriggerNumberOfClaims = 20 27 | MaxNumberOfClaimsPerGroup = 25 28 | TriggerRetainedClaimPeriod = "1m" 29 | MaxRetries = 2 30 | RetryInterval = "10s" 31 | RetryTimeout = "30s" 32 | FrequencyToProcessCompressedClaims = "1m" 33 | GasOffset = 100000 34 | 35 | [Etherman] 36 | L1URL = "http://zkevm-mock-l1-network:8545" 37 | L2URLs = ["http://zkevm-node:8123"] 38 | 39 | [Synchronizer] 40 | SyncInterval = "1s" 41 | SyncChunkSize = 100 42 | ForceL2SyncChunk = false 43 | 44 | [BridgeController] 45 | Height = 32 46 | 47 | [BridgeServer] 48 | GRPCPort = "9090" 49 | HTTPPort = "8080" 50 | CacheSize = 100000 51 | DefaultPageLimit = 25 52 | MaxPageLimit = 100 53 | FinalizedGEREnabled = true 54 | [BridgeServer.DB] 55 | Database = "postgres" 56 | [BridgeServer.DB.PgStorage] 57 | User = "test_user" 58 | Password = "test_password" 59 | Name = "test_db" 60 | Host = "zkevm-bridge-db" 61 | Port = "5432" 62 | MaxConns = 20 63 | 64 | [NetworkConfig] 65 | GenBlockNumber = 0 66 | PolygonBridgeAddress = "0xFe12ABaa190Ef0c8638Ee0ba9F828BF41368Ca0E" 67 | PolygonZkEVMGlobalExitRootAddress = "0x8A791620dd6260079BF849Dc5567aDC3F2FdC318" 68 | PolygonRollupManagerAddress = "0xB7f8BC63BbcaD18155201308C8f3540b07f84F5e" 69 | L2ClaimCompressorAddress = "0x2279B7A0a67DB372996a5FaB50D91eAA73d2eBe6" 70 | L2PolygonBridgeAddresses = ["0xFe12ABaa190Ef0c8638Ee0ba9F828BF41368Ca0E"] 71 | RequireSovereignChainSmcs = [false] 72 | L2PolygonZkEVMGlobalExitRootAddresses = ["0xa40d5f56745a118d0906a34e69aec8c0db1cb8fa"] 73 | 74 | [Metrics] 75 | Enabled = false 76 | Host = "0.0.0.0" 77 | Port = 8090 -------------------------------------------------------------------------------- /config/default.go: -------------------------------------------------------------------------------- 1 | package config 2 | 3 | import ( 4 | "bytes" 5 | 6 | "github.com/mitchellh/mapstructure" 7 | "github.com/spf13/viper" 8 | ) 9 | 10 | // DefaultValues is the default configuration 11 | const DefaultValues = ` 12 | [Log] 13 | Level = 
"debug" 14 | Outputs = ["stdout"] 15 | 16 | [SyncDB] 17 | Database = "postgres" 18 | [SyncDB.PgStorage] 19 | User = "test_user" 20 | Password = "test_password" 21 | Name = "test_db" 22 | Host = "zkevm-bridge-db" 23 | Port = "5432" 24 | MaxConns = 20 25 | 26 | [ClaimTxManager] 27 | Enabled = false 28 | FrequencyToMonitorTxs = "1s" 29 | PrivateKey = {Path = "./test/test.keystore", Password = "testonly"} 30 | RetryInterval = "1s" 31 | RetryNumber = 10 32 | AuthorizedClaimMessageAddresses = [] 33 | AreClaimsBetweenL2sEnabled = false 34 | IgnoreDepositAfterXL1Blocks = 300 35 | [ClaimTxManager.GroupingClaims] 36 | Enabled = false 37 | FrequencyToProcessCompressedClaims = "10m" 38 | TriggerNumberOfClaims = 10 39 | MaxNumberOfClaimsPerGroup = 10 40 | TriggerRetainedClaimPeriod = "30s" 41 | MaxRetries = 2 42 | RetryInterval = "10s" 43 | RetryTimeout = "30s" 44 | GasOffset = 0 45 | 46 | 47 | [Etherman] 48 | L1URL = "http://localhost:8545" 49 | L2URLs = [""] 50 | 51 | [Synchronizer] 52 | SyncInterval = "2s" 53 | SyncChunkSize = 100 54 | ForceL2SyncChunk = false 55 | 56 | [BridgeController] 57 | Height = 32 58 | 59 | [BridgeServer] 60 | GRPCPort = "9090" 61 | HTTPPort = "8080" 62 | DefaultPageLimit = 25 63 | CacheSize = 100000 64 | MaxPageLimit = 100 65 | FinalizedGEREnabled = false 66 | [BridgeServer.DB] 67 | Database = "postgres" 68 | [BridgeServer.DB.PgStorage] 69 | User = "test_user" 70 | Password = "test_password" 71 | Name = "test_db" 72 | Host = "zkevm-bridge-db" 73 | Port = "5432" 74 | MaxConns = 20 75 | 76 | [Metrics] 77 | Enabled = false 78 | Host = "0.0.0.0" 79 | Port = 8090 80 | ` 81 | 82 | // Default parses the default configuration values. 83 | func Default() (*Config, error) { 84 | var cfg Config 85 | viper.SetConfigType("toml") 86 | 87 | err := viper.ReadConfig(bytes.NewBuffer([]byte(DefaultValues))) 88 | if err != nil { 89 | return nil, err 90 | } 91 | err = viper.Unmarshal(&cfg, viper.DecodeHook(mapstructure.TextUnmarshallerHookFunc())) 92 | if err != nil { 93 | return nil, err 94 | } 95 | return &cfg, nil 96 | } 97 | -------------------------------------------------------------------------------- /config/types/duration.go: -------------------------------------------------------------------------------- 1 | package types 2 | 3 | import ( 4 | "time" 5 | 6 | "github.com/invopop/jsonschema" 7 | ) 8 | 9 | // Duration is a wrapper type that parses time duration from text. 10 | type Duration struct { 11 | time.Duration `validate:"required"` 12 | } 13 | 14 | // UnmarshalText unmarshalls time duration from text. 
15 | func (d *Duration) UnmarshalText(data []byte) error { 16 | duration, err := time.ParseDuration(string(data)) 17 | if err != nil { 18 | return err 19 | } 20 | d.Duration = duration 21 | return nil 22 | } 23 | 24 | // NewDuration returns Duration wrapper 25 | func NewDuration(duration time.Duration) Duration { 26 | return Duration{duration} 27 | } 28 | 29 | // JSONSchema returns a custom schema to be used for the JSON Schema generation of this type 30 | func (Duration) JSONSchema() *jsonschema.Schema { 31 | return &jsonschema.Schema{ 32 | Type: "string", 33 | Title: "Duration", 34 | Description: "Duration expressed in units: [ns, us, ms, s, m, h, d]", 35 | Examples: []interface{}{ 36 | "1m", 37 | "300ms", 38 | }, 39 | } 40 | } 41 | -------------------------------------------------------------------------------- /config/types/keystore.go: -------------------------------------------------------------------------------- 1 | package types 2 | 3 | // KeystoreFileConfig has all the information needed to load a private key from a key store file 4 | type KeystoreFileConfig struct { 5 | // Path is the file path for the key store file 6 | Path string `mapstructure:"Path"` 7 | 8 | // Password is the password to decrypt the key store file 9 | Password string `mapstructure:"Password"` 10 | } 11 | -------------------------------------------------------------------------------- /db/config.go: -------------------------------------------------------------------------------- 1 | package db 2 | 3 | import ( 4 | "github.com/0xPolygonHermez/zkevm-bridge-service/db/pgstorage" 5 | ) 6 | 7 | // Config struct 8 | type Config struct { 9 | // Database type 10 | Database string `mapstructure:"Database"` 11 | 12 | PgStorage pgstorage.Config 13 | } 14 | -------------------------------------------------------------------------------- /db/pgstorage/config.go: -------------------------------------------------------------------------------- 1 | package pgstorage 2 | 3 | // Config struct 4 | type Config struct { 5 | // Database name 6 | Name string `mapstructure:"Name"` 7 | 8 | // User name 9 | User string `mapstructure:"User"` 10 | 11 | // Password of the user 12 | Password string `mapstructure:"Password"` 13 | 14 | // Host address 15 | Host string `mapstructure:"Host"` 16 | 17 | // Port Number 18 | Port string `mapstructure:"Port"` 19 | 20 | // MaxConns is the maximum number of connections in the pool. 
21 | MaxConns int `mapstructure:"MaxConns"` 22 | } 23 | -------------------------------------------------------------------------------- /db/pgstorage/interfaces.go: -------------------------------------------------------------------------------- 1 | package pgstorage 2 | 3 | import ( 4 | "context" 5 | 6 | "github.com/jackc/pgconn" 7 | pgx "github.com/jackc/pgx/v4" 8 | ) 9 | 10 | type execQuerier interface { 11 | Exec(ctx context.Context, sql string, arguments ...interface{}) (commandTag pgconn.CommandTag, err error) 12 | Query(ctx context.Context, sql string, args ...interface{}) (pgx.Rows, error) 13 | QueryRow(ctx context.Context, sql string, args ...interface{}) pgx.Row 14 | CopyFrom(ctx context.Context, tableName pgx.Identifier, columnNames []string, rowSrc pgx.CopyFromSource) (int64, error) 15 | } 16 | -------------------------------------------------------------------------------- /db/pgstorage/migrations/0001.sql: -------------------------------------------------------------------------------- 1 | -- +migrate Down 2 | DROP SCHEMA IF EXISTS syncv2 CASCADE; 3 | DROP SCHEMA IF EXISTS mtv2 CASCADE; 4 | 5 | -- +migrate Up 6 | CREATE SCHEMA syncv2; 7 | CREATE SCHEMA mtv2; 8 | 9 | -- History 10 | CREATE TABLE syncv2.block 11 | ( 12 | id SERIAL PRIMARY KEY, 13 | block_num BIGINT, 14 | block_hash BYTEA NOT NULL, 15 | parent_hash BYTEA, 16 | network_id INTEGER, 17 | 18 | received_at TIMESTAMP WITH TIME ZONE NOT NULL 19 | ); 20 | 21 | -- insert the block with block_id = 0 for the trusted exit root table 22 | INSERT INTO syncv2.block (id, block_hash, received_at) VALUES (0, '\\x0', to_timestamp(0)); 23 | 24 | CREATE TABLE syncv2.exit_root 25 | ( 26 | id SERIAL, 27 | block_id BIGINT REFERENCES syncv2.block (id) ON DELETE CASCADE, 28 | global_exit_root BYTEA, 29 | exit_roots BYTEA[], 30 | PRIMARY KEY (id), 31 | CONSTRAINT UC UNIQUE (block_id, global_exit_root) 32 | ); 33 | 34 | CREATE TABLE syncv2.batch 35 | ( 36 | batch_num BIGINT PRIMARY KEY, 37 | sequencer BYTEA, 38 | raw_tx_data BYTEA, 39 | global_exit_root BYTEA, 40 | timestamp TIMESTAMP WITH TIME ZONE 41 | ); 42 | 43 | CREATE TABLE syncv2.verified_batch 44 | ( 45 | batch_num BIGINT PRIMARY KEY REFERENCES syncv2.batch (batch_num), 46 | aggregator BYTEA, 47 | tx_hash BYTEA, 48 | block_id BIGINT NOT NULL REFERENCES syncv2.block (id) ON DELETE CASCADE 49 | ); 50 | 51 | CREATE TABLE syncv2.forced_batch 52 | ( 53 | batch_num BIGINT, 54 | block_id BIGINT NOT NULL REFERENCES syncv2.block (id) ON DELETE CASCADE, 55 | forced_batch_num BIGINT, 56 | sequencer BYTEA, 57 | global_exit_root BYTEA, 58 | raw_tx_data BYTEA 59 | ); 60 | 61 | CREATE TABLE syncv2.deposit 62 | ( 63 | leaf_type INTEGER, 64 | network_id INTEGER, 65 | orig_net INTEGER, 66 | orig_addr BYTEA NOT NULL, 67 | amount VARCHAR, 68 | dest_net INTEGER NOT NULL, 69 | dest_addr BYTEA NOT NULL, 70 | block_id BIGINT NOT NULL REFERENCES syncv2.block (id) ON DELETE CASCADE, 71 | deposit_cnt BIGINT, 72 | tx_hash BYTEA NOT NULL, 73 | metadata BYTEA NOT NULL, 74 | PRIMARY KEY (network_id, deposit_cnt) 75 | ); 76 | 77 | CREATE TABLE syncv2.claim 78 | ( 79 | network_id INTEGER NOT NULL, 80 | index BIGINT, -- deposit count 81 | orig_net INTEGER, 82 | orig_addr BYTEA NOT NULL, 83 | amount VARCHAR, 84 | dest_addr BYTEA NOT NULL, 85 | block_id BIGINT NOT NULL REFERENCES syncv2.block (id) ON DELETE CASCADE, 86 | tx_hash BYTEA NOT NULL, 87 | PRIMARY KEY (network_id, index) 88 | ); 89 | 90 | CREATE TABLE syncv2.token_wrapped 91 | ( 92 | network_id INTEGER NOT NULL, 93 | orig_net INTEGER, 94 | orig_token_addr 
BYTEA NOT NULL, 95 | wrapped_token_addr BYTEA NOT NULL, 96 | block_id BIGINT NOT NULL REFERENCES syncv2.block (id) ON DELETE CASCADE, 97 | name VARCHAR, 98 | symbol VARCHAR, 99 | decimals INTEGER 100 | ); 101 | 102 | CREATE TABLE mtv2.rht 103 | ( 104 | key BYTEA PRIMARY KEY, 105 | value BYTEA[] 106 | ); 107 | 108 | CREATE TABLE mtv2.root 109 | ( 110 | root BYTEA, 111 | deposit_cnt BIGINT, 112 | network INTEGER, 113 | PRIMARY KEY(deposit_cnt, network) 114 | ); -------------------------------------------------------------------------------- /db/pgstorage/migrations/0002.sql: -------------------------------------------------------------------------------- 1 | -- +migrate Down 2 | 3 | ALTER TABLE mt.rht DROP COLUMN IF EXISTS root_id; 4 | ALTER TABLE mt.root DROP COLUMN IF EXISTS id; 5 | 6 | ALTER TABLE mt.root DROP CONSTRAINT IF EXISTS root_pkey; 7 | ALTER TABLE mt.rht DROP CONSTRAINT IF EXISTS rht_pkey; 8 | 9 | ALTER TABLE mt.root ADD CONSTRAINT root_pkey PRIMARY KEY (deposit_cnt, network); 10 | ALTER TABLE mt.rht ADD CONSTRAINT rht_pkey PRIMARY KEY (key); 11 | 12 | DROP INDEX IF EXISTS mt.root_network_idx; 13 | DROP INDEX IF EXISTS mt.deposit_idx; 14 | DROP INDEX IF EXISTS sync.block_idx; 15 | DROP INDEX IF EXISTS mt.root_idx; 16 | DROP INDEX IF EXISTS sync.exit_roots_idx; 17 | 18 | ALTER SCHEMA mt RENAME TO mtv2; 19 | ALTER SCHEMA sync RENAME TO syncv2; 20 | 21 | -- +migrate Up 22 | DROP SCHEMA IF EXISTS sync CASCADE; 23 | DROP SCHEMA IF EXISTS mt CASCADE; 24 | 25 | ALTER SCHEMA mtv2 RENAME TO mt; 26 | ALTER SCHEMA syncv2 RENAME TO sync; 27 | 28 | ALTER TABLE mt.root DROP CONSTRAINT IF EXISTS root_pkey; 29 | ALTER TABLE mt.rht DROP CONSTRAINT IF EXISTS rht_pkey; 30 | 31 | ALTER TABLE mt.rht DROP COLUMN IF EXISTS root_id; 32 | ALTER TABLE mt.root DROP COLUMN IF EXISTS id; 33 | 34 | ALTER TABLE mt.root ADD COLUMN id SERIAL PRIMARY KEY; 35 | ALTER TABLE mt.rht ADD COLUMN root_id BIGINT NOT NULL DEFAULT 1 CONSTRAINT rht_root_id_fkey REFERENCES mt.root (id) ON DELETE CASCADE; 36 | ALTER TABLE mt.rht ALTER COLUMN root_id DROP DEFAULT; 37 | 38 | -- +migrate StatementBegin 39 | DO $$ 40 | DECLARE 41 | rt RECORD; 42 | pkey BYTEA; 43 | pvalue BYTEA []; 44 | BEGIN 45 | FOR rt IN SELECT * FROM mt.root 46 | LOOP 47 | IF rt.deposit_cnt > 0 THEN 48 | rt.deposit_cnt = rt.deposit_cnt - 1; 49 | END IF; 50 | pkey = rt.root; 51 | FOR i IN reverse 31..0 52 | LOOP 53 | UPDATE mt.rht SET root_id = rt.id WHERE key = pkey RETURNING value INTO pvalue; 54 | 55 | IF rt.deposit_cnt & (1 << i) > 0 THEN 56 | pkey = pvalue[2]; 57 | ELSE 58 | pkey = pvalue[1]; 59 | END IF; 60 | END LOOP; 61 | END LOOP; 62 | END; 63 | $$ LANGUAGE plpgsql; 64 | -- +migrate StatementEnd 65 | 66 | -- Create indexes 67 | CREATE INDEX IF NOT EXISTS root_network_idx ON mt.root(root, network); 68 | CREATE INDEX IF NOT EXISTS deposit_idx ON mt.root(deposit_cnt); 69 | CREATE INDEX IF NOT EXISTS block_idx ON sync.exit_root(block_id); 70 | CREATE INDEX IF NOT EXISTS root_idx ON mt.root(root); 71 | CREATE INDEX IF NOT EXISTS exit_roots_idx ON sync.exit_root(exit_roots); -------------------------------------------------------------------------------- /db/pgstorage/migrations/0003.sql: -------------------------------------------------------------------------------- 1 | -- +migrate Down 2 | 3 | ALTER TABLE mt.rht DROP COLUMN IF EXISTS deposit_id; 4 | ALTER TABLE mt.root DROP COLUMN IF EXISTS deposit_id; 5 | ALTER TABLE sync.deposit DROP COLUMN IF EXISTS id; 6 | 7 | ALTER TABLE sync.deposit ADD CONSTRAINT deposit_pkey PRIMARY KEY (network_id, deposit_cnt); 
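-- Editorial note (not part of the migration): the DO block below rebuilds rht.root_id by
-- walking 32 levels down from each root, using bit i of deposit_cnt to pick the right (value[2])
-- or left (value[1]) child hash stored in mt.rht.value, mirroring the walk in 0002.sql.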
8 | 9 | ALTER TABLE mt.root ADD COLUMN id SERIAL PRIMARY KEY; 10 | ALTER TABLE mt.rht ADD COLUMN root_id BIGINT NOT NULL DEFAULT 1 CONSTRAINT rht_root_id_fkey REFERENCES mt.root (id) ON DELETE CASCADE; 11 | ALTER TABLE mt.rht ALTER COLUMN root_id DROP DEFAULT; 12 | 13 | -- +migrate StatementBegin 14 | DO $$ 15 | DECLARE 16 | rt RECORD; 17 | pkey BYTEA; 18 | pvalue BYTEA []; 19 | BEGIN 20 | FOR rt IN SELECT * FROM mt.root 21 | LOOP 22 | pkey = rt.root; 23 | FOR i IN reverse 31..0 24 | LOOP 25 | UPDATE mt.rht SET root_id = rt.id WHERE key = pkey RETURNING value INTO pvalue; 26 | 27 | IF rt.deposit_cnt & (1 << i) > 0 THEN 28 | pkey = pvalue[2]; 29 | ELSE 30 | pkey = pvalue[1]; 31 | END IF; 32 | END LOOP; 33 | END LOOP; 34 | END; 35 | $$ LANGUAGE plpgsql; 36 | -- +migrate StatementEnd 37 | 38 | UPDATE mt.root SET deposit_cnt = deposit_cnt + 1; 39 | 40 | DROP INDEX IF EXISTS mt.rht_key_idx; 41 | 42 | -- +migrate Up 43 | 44 | ALTER TABLE mt.rht DROP COLUMN IF EXISTS root_id; 45 | ALTER TABLE mt.root DROP COLUMN IF EXISTS id; 46 | 47 | ALTER TABLE sync.deposit DROP CONSTRAINT IF EXISTS deposit_pkey; 48 | ALTER TABLE sync.deposit ADD COLUMN id SERIAL PRIMARY KEY; 49 | 50 | ALTER TABLE mt.root ADD COLUMN deposit_id BIGINT NOT NULL DEFAULT 1 CONSTRAINT root_deposit_id_fkey REFERENCES sync.deposit (id) ON DELETE CASCADE; 51 | ALTER TABLE mt.root ALTER COLUMN deposit_id DROP DEFAULT; 52 | UPDATE mt.root AS r SET deposit_id = d.id FROM sync.deposit AS d WHERE d.deposit_cnt = r.deposit_cnt AND d.network_id = r.network; 53 | 54 | ALTER TABLE mt.rht ADD COLUMN deposit_id BIGINT NOT NULL DEFAULT 1 CONSTRAINT rht_deposit_id_fkey REFERENCES sync.deposit (id) ON DELETE CASCADE; 55 | ALTER TABLE mt.rht ALTER COLUMN deposit_id DROP DEFAULT; 56 | 57 | UPDATE mt.root SET deposit_cnt = deposit_cnt - 1; 58 | 59 | -- Create indexes 60 | CREATE INDEX IF NOT EXISTS rht_key_idx ON mt.rht(key); 61 | 62 | -- Delete duplicates 63 | CREATE TABLE mt.rht_temp AS (SELECT key, min(value), max(deposit_id) FROM mt.rht GROUP BY key HAVING count(key) > 1); 64 | DELETE FROM mt.rht where key in (select key FROM mt.rht_temp); 65 | INSERT INTO mt.rht(key, value, deposit_id) (SELECT b.key, b.min, b.max FROM mt.rht_temp b); 66 | 67 | -- +migrate StatementBegin 68 | DO $$ 69 | DECLARE 70 | rt RECORD; 71 | pkey BYTEA; 72 | pvalue BYTEA []; 73 | BEGIN 74 | FOR rt IN SELECT * FROM mt.root 75 | LOOP 76 | pkey = rt.root; 77 | FOR i IN reverse 31..0 78 | LOOP 79 | UPDATE mt.rht SET deposit_id = rt.deposit_id WHERE key = pkey RETURNING value INTO pvalue; 80 | 81 | IF rt.deposit_cnt & (1 << i) > 0 THEN 82 | pkey = pvalue[2]; 83 | ELSE 84 | pkey = pvalue[1]; 85 | END IF; 86 | END LOOP; 87 | END LOOP; 88 | END; 89 | $$ LANGUAGE plpgsql; 90 | -- +migrate StatementEnd 91 | -------------------------------------------------------------------------------- /db/pgstorage/migrations/0004.sql: -------------------------------------------------------------------------------- 1 | -- +migrate Down 2 | 3 | DROP TABLE IF EXISTS sync.monitored_txs; 4 | 5 | ALTER TABLE sync.deposit DROP COLUMN ready_for_claim; 6 | 7 | ALTER TABLE sync.block DROP CONSTRAINT block_hash_unique; 8 | 9 | -- +migrate Up 10 | 11 | ALTER TABLE 12 | sync.deposit 13 | ADD 14 | COLUMN ready_for_claim BOOLEAN NOT NULL DEFAULT FALSE; 15 | 16 | ALTER TABLE 17 | sync.block 18 | ADD 19 | CONSTRAINT block_hash_unique UNIQUE (block_hash); 20 | 21 | CREATE TABLE sync.monitored_txs ( 22 | id BIGINT PRIMARY KEY, 23 | block_id BIGINT REFERENCES sync.block (id) ON DELETE CASCADE, 24 | from_addr BYTEA 
NOT NULL, 25 | to_addr BYTEA, 26 | nonce BIGINT NOT NULL, 27 | value VARCHAR, 28 | data BYTEA, 29 | gas BIGINT NOT NULL, 30 | status VARCHAR NOT NULL, 31 | history BYTEA [], 32 | created_at TIMESTAMP WITH TIME ZONE NOT NULL, 33 | updated_at TIMESTAMP WITH TIME ZONE NOT NULL 34 | ); 35 | 36 | UPDATE 37 | sync.deposit 38 | SET 39 | ready_for_claim = true 40 | WHERE 41 | deposit_cnt <= ( 42 | SELECT 43 | deposit_cnt 44 | FROM 45 | mt.root 46 | WHERE 47 | root = ( 48 | SELECT 49 | exit_roots [1] 50 | FROM 51 | sync.exit_root 52 | WHERE 53 | block_id = 0 54 | ORDER BY 55 | id DESC 56 | LIMIT 57 | 1 58 | ) 59 | AND network = 0 60 | ) 61 | AND network_id = 0; 62 | 63 | UPDATE 64 | sync.deposit 65 | SET 66 | ready_for_claim = true 67 | WHERE 68 | deposit_cnt <= ( 69 | SELECT 70 | deposit_cnt 71 | FROM 72 | mt.root 73 | WHERE 74 | root = ( 75 | SELECT 76 | exit_roots [2] 77 | FROM 78 | sync.exit_root 79 | WHERE 80 | block_id > 0 81 | ORDER BY 82 | id DESC 83 | LIMIT 84 | 1 85 | ) 86 | AND network = 1 87 | ) 88 | AND network_id != 0; 89 | -------------------------------------------------------------------------------- /db/pgstorage/migrations/0005.sql: -------------------------------------------------------------------------------- 1 | -- +migrate Down 2 | 3 | CREATE TABLE IF NOT EXISTS sync.batch 4 | ( 5 | batch_num BIGINT PRIMARY KEY, 6 | sequencer BYTEA, 7 | raw_tx_data BYTEA, 8 | global_exit_root BYTEA, 9 | timestamp TIMESTAMP WITH TIME ZONE 10 | ); 11 | 12 | CREATE TABLE IF NOT EXISTS sync.verified_batch 13 | ( 14 | batch_num BIGINT PRIMARY KEY REFERENCES sync.batch (batch_num), 15 | aggregator BYTEA, 16 | tx_hash BYTEA, 17 | block_id BIGINT NOT NULL REFERENCES sync.block (id) ON DELETE CASCADE 18 | ); 19 | 20 | CREATE TABLE IF NOT EXISTS sync.forced_batch 21 | ( 22 | batch_num BIGINT, 23 | block_id BIGINT NOT NULL REFERENCES sync.block (id) ON DELETE CASCADE, 24 | forced_batch_num BIGINT, 25 | sequencer BYTEA, 26 | global_exit_root BYTEA, 27 | raw_tx_data BYTEA 28 | ); 29 | 30 | -- +migrate Up 31 | 32 | DROP TABLE IF EXISTS sync.verified_batch; 33 | DROP TABLE IF EXISTS sync.forced_batch; 34 | DROP TABLE IF EXISTS sync.batch; 35 | -------------------------------------------------------------------------------- /db/pgstorage/migrations/0006.sql: -------------------------------------------------------------------------------- 1 | -- +migrate Up 2 | ALTER TABLE mt.rht ADD primary key(key, deposit_id); 3 | ALTER TABLE mt.root ADD primary key(deposit_id); 4 | ALTER TABLE sync.token_wrapped ADD primary key(network_id,orig_net,orig_token_addr); 5 | DROP TABLE IF EXISTS mt.rht_temp; 6 | 7 | CREATE INDEX IF NOT EXISTS claim_block_id ON sync.claim USING btree (block_id); 8 | CREATE INDEX IF NOT EXISTS deposit_block_id ON sync.deposit USING btree (block_id); 9 | CREATE INDEX IF NOT EXISTS token_wrapped_block_id ON sync.token_wrapped USING btree (block_id); 10 | 11 | ALTER TABLE sync.monitored_txs 12 | DROP COLUMN IF EXISTS block_id; 13 | ALTER TABLE sync.monitored_txs 14 | RENAME COLUMN id TO deposit_id; 15 | 16 | -- +migrate Down 17 | ALTER TABLE mt.rht DROP CONSTRAINT rht_pkey; 18 | ALTER TABLE mt.root DROP CONSTRAINT root_pkey; 19 | ALTER TABLE sync.token_wrapped DROP CONSTRAINT token_wrapped_pkey; 20 | CREATE TABLE IF NOT EXISTS mt.rht_temp AS (SELECT key, min(value), max(deposit_id) FROM mt.rht GROUP BY key HAVING count(key) > 1); 21 | 22 | DROP INDEX IF EXISTS sync.claim_block_id; 23 | DROP INDEX IF EXISTS sync.deposit_block_id; 24 | DROP INDEX IF EXISTS sync.token_wrapped_block_id; 25 | 26 | ALTER 
TABLE sync.monitored_txs 27 | ADD COLUMN block_id BIGINT DEFAULT 0 REFERENCES sync.block (id) ON DELETE CASCADE; 28 | ALTER TABLE sync.monitored_txs 29 | RENAME COLUMN deposit_id TO id; 30 | -------------------------------------------------------------------------------- /db/pgstorage/migrations/0007.sql: -------------------------------------------------------------------------------- 1 | -- +migrate Up 2 | ALTER TABLE mt.root 3 | DROP COLUMN IF EXISTS deposit_cnt; 4 | 5 | CREATE TABLE IF NOT EXISTS mt.rollup_exit 6 | ( 7 | id BIGSERIAL PRIMARY KEY, 8 | leaf BYTEA, 9 | rollup_id BIGINT, 10 | root BYTEA, 11 | block_id BIGINT NOT NULL REFERENCES sync.block (id) ON DELETE CASCADE 12 | ); 13 | 14 | ALTER TABLE sync.claim 15 | ADD COLUMN IF NOT EXISTS rollup_index BIGINT DEFAULT 0, 16 | ADD COLUMN IF NOT EXISTS mainnet_flag BOOLEAN DEFAULT FALSE; 17 | 18 | -- +migrate Down 19 | ALTER TABLE mt.root 20 | ADD COLUMN deposit_cnt BIGINT; 21 | 22 | DROP TABLE IF EXISTS mt.rollup_exit; 23 | 24 | ALTER TABLE sync.claim 25 | DROP COLUMN IF EXISTS rollup_index, 26 | DROP COLUMN IF EXISTS mainnet_flag; 27 | 28 | -- +migrate StatementBegin 29 | DO $$ 30 | DECLARE 31 | rt RECORD; 32 | BEGIN 33 | FOR rt IN SELECT * FROM mt.root 34 | LOOP 35 | UPDATE mt.root SET deposit_cnt = (SELECT deposit_cnt FROM sync.deposit WHERE id = rt.deposit_id) WHERE deposit_id = rt.deposit_id; 36 | END LOOP; 37 | END; 38 | $$ LANGUAGE plpgsql; 39 | -- +migrate StatementEnd 40 | -------------------------------------------------------------------------------- /db/pgstorage/migrations/0008.sql: -------------------------------------------------------------------------------- 1 | -- +migrate Up 2 | ALTER TABLE sync.deposit 3 | ALTER COLUMN orig_net TYPE BIGINT, 4 | ALTER COLUMN dest_net TYPE BIGINT; 5 | 6 | ALTER TABLE sync.claim 7 | ALTER COLUMN orig_net TYPE BIGINT; 8 | 9 | ALTER TABLE sync.token_wrapped 10 | ALTER COLUMN orig_net TYPE BIGINT; 11 | 12 | 13 | -- +migrate Down 14 | ALTER TABLE sync.deposit 15 | ALTER COLUMN orig_net TYPE INTEGER, 16 | ALTER COLUMN dest_net TYPE INTEGER; 17 | 18 | ALTER TABLE sync.claim 19 | ALTER COLUMN orig_net TYPE INTEGER; 20 | 21 | ALTER TABLE sync.token_wrapped 22 | ALTER COLUMN orig_net TYPE INTEGER; -------------------------------------------------------------------------------- /db/pgstorage/migrations/0009.sql: -------------------------------------------------------------------------------- 1 | -- +migrate Up 2 | 3 | 4 | CREATE TABLE IF NOT EXISTS sync.monitored_txs_group 5 | ( 6 | group_id int8 PRIMARY KEY, 7 | status VARCHAR NOT NULL, -- Status of the group 8 | deposit_ids int8[], -- Deposit IDs in the group 9 | -- Num of retries done for this group 10 | num_retries int4 NOT NULL, 11 | compressed_tx_data BYTEA NULL, 12 | claim_tx_history VARCHAR NULL, 13 | created_at timestamptz NOT NULL, 14 | updated_at timestamptz NOT NULL, 15 | last_log VARCHAR NULL 16 | ); 17 | 18 | ALTER TABLE sync.monitored_txs 19 | ADD COLUMN IF NOT EXISTS group_id BIGINT DEFAULT NULL; 20 | ALTER TABLE sync.monitored_txs 21 | ADD COLUMN IF NOT EXISTS global_exit_root BYTEA NOT NULL DEFAULT '\x0000000000000000000000000000000000000000'; 22 | -- ADD CONSTRAINT fk_group_id FOREIGN KEY (group_id) REFERENCES sync.monitored_txs_group(group_id) ON DELETE CASCADE; 23 | 24 | 25 | -- +migrate Down 26 | ALTER TABLE sync.monitored_txs 27 | DROP COLUMN IF EXISTS group_id; 28 | ALTER TABLE sync.monitored_txs 29 | DROP COLUMN IF EXISTS global_exit_root; 30 | 31 | DROP TABLE IF EXISTS sync.monitored_txs_group; 
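-- Illustrative query (editorial addition, not part of this migration): with the schema created
-- in the Up section, grouped claim transactions could be inspected along these lines:
--   SELECT g.group_id, g.status, g.num_retries, count(t.deposit_id) AS txs_in_group
--   FROM sync.monitored_txs_group g
--   LEFT JOIN sync.monitored_txs t ON t.group_id = g.group_id
--   GROUP BY g.group_id, g.status, g.num_retries;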
-------------------------------------------------------------------------------- /db/pgstorage/migrations/0010.sql: -------------------------------------------------------------------------------- 1 | -- +migrate Up 2 | 3 | -- This migration will delete all empty blocks 4 | DELETE FROM sync.block 5 | WHERE NOT EXISTS (SELECT * 6 | FROM sync.claim 7 | WHERE sync.claim.block_id = sync.block.id) 8 | AND NOT EXISTS (SELECT * 9 | FROM sync.deposit 10 | WHERE sync.deposit.block_id = sync.block.id) 11 | AND NOT EXISTS (SELECT * 12 | FROM sync.token_wrapped 13 | WHERE sync.token_wrapped.block_id = sync.block.id) 14 | AND NOT EXISTS (SELECT * 15 | FROM sync.exit_root 16 | WHERE sync.exit_root.block_id = sync.block.id) 17 | AND NOT EXISTS (SELECT * 18 | FROM mt.rollup_exit 19 | WHERE mt.rollup_exit.block_id = sync.block.id) 20 | AND sync.block.id != 0; 21 | 22 | 23 | -- +migrate Down 24 | 25 | -- no action is needed, the data must remain deleted as it is useless -------------------------------------------------------------------------------- /db/pgstorage/migrations/0011.sql: -------------------------------------------------------------------------------- 1 | -- +migrate Up 2 | 3 | ALTER TABLE sync.claim DROP CONSTRAINT claim_pkey; 4 | ALTER TABLE sync.claim ADD PRIMARY KEY (index, rollup_index, network_id); 5 | 6 | -- +migrate Down 7 | 8 | ALTER TABLE sync.claim DROP CONSTRAINT claim_pkey; 9 | ALTER TABLE sync.claim ADD PRIMARY KEY (network_id, index); -------------------------------------------------------------------------------- /db/pgstorage/migrations/0011_test.go: -------------------------------------------------------------------------------- 1 | package migrations_test 2 | 3 | import ( 4 | "database/sql" 5 | "errors" 6 | "strings" 7 | "testing" 8 | 9 | "github.com/stretchr/testify/assert" 10 | ) 11 | 12 | type migrationTest0011 struct{} 13 | 14 | const ( 15 | originalDepositSQL = ` 16 | INSERT INTO sync.claim (block_id, network_id, index, mainnet_flag, rollup_index, orig_addr, dest_addr, tx_hash) 17 | VALUES(69, 0, 1, false, 0, decode('00','hex'), decode('00','hex'), decode('00','hex')); 18 | ` // Rollup 1 to L1 19 | conflictingDeposit = ` 20 | INSERT INTO sync.claim (block_id, network_id, index, mainnet_flag, rollup_index, orig_addr, dest_addr, tx_hash) 21 | VALUES(69, 0, 1, false, 1, decode('00','hex'), decode('00','hex'), decode('00','hex')); 22 | ` // Rollup 2 to L1 23 | ) 24 | 25 | func (m migrationTest0011) InsertData(db *sql.DB) error { 26 | block := "INSERT INTO sync.block (id, block_num, block_hash, parent_hash, network_id, received_at) VALUES(69, 2803824, decode('27474F16174BBE50C294FE13C190B92E42B2368A6D4AEB8A4A015F52816296C3','hex'), decode('C9B5033799ADF3739383A0489EFBE8A0D4D5E4478778A4F4304562FD51AE4C07','hex'), 1, '0001-01-01 01:00:00.000');" 27 | if _, err := db.Exec(block); err != nil { 28 | return err 29 | } 30 | 31 | if _, err := db.Exec(originalDepositSQL); err != nil { 32 | return err 33 | } 34 | _, err := db.Exec(conflictingDeposit) 35 | if err == nil || !strings.Contains(err.Error(), "ERROR: duplicate key value violates unique constraint \"claim_pkey\" (SQLSTATE 23505)") { 36 | return errors.New("should violate primary key") 37 | } 38 | 39 | return nil 40 | } 41 | 42 | func (m migrationTest0011) RunAssertsAfterMigrationUp(t *testing.T, db *sql.DB) { 43 | // check that original row still in there 44 | selectClaim := `SELECT block_id, network_id, index, mainnet_flag, rollup_index FROM sync.claim;` 45 | row := db.QueryRow(selectClaim) 46 | var ( 47 | block_id, 
network_id, index, rollup_index int 48 | mainnet_flag bool 49 | ) 50 | assert.NoError(t, row.Scan(&block_id, &network_id, &index, &mainnet_flag, &rollup_index)) 51 | assert.Equal(t, 69, block_id) 52 | assert.Equal(t, 0, network_id) 53 | assert.Equal(t, 1, index) 54 | assert.Equal(t, false, mainnet_flag) 55 | assert.Equal(t, 0, rollup_index) 56 | 57 | // Add deposit that originally would have caused pkey violation 58 | _, err := db.Exec(conflictingDeposit) 59 | assert.NoError(t, err) 60 | 61 | // Remove conflicting deposit so it's possible to run the migration down 62 | _, err = db.Exec("DELETE FROM sync.claim WHERE rollup_index = 1;") 63 | assert.NoError(t, err) 64 | } 65 | 66 | func (m migrationTest0011) RunAssertsAfterMigrationDown(t *testing.T, db *sql.DB) { 67 | // check that original row still in there 68 | selectClaim := `SELECT block_id, network_id, index, mainnet_flag, rollup_index FROM sync.claim;` 69 | row := db.QueryRow(selectClaim) 70 | var ( 71 | block_id, network_id, index, rollup_index int 72 | mainnet_flag bool 73 | ) 74 | assert.NoError(t, row.Scan(&block_id, &network_id, &index, &mainnet_flag, &rollup_index)) 75 | assert.Equal(t, 69, block_id) 76 | assert.Equal(t, 0, network_id) 77 | assert.Equal(t, 1, index) 78 | assert.Equal(t, false, mainnet_flag) 79 | assert.Equal(t, 0, rollup_index) 80 | } 81 | 82 | func TestMigration0011(t *testing.T) { 83 | runMigrationTest(t, 11, migrationTest0011{}) 84 | } 85 | -------------------------------------------------------------------------------- /db/pgstorage/migrations/0012.sql: -------------------------------------------------------------------------------- 1 | -- +migrate Up 2 | DELETE FROM sync.exit_root WHERE block_id = 0; -- This will clean up old and unnecessary values 3 | ALTER TABLE sync.exit_root ADD COLUMN network_id INTEGER NOT NULL DEFAULT 0; 4 | 5 | ALTER TABLE IF EXISTS sync.exit_root DROP CONSTRAINT IF EXISTS UC; 6 | ALTER TABLE IF EXISTS sync.exit_root ADD CONSTRAINT UC UNIQUE(block_id, global_exit_root, network_id); 7 | 8 | -- +migrate Down 9 | ALTER TABLE sync.exit_root DROP COLUMN network_id; 10 | 11 | ALTER TABLE IF EXISTS sync.exit_root DROP CONSTRAINT IF EXISTS UC; 12 | ALTER TABLE IF EXISTS sync.exit_root ADD CONSTRAINT UC UNIQUE(block_id, global_exit_root); 13 | -------------------------------------------------------------------------------- /db/pgstorage/migrations/0013.sql: -------------------------------------------------------------------------------- 1 | -- +migrate Up 2 | ALTER TABLE sync.claim ALTER COLUMN network_id TYPE BIGINT; 3 | ALTER TABLE sync.deposit ALTER COLUMN network_id TYPE BIGINT; 4 | ALTER TABLE sync.token_wrapped ALTER COLUMN network_id TYPE BIGINT; 5 | ALTER TABLE sync.block ALTER COLUMN network_id TYPE BIGINT; 6 | ALTER TABLE sync.exit_root ALTER COLUMN network_id TYPE BIGINT; 7 | ALTER TABLE mt.root ALTER COLUMN network TYPE BIGINT; 8 | 9 | ALTER TABLE sync.block ALTER COLUMN id TYPE BIGINT; 10 | CREATE SEQUENCE IF NOT EXISTS sync.block_id_seq; 11 | ALTER TABLE sync.block ALTER COLUMN id SET NOT NULL; 12 | ALTER TABLE sync.block ALTER COLUMN id SET DEFAULT nextval('sync.block_id_seq'); 13 | ALTER SEQUENCE sync.block_id_seq OWNED BY sync.block.id; 14 | 15 | ALTER TABLE sync.exit_root ALTER COLUMN id TYPE BIGINT; 16 | CREATE SEQUENCE IF NOT EXISTS sync.exit_root_id_seq; 17 | ALTER TABLE sync.exit_root ALTER COLUMN id SET NOT NULL; 18 | ALTER TABLE sync.exit_root ALTER COLUMN id SET DEFAULT nextval('sync.exit_root_id_seq'); 19 | ALTER SEQUENCE sync.exit_root_id_seq OWNED BY 
sync.exit_root.id; 20 | 21 | ALTER TABLE sync.deposit ALTER COLUMN id TYPE BIGINT; 22 | CREATE SEQUENCE IF NOT EXISTS sync.deposit_id_seq; 23 | ALTER TABLE sync.deposit ALTER COLUMN id SET NOT NULL; 24 | ALTER TABLE sync.deposit ALTER COLUMN id SET DEFAULT nextval('sync.deposit_id_seq'); 25 | ALTER SEQUENCE sync.deposit_id_seq OWNED BY sync.deposit.id; 26 | 27 | -- +migrate Down 28 | ALTER TABLE sync.claim ALTER COLUMN network_id TYPE INTEGER; 29 | ALTER TABLE sync.deposit ALTER COLUMN network_id TYPE INTEGER; 30 | ALTER TABLE sync.token_wrapped ALTER COLUMN network_id TYPE INTEGER; 31 | ALTER TABLE mt.root ALTER COLUMN network TYPE INTEGER; 32 | ALTER TABLE sync.block ALTER COLUMN network_id TYPE INTEGER; 33 | ALTER TABLE sync.exit_root ALTER COLUMN network_id TYPE INTEGER; 34 | 35 | -- No need to revert the SERIAL to BIGSERIAL type changed -------------------------------------------------------------------------------- /db/pgstorage/migrations/0014.sql: -------------------------------------------------------------------------------- 1 | -- +migrate Up 2 | 3 | ALTER TABLE sync.claim DROP CONSTRAINT claim_pkey; 4 | ALTER TABLE sync.claim ADD PRIMARY KEY (index, rollup_index, network_id, mainnet_flag); 5 | 6 | -- +migrate Down 7 | 8 | ALTER TABLE sync.claim DROP CONSTRAINT claim_pkey; 9 | ALTER TABLE sync.claim ADD PRIMARY KEY (index, rollup_index, network_id); -------------------------------------------------------------------------------- /db/pgstorage/migrations/0014_test.go: -------------------------------------------------------------------------------- 1 | package migrations_test 2 | 3 | import ( 4 | "database/sql" 5 | "errors" 6 | "strings" 7 | "testing" 8 | 9 | "github.com/stretchr/testify/assert" 10 | ) 11 | 12 | type migrationTest0014 struct{} 13 | 14 | const conflictingDepositNet1ToNet3 = ` 15 | INSERT INTO sync.claim (block_id, network_id, index, mainnet_flag, rollup_index, orig_addr, dest_addr, tx_hash) 16 | VALUES(69, 3, 0, false, 0, decode('00','hex'), decode('00','hex'), decode('00','hex')); 17 | ` // Rollup 1 to Rollup 3 18 | 19 | func (m migrationTest0014) InsertData(db *sql.DB) error { 20 | block := "INSERT INTO sync.block (id, block_num, block_hash, parent_hash, network_id, received_at) VALUES(69, 2803824, decode('27474F16174BBE50C294FE13C190B92E42B2368A6D4AEB8A4A015F52816296C3','hex'), decode('C9B5033799ADF3739383A0489EFBE8A0D4D5E4478778A4F4304562FD51AE4C07','hex'), 3, '0001-01-01 01:00:00.000');" 21 | if _, err := db.Exec(block); err != nil { 22 | return err 23 | } 24 | const originalDepositSQL = ` 25 | INSERT INTO sync.claim (block_id, network_id, index, mainnet_flag, rollup_index, orig_addr, dest_addr, tx_hash) 26 | VALUES(69, 3, 0, true, 0, decode('00','hex'), decode('00','hex'), decode('00','hex')); 27 | ` // L1 to Rollup 3 28 | if _, err := db.Exec(originalDepositSQL); err != nil { 29 | return err 30 | } 31 | _, err := db.Exec(conflictingDepositNet1ToNet3) 32 | if err == nil || !strings.Contains(err.Error(), "ERROR: duplicate key value violates unique constraint \"claim_pkey\" (SQLSTATE 23505)") { 33 | return errors.New("should violate primary key") 34 | } 35 | 36 | return nil 37 | } 38 | 39 | func (m migrationTest0014) RunAssertsAfterMigrationUp(t *testing.T, db *sql.DB) { 40 | // check that original row still in there 41 | selectClaim := `SELECT block_id, network_id, index, mainnet_flag, rollup_index FROM sync.claim;` 42 | row := db.QueryRow(selectClaim) 43 | var ( 44 | block_id, network_id, index, rollup_index int 45 | mainnet_flag bool 46 | ) 47 | 
assert.NoError(t, row.Scan(&block_id, &network_id, &index, &mainnet_flag, &rollup_index)) 48 | assert.Equal(t, 69, block_id) 49 | assert.Equal(t, 3, network_id) 50 | assert.Equal(t, 0, index) 51 | assert.Equal(t, true, mainnet_flag) 52 | assert.Equal(t, 0, rollup_index) 53 | 54 | // Add deposit that originally would have caused pkey violation 55 | _, err := db.Exec(conflictingDepositNet1ToNet3) 56 | assert.NoError(t, err) 57 | 58 | // Remove conflicting deposit so it's possible to run the migration down 59 | _, err = db.Exec("DELETE FROM sync.claim WHERE mainnet_flag = false;") 60 | assert.NoError(t, err) 61 | } 62 | 63 | func (m migrationTest0014) RunAssertsAfterMigrationDown(t *testing.T, db *sql.DB) { 64 | // check that original row still in there 65 | selectClaim := `SELECT block_id, network_id, index, mainnet_flag, rollup_index FROM sync.claim;` 66 | row := db.QueryRow(selectClaim) 67 | var ( 68 | block_id, network_id, index, rollup_index int 69 | mainnet_flag bool 70 | ) 71 | assert.NoError(t, row.Scan(&block_id, &network_id, &index, &mainnet_flag, &rollup_index)) 72 | assert.Equal(t, 69, block_id) 73 | assert.Equal(t, 3, network_id) 74 | assert.Equal(t, 0, index) 75 | assert.Equal(t, true, mainnet_flag) 76 | assert.Equal(t, 0, rollup_index) 77 | } 78 | 79 | func TestMigration0014(t *testing.T) { 80 | runMigrationTest(t, 14, migrationTest0014{}) 81 | } 82 | -------------------------------------------------------------------------------- /db/pgstorage/migrations/0015.sql: -------------------------------------------------------------------------------- 1 | -- +migrate Up 2 | 3 | CREATE TABLE IF NOT EXISTS sync.remove_exit_root( 4 | id BIGSERIAL, 5 | block_id BIGINT REFERENCES sync.block (id) ON DELETE CASCADE, 6 | global_exit_root BYTEA, 7 | network_id BIGINT, 8 | PRIMARY KEY (id) 9 | ); 10 | 11 | ALTER TABLE sync.exit_root ADD COLUMN IF NOT EXISTS allowed BOOLEAN NOT NULL DEFAULT true; 12 | 13 | 14 | 15 | -- +migrate Down 16 | 17 | DROP TABLE IF EXISTS sync.remove_exit_root; 18 | 19 | ALTER TABLE sync.exit_root DROP COLUMN IF EXISTS allowed; 20 | -------------------------------------------------------------------------------- /db/pgstorage/migrations/0016.sql: -------------------------------------------------------------------------------- 1 | -- +migrate Up 2 | ALTER TABLE sync.block DROP COLUMN IF EXISTS parent_hash; 3 | ALTER TABLE sync.block DROP COLUMN IF EXISTS received_at; 4 | 5 | -- +migrate Down 6 | ALTER TABLE sync.block ADD COLUMN IF NOT EXISTS parent_hash BYTEA DEFAULT '\x0000000000000000000000000000000000000000000000000000000000000000'; 7 | ALTER TABLE sync.block ADD COLUMN IF NOT EXISTS received_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT to_timestamp(0); 8 | -------------------------------------------------------------------------------- /db/pgstorage/migrations/0016_test.go: -------------------------------------------------------------------------------- 1 | package migrations_test 2 | 3 | import ( 4 | "database/sql" 5 | "testing" 6 | "time" 7 | 8 | "github.com/ethereum/go-ethereum/common" 9 | "github.com/stretchr/testify/assert" 10 | ) 11 | 12 | type migrationTest0016 struct{} 13 | 14 | func (m migrationTest0016) InsertData(db *sql.DB) error { 15 | block := "INSERT INTO sync.block (id, block_num, block_hash, parent_hash, network_id, received_at) VALUES(69, 2803824, decode('27474F16174BBE50C294FE13C190B92E42B2368A6D4AEB8A4A016F52816296C3','hex'), decode('C9B5033799ADF3739383A0489EFBE8A0D4D5E4478778A4F4304562FD51AE4C07','hex'), 0, '0001-01-01 01:00:00.000');" 16 | if 
_, err := db.Exec(block); err != nil { 17 | return err 18 | } 19 | block2 := "INSERT INTO sync.block (id, block_num, block_hash, parent_hash, network_id, received_at) VALUES(70, 2803824, decode('27474F16174BBE50C294FE13C190B92E42B2368A6D4AEB8A4A016F52816296C4','hex'), decode('C9B5033799ADF3739383A0489EFBE8A0D4D5E4478778A4F4304562FD51AE4C08','hex'), 1, '0001-01-01 01:00:00.000');" 20 | if _, err := db.Exec(block2); err != nil { 21 | return err 22 | } 23 | return nil 24 | } 25 | 26 | func (m migrationTest0016) RunAssertsAfterMigrationUp(t *testing.T, db *sql.DB) { 27 | selectHashParent := `SELECT parent_hash FROM sync.block limit 1;` 28 | var hashParent common.Hash 29 | err := db.QueryRow(selectHashParent).Scan(&hashParent) 30 | assert.Error(t, err) 31 | 32 | selectReceivedAt := `SELECT received_at FROM sync.block limit 1;` 33 | var receivedAt time.Time 34 | err = db.QueryRow(selectReceivedAt).Scan(&receivedAt) 35 | assert.Error(t, err) 36 | 37 | selectCount := `SELECT count(*) FROM sync.block;` 38 | var count uint64 39 | err = db.QueryRow(selectCount).Scan(&count) 40 | assert.NoError(t, err) 41 | assert.Equal(t, uint64(3), count) 42 | } 43 | 44 | func (m migrationTest0016) RunAssertsAfterMigrationDown(t *testing.T, db *sql.DB) { 45 | // Read values from the table 46 | selectHashParent := `SELECT parent_hash FROM sync.block where block_num='2803824' AND network_id = 0;` 47 | var hashParent common.Hash 48 | err := db.QueryRow(selectHashParent).Scan(&hashParent) 49 | assert.NoError(t, err) 50 | assert.Equal(t, common.Hash{}, hashParent) 51 | 52 | selectReceivedAt := `SELECT received_at FROM sync.block where block_num='2803824' AND network_id = 0;` 53 | var receivedAt time.Time 54 | err = db.QueryRow(selectReceivedAt).Scan(&receivedAt) 55 | assert.NoError(t, err) 56 | 57 | selectCount := `SELECT count(*) FROM sync.block;` 58 | var count uint64 59 | err = db.QueryRow(selectCount).Scan(&count) 60 | assert.NoError(t, err) 61 | assert.Equal(t, uint64(3), count) 62 | } 63 | 64 | func TestMigration0016(t *testing.T) { 65 | runMigrationTest(t, 16, migrationTest0016{}) 66 | } 67 | -------------------------------------------------------------------------------- /db/pgstorage/migrations/0017.sql: -------------------------------------------------------------------------------- 1 | -- +migrate Up 2 | ALTER TABLE sync.claim ADD COLUMN IF NOT EXISTS global_index VARCHAR(255) DEFAULT ''; 3 | 4 | -- +migrate Down 5 | ALTER TABLE sync.claim DROP COLUMN IF EXISTS global_index; 6 | -------------------------------------------------------------------------------- /db/pgstorage/migrations/0017_test.go: -------------------------------------------------------------------------------- 1 | package migrations_test 2 | 3 | import ( 4 | "database/sql" 5 | "testing" 6 | 7 | "github.com/stretchr/testify/assert" 8 | ) 9 | 10 | type migrationTest0017 struct{} 11 | 12 | func (m migrationTest0017) InsertData(db *sql.DB) error { 13 | block := "INSERT INTO sync.block (id, block_num, block_hash, network_id) VALUES(1, 2803824, decode('C9B5033799ADF3739383A0489EFBE8A0D4D5E4478778A4F4304562FD51AE4C07','hex'), 0);" 14 | if _, err := db.Exec(block); err != nil { 15 | return err 16 | } 17 | return nil 18 | } 19 | 20 | func (m migrationTest0017) RunAssertsAfterMigrationUp(t *testing.T, db *sql.DB) { 21 | insertClaim1 := `INSERT INTO sync.claim 22 | (network_id, "index", orig_net, orig_addr, amount, dest_addr, block_id, tx_hash, rollup_index, mainnet_flag, global_index) 23 | VALUES(1, 11, 0, 
decode('0000000000000000000000000000000000000000','hex'), '90000000000000000', decode('F39FD6E51AAD88F6F4CE6AB8827279CFFFB92266','hex'), 1, decode('BF2C816AB6F8A8F5F9DDA6EE97D433CC841E69B5669A5CDF499826FA4B99C179','hex'), 1, false, 4294967307);` 24 | _, err := db.Exec(insertClaim1) 25 | assert.NoError(t, err) 26 | insertClaim2 := `INSERT INTO sync.claim 27 | (network_id, "index", orig_net, orig_addr, amount, dest_addr, block_id, tx_hash, rollup_index, mainnet_flag) 28 | VALUES(1, 12, 0, decode('0000000000000000000000000000000000000000','hex'), '90000000000000000', decode('F39FD6E51AAD88F6F4CE6AB8827279CFFFB92266','hex'), 1, decode('BF2C816AB6F8A8F5F9DDA6EE97D433CC841E69B5669A5CDF499826FA4B99C179','hex'), 1, false);` 29 | _, err = db.Exec(insertClaim2) 30 | assert.NoError(t, err) 31 | selectClaim1 := "SELECT global_index FROM sync.claim WHERE network_id = 1 AND index = 11;" 32 | var globalIndex string 33 | err = db.QueryRow(selectClaim1).Scan(&globalIndex) 34 | assert.NoError(t, err) 35 | assert.Equal(t, "4294967307", globalIndex) 36 | selectClaim2 := "SELECT global_index FROM sync.claim WHERE network_id = 1 AND index = 12;" 37 | var globalIndex2 string 38 | err = db.QueryRow(selectClaim2).Scan(&globalIndex2) 39 | assert.NoError(t, err) 40 | assert.Equal(t, "", globalIndex2) 41 | } 42 | 43 | func (m migrationTest0017) RunAssertsAfterMigrationDown(t *testing.T, db *sql.DB) { 44 | insertClaim1 := `INSERT INTO sync.claim 45 | (network_id, "index", orig_net, orig_addr, amount, dest_addr, block_id, tx_hash, rollup_index, mainnet_flag, global_index) 46 | VALUES(1, 13, 0, decode('0000000000000000000000000000000000000000','hex'), '90000000000000000', decode('F39FD6E51AAD88F6F4CE6AB8827279CFFFB92266','hex'), 1, decode('BF2C816AB6F8A8F5F9DDA6EE97D433CC841E69B5669A5CDF499826FA4B99C179','hex'), 1, false, 4294967307);` 47 | _, err := db.Exec(insertClaim1) 48 | assert.Error(t, err) 49 | insertClaim2 := `INSERT INTO sync.claim 50 | (network_id, "index", orig_net, orig_addr, amount, dest_addr, block_id, tx_hash, rollup_index, mainnet_flag) 51 | VALUES(1, 14, 0, decode('0000000000000000000000000000000000000000','hex'), '90000000000000000', decode('F39FD6E51AAD88F6F4CE6AB8827279CFFFB92266','hex'), 1, decode('BF2C816AB6F8A8F5F9DDA6EE97D433CC841E69B5669A5CDF499826FA4B99C179','hex'), 1, false);` 52 | _, err = db.Exec(insertClaim2) 53 | assert.NoError(t, err) 54 | selectCount := `SELECT count(*) FROM sync.claim;` 55 | var count uint64 56 | err = db.QueryRow(selectCount).Scan(&count) 57 | assert.NoError(t, err) 58 | assert.Equal(t, uint64(3), count) 59 | } 60 | 61 | func TestMigration0017(t *testing.T) { 62 | runMigrationTest(t, 17, migrationTest0017{}) 63 | } 64 | -------------------------------------------------------------------------------- /db/pgstorage/migrations/0018.sql: -------------------------------------------------------------------------------- 1 | -- +migrate Up 2 | ALTER TABLE sync.deposit ADD COLUMN IF NOT EXISTS ignore BOOLEAN DEFAULT FALSE; 3 | 4 | -- +migrate Down 5 | ALTER TABLE sync.deposit DROP COLUMN IF EXISTS ignore; 6 | -------------------------------------------------------------------------------- /db/storage.go: -------------------------------------------------------------------------------- 1 | package db 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | 7 | "github.com/0xPolygonHermez/zkevm-bridge-service/db/pgstorage" 8 | "github.com/0xPolygonHermez/zkevm-bridge-service/utils/gerror" 9 | ) 10 | 11 | // Storage interface 12 | type Storage interface{} 13 | 14 | // NewStorage creates a new 
Storage 15 | func NewStorage(ctx context.Context, cfg Config) (Storage, error) { 16 | if cfg.Database == "postgres" { 17 | pg, err := pgstorage.NewPostgresStorage(ctx, pgstorage.Config{ 18 | Name: cfg.PgStorage.Name, 19 | User: cfg.PgStorage.User, 20 | Password: cfg.PgStorage.Password, 21 | Host: cfg.PgStorage.Host, 22 | Port: cfg.PgStorage.Port, 23 | MaxConns: cfg.PgStorage.MaxConns, 24 | }) 25 | return pg, err 26 | } 27 | return nil, gerror.ErrStorageNotRegister 28 | } 29 | 30 | // RunMigrations will execute pending migrations if needed to keep 31 | // the database updated with the latest changes 32 | func RunMigrations(cfg Config) error { 33 | if cfg.Database == "postgres" { 34 | config := pgstorage.Config{ 35 | Name: cfg.PgStorage.Name, 36 | User: cfg.PgStorage.User, 37 | Password: cfg.PgStorage.Password, 38 | Host: cfg.PgStorage.Host, 39 | Port: cfg.PgStorage.Port, 40 | } 41 | return pgstorage.RunMigrationsUp(config) 42 | } 43 | return fmt.Errorf("database type not supported") 44 | } 45 | -------------------------------------------------------------------------------- /docs/architecture.drawio.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/0xPolygon/zkevm-bridge-service/1a2a7222d2bc3691bdb4078b8b900e6f55d0cd17/docs/architecture.drawio.png -------------------------------------------------------------------------------- /docs/e2e-realnetwork-test.md: -------------------------------------------------------------------------------- 1 | # Test a real network using the e2e tests 2 | This suite tests the bridge service running against a real network. 3 | The included tests are: 4 | - ERC20 L1->L2 expecting auto-claim 5 | - ERC20 L2->L1 6 | - BridgeMessage L1->L2 7 | - BridgeMessage L2->L1 8 | 9 | # Build the Docker image 10 | First, build the Docker image that includes the tests: 11 | `make build-docker-e2e-real_network-ERC20` 12 | or 13 | `make build-docker-e2e-real_network-MSG` 14 | 15 | 16 | ## Create a config file 17 | Check the example config `test/config/bridge_network_e2e/cardona.toml`. 18 | 19 | ## Execute the Docker image using the config file 20 | - Create the config file in `/tmp/test.toml` 21 | - Launch the tests: 22 | ``` 23 | $ docker run --volume "./tmp/:/config/" --env BRIDGE_TEST_CONFIG_FILE=/config/test.toml bridge-e2e-realnetwork-erc20 24 | ``` -------------------------------------------------------------------------------- /docs/running_local.md: -------------------------------------------------------------------------------- 1 | > WARNING: This documentation is outdated; it will be updated soon 2 | 3 | # Steps to run the environment locally 4 | 5 | ## Overview 6 | 7 | This documentation will help you run the following components: 8 | 9 | - zkEVM Node Databases 10 | - zkEVM Bridge Database 11 | - L1 Network 12 | - Prover 13 | - zkEVM Node 14 | - zkEVM Bridge Service 15 | 16 | ## Requirements 17 | 18 | The current version of the environment requires `go`, `docker` and `docker-compose` to be installed; check the links below for installation instructions: 19 | 20 | - 21 | - 22 | - 23 | 24 | The `zkevm-bridge-service` Docker image must be built at least once and every time a change is made to the code. 
25 | If you haven't built the `zkevm-bridge-service` image yet, you must run: 26 | 27 | ```bash 28 | make build-docker 29 | ``` 30 | 31 | ## Controlling the environment 32 | 33 | > All the data is stored inside each Docker container; this means that once you remove a container, its data will be lost. 34 | 35 | To run the environment: 36 | 37 | ```bash 38 | make run 39 | ``` 40 | 41 | To stop the environment: 42 | 43 | ```bash 44 | make stop 45 | ``` 46 | 47 | To run the e2e and edge tests: 48 | 49 | ```bash 50 | make test-full 51 | make test-edge 52 | ``` 53 | 54 | ## Accessing the environment 55 | 56 | - zkEVM Bridge Database 57 | - `Type:` Postgres DB 58 | - `User:` test_user 59 | - `Password:` test_password 60 | - `Database:` test_db 61 | - `Host:` localhost 62 | - `Port:` 5435 63 | - `Url:` 64 | - zkEVM Bridge Service 65 | - `Type:` Web 66 | - `Host:` localhost 67 | - `Port:` 8080 68 | - `Url:` 69 | 70 | ## SC Addresses 71 | 72 | | Address | Description | 73 | |---|---| 74 | | 0xFe12ABaa190Ef0c8638Ee0ba9F828BF41368Ca0E | L1 Bridge | 75 | | 0xFe12ABaa190Ef0c8638Ee0ba9F828BF41368Ca0E | L2 Bridge | 76 | | 0x5FbDB2315678afecb367f032d93F642f64180aa3 | Pol token | 77 | | 0x8A791620dd6260079BF849Dc5567aDC3F2FdC318 | GlobalExitRootManager | 78 | | 0xB7f8BC63BbcaD18155201308C8f3540b07f84F5e | RollupManager | 79 | 80 | ## Fund an account on L2 with ETH 81 | 82 | If you need an account with funds, you can use the [deposit script](https://github.com/0xPolygonHermez/zkevm-bridge-service/blob/develop/test/scripts/deposit/main.go) 83 | to fund an account. 84 | For a list of accounts that already have ETH, check out the [node's docs](https://github.com/0xPolygonHermez/zkevm-node/blob/develop/docs/running_local.md#accounts). 85 | 86 | Replace `l1AccHexAddress` and `l1AccHexPrivateKey` with your own values and execute the script with 87 | ``` 88 | go run test/scripts/deposit/main.go 89 | ``` 90 | and the account specified under `l1AccHexAddress` will be funded on L2. 91 | -------------------------------------------------------------------------------- /etherman/config.go: -------------------------------------------------------------------------------- 1 | package etherman 2 | 3 | // Config represents the configuration of the etherman 4 | type Config struct { 5 | L1URL string `mapstructure:"L1URL"` 6 | L2URLs []string `mapstructure:"L2URLs"` 7 | } 8 | -------------------------------------------------------------------------------- /etherman/metrics/metrics.go: -------------------------------------------------------------------------------- 1 | package metrics 2 | 3 | import ( 4 | "fmt" 5 | "time" 6 | 7 | "github.com/0xPolygonHermez/zkevm-bridge-service/metrics" 8 | "github.com/prometheus/client_golang/prometheus" 9 | ) 10 | 11 | const ( 12 | // ReadAndProcessAllEventsTimeName is the name of the label to read and process all events. 13 | ReadAndProcessAllEventsTimeName = "read_and_process_all_event_time" 14 | 15 | // ProcessAllEventTimeName is the name of the label to process all events. 16 | ProcessAllEventTimeName = "process_all_event_time" 17 | 18 | // ProcessSingleEventTimeName is the name of the label to process a single event. 19 | ProcessSingleEventTimeName = "process_single_event_time" 20 | 21 | // GetEventsTimeName is the name of the label to get L1 events. 22 | GetEventsTimeName = "get_events_time" 23 | 24 | // EventCounterName is the name of the label to count the processed events. 
25 | EventCounterName = "processed_events_counter" 26 | ) 27 | 28 | var Prefix string 29 | 30 | // Register the metrics for the etherman package. 31 | func Register(networkID uint32) { 32 | // Prefix for the metrics of the etherman package. 33 | Prefix = "etherman_networkID_" + fmt.Sprintf("%d", networkID) + "_" 34 | var ( 35 | counters []prometheus.CounterOpts 36 | histograms []prometheus.HistogramOpts 37 | ) 38 | 39 | counters = []prometheus.CounterOpts{ 40 | { 41 | Name: Prefix + EventCounterName, 42 | Help: "[ETHERMAN] count processed events", 43 | }, 44 | } 45 | 46 | histograms = []prometheus.HistogramOpts{ 47 | { 48 | Name: Prefix + ReadAndProcessAllEventsTimeName, 49 | Help: "[ETHERMAN] read and process all event time", 50 | }, 51 | { 52 | Name: Prefix + ProcessAllEventTimeName, 53 | Help: "[ETHERMAN] process all event time", 54 | }, 55 | { 56 | Name: Prefix + ProcessSingleEventTimeName, 57 | Help: "[ETHERMAN] process single event time", 58 | }, 59 | { 60 | Name: Prefix + GetEventsTimeName, 61 | Help: "[ETHERMAN] get L1 events time", 62 | }, 63 | } 64 | 65 | metrics.RegisterCounters(counters...) 66 | metrics.RegisterHistograms(histograms...) 67 | } 68 | 69 | // ReadAndProcessAllEventsTime observes the time read and process all event on the histogram. 70 | func ReadAndProcessAllEventsTime(lastProcessTime time.Duration) { 71 | execTimeInSeconds := float64(lastProcessTime) / float64(time.Second) 72 | metrics.HistogramObserve(Prefix+ReadAndProcessAllEventsTimeName, execTimeInSeconds) 73 | } 74 | 75 | // ProcessAllEventTime observes the time to process all event on the histogram. 76 | func ProcessAllEventTime(lastProcessTime time.Duration) { 77 | execTimeInSeconds := float64(lastProcessTime) / float64(time.Second) 78 | metrics.HistogramObserve(Prefix+ProcessAllEventTimeName, execTimeInSeconds) 79 | } 80 | 81 | // ProcessSingleEventTime observes the time to process a single event on the histogram. 82 | func ProcessSingleEventTime(lastProcessTime time.Duration) { 83 | execTimeInSeconds := float64(lastProcessTime) / float64(time.Second) 84 | metrics.HistogramObserve(Prefix+ProcessSingleEventTimeName, execTimeInSeconds) 85 | } 86 | 87 | // GetEventsTime observes the time to get the events from L1 on the histogram. 
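// Illustrative usage (not taken from this repository): callers typically pass an elapsed duration, e.g. GetEventsTime(time.Since(start)) after fetching a batch of L1 events.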
88 | func GetEventsTime(lastProcessTime time.Duration) { 89 | execTimeInSeconds := float64(lastProcessTime) / float64(time.Second) 90 | metrics.HistogramObserve(Prefix+GetEventsTimeName, execTimeInSeconds) 91 | } 92 | 93 | // EventCounter increases the counter for the processed events 94 | func EventCounter() { 95 | metrics.CounterInc(Prefix + EventCounterName) 96 | } 97 | -------------------------------------------------------------------------------- /etherman/smartcontracts/ERC20/ERC20.abi: -------------------------------------------------------------------------------- 1 | [{"inputs":[{"internalType":"string","name":"n","type":"string"},{"internalType":"string","name":"s","type":"string"}],"stateMutability":"nonpayable","type":"constructor"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"address","name":"owner","type":"address"},{"indexed":true,"internalType":"address","name":"spender","type":"address"},{"indexed":false,"internalType":"uint256","name":"value","type":"uint256"}],"name":"Approval","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"address","name":"from","type":"address"},{"indexed":true,"internalType":"address","name":"to","type":"address"},{"indexed":false,"internalType":"uint256","name":"value","type":"uint256"}],"name":"Transfer","type":"event"},{"inputs":[{"internalType":"address","name":"","type":"address"},{"internalType":"address","name":"","type":"address"}],"name":"allowance","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"address","name":"spender","type":"address"},{"internalType":"uint256","name":"amount","type":"uint256"}],"name":"approve","outputs":[{"internalType":"bool","name":"","type":"bool"}],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"address","name":"","type":"address"}],"name":"balanceOf","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"uint256","name":"amount","type":"uint256"}],"name":"burn","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[],"name":"decimals","outputs":[{"internalType":"uint8","name":"","type":"uint8"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"uint256","name":"amount","type":"uint256"}],"name":"mint","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[],"name":"name","outputs":[{"internalType":"string","name":"","type":"string"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"symbol","outputs":[{"internalType":"string","name":"","type":"string"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"totalSupply","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"address","name":"recipient","type":"address"},{"internalType":"uint256","name":"amount","type":"uint256"}],"name":"transfer","outputs":[{"internalType":"bool","name":"","type":"bool"}],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"address","name":"sender","type":"address"},{"internalType":"address","name":"recipient","type":"address"},{"internalType":"uint256","name":"amount","type":"uint256"}],"name":"transferFrom","outputs":[{"internalType":"bool","name":"","type":"bool"}],"stateMutability":"nonpayable","type":"function"}] -------------------------------------------------------------------------------- 
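For orientation, the ERC20 ABI above is the input that `abigen` compiles into the Go binding under `etherman/smartcontracts/ERC20/` (see `scripts/generate-smartcontracts-bindings.sh` further down). The snippet below is a minimal, illustrative sketch of how such a generated binding is typically consumed; the constructor and method names (`NewERC20`, `BalanceOf`), the RPC endpoint, and the addresses are assumptions based on standard `abigen` output and the local "SC Addresses" table in `docs/running_local.md`, not code taken from this repository.

package main

import (
	"fmt"

	"github.com/0xPolygonHermez/zkevm-bridge-service/etherman/smartcontracts/ERC20"
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/ethclient"
)

func main() {
	// Hypothetical local L1 endpoint; adjust to your environment.
	client, err := ethclient.Dial("http://localhost:8545")
	if err != nil {
		panic(err)
	}
	defer client.Close()

	// Bind the deployed token (address borrowed from the local "SC Addresses" table, Pol token).
	token, err := ERC20.NewERC20(common.HexToAddress("0x5FbDB2315678afecb367f032d93F642f64180aa3"), client)
	if err != nil {
		panic(err)
	}

	// balanceOf is a view function, so passing a nil *bind.CallOpts performs a plain eth_call at the latest block.
	balance, err := token.BalanceOf(nil, common.HexToAddress("0xF39FD6E51AAD88F6F4CE6AB8827279CFFFB92266"))
	if err != nil {
		panic(err)
	}
	fmt.Println("token balance:", balance)
}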
/etherman/smartcontracts/ERC20/IERC20.abi: -------------------------------------------------------------------------------- 1 | [{"anonymous":false,"inputs":[{"indexed":true,"internalType":"address","name":"owner","type":"address"},{"indexed":true,"internalType":"address","name":"spender","type":"address"},{"indexed":false,"internalType":"uint256","name":"value","type":"uint256"}],"name":"Approval","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"address","name":"from","type":"address"},{"indexed":true,"internalType":"address","name":"to","type":"address"},{"indexed":false,"internalType":"uint256","name":"value","type":"uint256"}],"name":"Transfer","type":"event"},{"inputs":[{"internalType":"address","name":"owner","type":"address"},{"internalType":"address","name":"spender","type":"address"}],"name":"allowance","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"address","name":"spender","type":"address"},{"internalType":"uint256","name":"amount","type":"uint256"}],"name":"approve","outputs":[{"internalType":"bool","name":"","type":"bool"}],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"address","name":"account","type":"address"}],"name":"balanceOf","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"totalSupply","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"address","name":"recipient","type":"address"},{"internalType":"uint256","name":"amount","type":"uint256"}],"name":"transfer","outputs":[{"internalType":"bool","name":"","type":"bool"}],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"address","name":"sender","type":"address"},{"internalType":"address","name":"recipient","type":"address"},{"internalType":"uint256","name":"amount","type":"uint256"}],"name":"transferFrom","outputs":[{"internalType":"bool","name":"","type":"bool"}],"stateMutability":"nonpayable","type":"function"}] -------------------------------------------------------------------------------- /etherman/smartcontracts/ERC20/IERC20.bin: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/0xPolygon/zkevm-bridge-service/1a2a7222d2bc3691bdb4078b8b900e6f55d0cd17/etherman/smartcontracts/ERC20/IERC20.bin -------------------------------------------------------------------------------- /etherman/smartcontracts/abi/mockverifier.abi: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "inputs": [ 4 | { 5 | "internalType": "bytes32[24]", 6 | "name": "proof", 7 | "type": "bytes32[24]" 8 | }, 9 | { 10 | "internalType": "uint256[1]", 11 | "name": "pubSignals", 12 | "type": "uint256[1]" 13 | } 14 | ], 15 | "name": "verifyProof", 16 | "outputs": [ 17 | { 18 | "internalType": "bool", 19 | "name": "", 20 | "type": "bool" 21 | } 22 | ], 23 | "stateMutability": "pure", 24 | "type": "function" 25 | } 26 | ] -------------------------------------------------------------------------------- /etherman/smartcontracts/bin/mockverifier.bin: -------------------------------------------------------------------------------- 1 | 
608060405234801561001057600080fd5b50610158806100206000396000f3fe608060405234801561001057600080fd5b506004361061002b5760003560e01c80639121da8a14610030575b600080fd5b61004661003e366004610089565b600192915050565b604051901515815260200160405180910390f35b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fd5b60008061032080848603121561009e57600080fd5b6103008401858111156100b057600080fd5b8493508561031f8601126100c357600080fd5b604051602080820182811067ffffffffffffffff821117156100e7576100e761005a565b6040529286019281888511156100fc57600080fd5b5b8484101561011457833581529281019281016100fd565b50949790965094505050505056fea264697066735822122066b50cbb730099c9f1f258fa949f9d4e1a1ef7636af905817cebb300b2be0d2664736f6c63430008140033 -------------------------------------------------------------------------------- /etherman/smartcontracts/readme.md: -------------------------------------------------------------------------------- 1 | ## smartcontracts 2 | 3 | ### Folder generated_binding 4 | The folder `generated_binding` have autogenerated files to use the contracts 5 | You can generate from root folder project invoking: 6 | ``` 7 | make generate-smartcontracts-bindings 8 | ``` 9 | 10 | ## Folder json 11 | This folder contains the compiled contracts, usually you can find it at: [https://github.com/0xPolygonHermez/zkevm-contracts](https://github.com/0xPolygonHermez/zkevm-contracts) -------------------------------------------------------------------------------- /etherman/smartcontracts/script.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | set -e 4 | 5 | dir=$(pwd) 6 | 7 | gen() { 8 | local package=$1 9 | 10 | abigen --bin bin/${package}.bin --abi abi/${package}.abi --pkg=${package} --out=${package}/${package}.go 11 | } 12 | 13 | 14 | compilegen() { 15 | local package=$1 16 | 17 | docker run --rm --user $(id -u) -v "${dir}:/contracts" ethereum/solc:0.8.20-alpine - "/contracts/${package}.sol" -o "/contracts/${package}" --abi --bin --overwrite --optimize --evm-version paris 18 | abigen --bin ${package}/${package}.bin --abi ${package}/${package}.abi --pkg=${package} --out=${package}/${package}.go 19 | } 20 | 21 | gen claimcompressor 22 | gen oldglobalexitrootmanagerl2sovereignchain 23 | gen globalexitrootmanagerl2sovereignchain 24 | gen bridgel2sovereignchain 25 | gen polygonzkevmbridgev2 26 | -------------------------------------------------------------------------------- /etherman/types.go: -------------------------------------------------------------------------------- 1 | package etherman 2 | 3 | import ( 4 | "math/big" 5 | 6 | "github.com/ethereum/go-ethereum/common" 7 | ) 8 | 9 | // Block struct 10 | type Block struct { 11 | ID uint64 12 | BlockNumber uint64 13 | BlockHash common.Hash 14 | NetworkID uint32 15 | GlobalExitRoots []GlobalExitRoot 16 | RemoveL2GER []GlobalExitRoot 17 | Deposits []Deposit 18 | Claims []Claim 19 | Tokens []TokenWrapped 20 | VerifiedBatches []VerifiedBatch 21 | ActivateEtrog []bool 22 | } 23 | 24 | // GlobalExitRoot struct 25 | type GlobalExitRoot struct { 26 | BlockID uint64 27 | BlockNumber uint64 28 | ExitRoots []common.Hash 29 | GlobalExitRoot common.Hash 30 | NetworkID uint32 31 | ID uint64 32 | } 33 | 34 | // Deposit struct 35 | type Deposit struct { 36 | Id uint64 37 | LeafType uint8 38 | OriginalNetwork uint32 39 | OriginalAddress common.Address 40 | Amount *big.Int 41 | DestinationNetwork uint32 42 | DestinationAddress common.Address 43 | DepositCount uint32 44 | BlockID uint64 45 | 
BlockNumber uint64 46 | NetworkID uint32 47 | TxHash common.Hash 48 | Metadata []byte 49 | // it is only used for the bridge service 50 | ReadyForClaim bool 51 | } 52 | 53 | // Claim struct 54 | type Claim struct { 55 | MainnetFlag bool 56 | RollupIndex uint32 57 | Index uint32 58 | OriginalNetwork uint32 59 | OriginalAddress common.Address 60 | Amount *big.Int 61 | DestinationAddress common.Address 62 | BlockID uint64 63 | BlockNumber uint64 64 | NetworkID uint32 65 | TxHash common.Hash 66 | GlobalIndex string 67 | } 68 | 69 | // TokenWrapped struct 70 | type TokenWrapped struct { 71 | TokenMetadata 72 | OriginalNetwork uint32 73 | OriginalTokenAddress common.Address 74 | WrappedTokenAddress common.Address 75 | BlockID uint64 76 | BlockNumber uint64 77 | NetworkID uint32 78 | } 79 | 80 | // TokenMetadata is a metadata of ERC20 token. 81 | type TokenMetadata struct { 82 | Name string 83 | Symbol string 84 | Decimals uint8 85 | } 86 | 87 | type VerifiedBatch struct { 88 | BlockNumber uint64 89 | BatchNumber uint64 90 | RollupID uint32 91 | LocalExitRoot common.Hash 92 | TxHash common.Hash 93 | StateRoot common.Hash 94 | Aggregator common.Address 95 | } 96 | 97 | // RollupExitLeaf struct 98 | type RollupExitLeaf struct { 99 | ID uint64 100 | BlockID uint64 101 | Leaf common.Hash 102 | RollupId uint32 103 | Root common.Hash 104 | } 105 | -------------------------------------------------------------------------------- /hex/hex.go: -------------------------------------------------------------------------------- 1 | package hex 2 | 3 | import ( 4 | "encoding/hex" 5 | "fmt" 6 | "math/big" 7 | "strconv" 8 | "strings" 9 | ) 10 | 11 | const ( 12 | // Base represents the hexadecimal base, which is 16 13 | Base = 16 14 | 15 | // BitSize64 64 bits 16 | BitSize64 = 64 17 | ) 18 | 19 | // DecError represents an error when decoding a hex value 20 | type DecError struct{ msg string } 21 | 22 | func (err DecError) Error() string { return err.msg } 23 | 24 | // EncodeToHex generates a hex string based on the byte representation, with the '0x' prefix 25 | func EncodeToHex(str []byte) string { 26 | return "0x" + hex.EncodeToString(str) 27 | } 28 | 29 | // EncodeToString is a wrapper method for hex.EncodeToString 30 | func EncodeToString(str []byte) string { 31 | return hex.EncodeToString(str) 32 | } 33 | 34 | // DecodeString returns the byte representation of the hexadecimal string 35 | func DecodeString(str string) ([]byte, error) { 36 | return hex.DecodeString(str) 37 | } 38 | 39 | // DecodeHex converts a hex string to a byte array 40 | func DecodeHex(str string) ([]byte, error) { 41 | str = strings.TrimPrefix(str, "0x") 42 | 43 | // Check if the string has an odd length 44 | if len(str)%2 != 0 { 45 | // Prepend a '0' to make it even-length 46 | str = "0" + str 47 | } 48 | 49 | return hex.DecodeString(str) 50 | } 51 | 52 | // MustDecodeHex type-checks and converts a hex string to a byte array 53 | func MustDecodeHex(str string) []byte { 54 | buf, err := DecodeHex(str) 55 | if err != nil { 56 | panic(fmt.Errorf("could not decode hex: %v", err)) 57 | } 58 | 59 | return buf 60 | } 61 | 62 | // DecodeUint64 type-checks and converts a hex string to a uint64 63 | func DecodeUint64(str string) uint64 { 64 | i := DecodeBig(str) 65 | return i.Uint64() 66 | } 67 | 68 | // EncodeUint64 encodes a number as a hex string with 0x prefix. 
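// For example, EncodeUint64(255) returns "0xff" and EncodeUint64(0) returns "0x0".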
69 | func EncodeUint64(i uint64) string { 70 | enc := make([]byte, 2, 10) //nolint:mnd 71 | copy(enc, "0x") 72 | return string(strconv.AppendUint(enc, i, Base)) 73 | } 74 | 75 | // BadNibble is a nibble that is bad 76 | const BadNibble = ^uint64(0) 77 | 78 | // DecodeNibble decodes a byte into a uint64 79 | func DecodeNibble(in byte) uint64 { 80 | switch { 81 | case in >= '0' && in <= '9': 82 | return uint64(in - '0') 83 | case in >= 'A' && in <= 'F': 84 | return uint64(in - 'A' + 10) //nolint:mnd 85 | case in >= 'a' && in <= 'f': 86 | return uint64(in - 'a' + 10) //nolint:mnd 87 | default: 88 | return BadNibble 89 | } 90 | } 91 | 92 | // EncodeBig encodes bigint as a hex string with 0x prefix. 93 | // The sign of the integer is ignored. 94 | func EncodeBig(bigint *big.Int) string { 95 | numBits := bigint.BitLen() 96 | if numBits == 0 { 97 | return "0x0" 98 | } 99 | 100 | return fmt.Sprintf("%#x", bigint) 101 | } 102 | 103 | // DecodeBig converts a hex number to a big.Int value 104 | func DecodeBig(hexNum string) *big.Int { 105 | str := strings.TrimPrefix(hexNum, "0x") 106 | createdNum := new(big.Int) 107 | createdNum.SetString(str, Base) 108 | 109 | return createdNum 110 | } 111 | -------------------------------------------------------------------------------- /hex/hex_test.go: -------------------------------------------------------------------------------- 1 | package hex 2 | 3 | import ( 4 | "encoding/hex" 5 | "math" 6 | "math/big" 7 | "testing" 8 | 9 | "github.com/stretchr/testify/assert" 10 | "github.com/stretchr/testify/require" 11 | ) 12 | 13 | func TestEncodeDecodeBig(t *testing.T) { 14 | b := big.NewInt(math.MaxInt64) 15 | e := EncodeBig(b) 16 | d := DecodeBig(e) 17 | assert.Equal(t, b.Uint64(), d.Uint64()) 18 | } 19 | 20 | // Define a struct for test cases 21 | type TestCase struct { 22 | input string 23 | output []byte 24 | err error 25 | } 26 | 27 | // Unit test function 28 | func TestDecodeHex(t *testing.T) { 29 | testCases := []TestCase{ 30 | {"0", []byte{0}, nil}, 31 | {"00", []byte{0}, nil}, 32 | {"0x0", []byte{0}, nil}, 33 | {"0x00", []byte{0}, nil}, 34 | {"1", []byte{1}, nil}, 35 | {"01", []byte{1}, nil}, 36 | {"", []byte{}, hex.ErrLength}, 37 | {"0x", []byte{}, hex.ErrLength}, 38 | {"zz", []byte{}, hex.InvalidByteError('z')}, 39 | } 40 | 41 | for _, tc := range testCases { 42 | t.Run(tc.input, func(t *testing.T) { 43 | output, err := DecodeHex(tc.input) 44 | if tc.err != nil { 45 | require.Error(t, tc.err, err) 46 | } else { 47 | require.NoError(t, err) 48 | } 49 | require.Equal(t, output, tc.output) 50 | }) 51 | } 52 | } 53 | -------------------------------------------------------------------------------- /jsonrpcclient/client.go: -------------------------------------------------------------------------------- 1 | package client 2 | 3 | import ( 4 | "bytes" 5 | "encoding/json" 6 | "fmt" 7 | "io" 8 | "net/http" 9 | 10 | "github.com/0xPolygonHermez/zkevm-bridge-service/jsonrpcclient/types" 11 | "github.com/0xPolygonHermez/zkevm-bridge-service/log" 12 | ) 13 | 14 | const jsonRPCVersion = "2.0" 15 | 16 | // Client defines typed wrappers for the zkEVM RPC API. 17 | type Client struct { 18 | url string 19 | } 20 | 21 | // NewClient creates an instance of client 22 | func NewClient(url string) *Client { 23 | return &Client{ 24 | url: url, 25 | } 26 | } 27 | 28 | // JSONRPCCall executes a 2.0 JSON RPC HTTP Post Request to the provided URL with 29 | // the provided method and parameters, which is compatible with the Ethereum 30 | // JSON RPC Server. 
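// Example with an illustrative endpoint: res, err := JSONRPCCall("http://localhost:8545", "zkevm_getLatestGlobalExitRoot")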
31 | func JSONRPCCall(url, method string, parameters ...interface{}) (types.Response, error) { 32 | params, err := json.Marshal(parameters) 33 | if err != nil { 34 | return types.Response{}, err 35 | } 36 | 37 | request := types.Request{ 38 | JSONRPC: jsonRPCVersion, 39 | ID: float64(1), 40 | Method: method, 41 | Params: params, 42 | } 43 | 44 | httpRes, err := sendJSONRPC_HTTPRequest(url, request) 45 | if err != nil { 46 | return types.Response{}, err 47 | } 48 | 49 | resBody, err := io.ReadAll(httpRes.Body) 50 | if err != nil { 51 | return types.Response{}, err 52 | } 53 | defer func() { 54 | err := httpRes.Body.Close() 55 | if err != nil { 56 | log.Errorf("error closing response body in rpc call. Request: %+v", request) 57 | } 58 | }() 59 | 60 | if httpRes.StatusCode != http.StatusOK { 61 | return types.Response{}, fmt.Errorf("%v - %v", httpRes.StatusCode, string(resBody)) 62 | } 63 | 64 | var res types.Response 65 | err = json.Unmarshal(resBody, &res) 66 | if err != nil { 67 | return types.Response{}, err 68 | } 69 | return res, nil 70 | } 71 | 72 | // BatchCall used in batch requests to send multiple methods and parameters at once 73 | type BatchCall struct { 74 | Method string 75 | Parameters []interface{} 76 | } 77 | 78 | func sendJSONRPC_HTTPRequest(url string, payload interface{}) (*http.Response, error) { 79 | reqBody, err := json.Marshal(payload) 80 | if err != nil { 81 | return nil, err 82 | } 83 | 84 | reqBodyReader := bytes.NewReader(reqBody) 85 | httpReq, err := http.NewRequest(http.MethodPost, url, reqBodyReader) 86 | if err != nil { 87 | return nil, err 88 | } 89 | 90 | httpReq.Header.Add("Content-type", "application/json") 91 | 92 | httpRes, err := http.DefaultClient.Do(httpReq) 93 | if err != nil { 94 | return nil, err 95 | } 96 | 97 | return httpRes, nil 98 | } 99 | -------------------------------------------------------------------------------- /jsonrpcclient/types/codec.go: -------------------------------------------------------------------------------- 1 | package types 2 | 3 | import ( 4 | "encoding/json" 5 | ) 6 | 7 | const ( 8 | // Earliest contains the string to represent the earliest block known. 9 | Earliest = "earliest" 10 | // Latest contains the string to represent the latest block known. 11 | Latest = "latest" 12 | // Pending contains the string to represent the pending block known. 13 | Pending = "pending" 14 | // Safe contains the string to represent the last virtualized block known. 15 | Safe = "safe" 16 | // Finalized contains the string to represent the last verified block known. 
17 | Finalized = "finalized" 18 | 19 | // EIP-1898: https://eips.ethereum.org/EIPS/eip-1898 // 20 | 21 | // BlockNumberKey is the key for the block number for EIP-1898 22 | BlockNumberKey = "blockNumber" 23 | // BlockHashKey is the key for the block hash for EIP-1898 24 | BlockHashKey = "blockHash" 25 | // RequireCanonicalKey is the key for the require canonical for EIP-1898 26 | RequireCanonicalKey = "requireCanonical" 27 | ) 28 | 29 | // Request is a jsonrpc request 30 | type Request struct { 31 | JSONRPC string `json:"jsonrpc"` 32 | ID interface{} `json:"id"` 33 | Method string `json:"method"` 34 | Params json.RawMessage `json:"params,omitempty"` 35 | } 36 | 37 | // Response is a jsonrpc success response 38 | type Response struct { 39 | JSONRPC string 40 | ID interface{} 41 | Result json.RawMessage 42 | Error *ErrorObject 43 | } 44 | 45 | // ErrorObject is a jsonrpc error 46 | type ErrorObject struct { 47 | Code int `json:"code"` 48 | Message string `json:"message"` 49 | Data *ArgBytes `json:"data,omitempty"` 50 | } 51 | 52 | // RPCError returns an instance of RPCError from the 53 | // data available in the ErrorObject instance 54 | func (e *ErrorObject) RPCError() RPCError { 55 | var data []byte 56 | if e.Data != nil { 57 | data = *e.Data 58 | } 59 | rpcError := NewRPCErrorWithData(e.Code, e.Message, data) 60 | return *rpcError 61 | } 62 | 63 | // NewResponse returns Success/Error response object 64 | func NewResponse(req Request, reply []byte, err Error) Response { 65 | var result json.RawMessage 66 | if reply != nil { 67 | result = reply 68 | } 69 | 70 | var errorObj *ErrorObject 71 | if err != nil { 72 | errorObj = &ErrorObject{ 73 | Code: err.ErrorCode(), 74 | Message: err.Error(), 75 | } 76 | if err.ErrorData() != nil { 77 | errorObj.Data = ArgBytesPtr(err.ErrorData()) 78 | } 79 | } 80 | 81 | return Response{ 82 | JSONRPC: req.JSONRPC, 83 | ID: req.ID, 84 | Result: result, 85 | Error: errorObj, 86 | } 87 | } 88 | 89 | // MarshalJSON customizes the JSON representation of the response. 
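// When Error is non-nil only the "error" member is serialized; otherwise only the "result" member is, so a marshaled response never carries both.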
90 | func (r Response) MarshalJSON() ([]byte, error) { 91 | if r.Error != nil { 92 | return json.Marshal(struct { 93 | JSONRPC string `json:"jsonrpc"` 94 | ID interface{} `json:"id"` 95 | Error *ErrorObject `json:"error"` 96 | }{ 97 | JSONRPC: r.JSONRPC, 98 | ID: r.ID, 99 | Error: r.Error, 100 | }) 101 | } 102 | 103 | return json.Marshal(struct { 104 | JSONRPC string `json:"jsonrpc"` 105 | ID interface{} `json:"id"` 106 | Result json.RawMessage `json:"result"` 107 | }{ 108 | JSONRPC: r.JSONRPC, 109 | ID: r.ID, 110 | Result: r.Result, 111 | }) 112 | } 113 | 114 | // Bytes return the serialized response 115 | func (s Response) Bytes() ([]byte, error) { 116 | return json.Marshal(s) 117 | } 118 | -------------------------------------------------------------------------------- /jsonrpcclient/types/codec_test.go: -------------------------------------------------------------------------------- 1 | package types 2 | 3 | import ( 4 | "encoding/json" 5 | "testing" 6 | 7 | "github.com/stretchr/testify/assert" 8 | "github.com/stretchr/testify/require" 9 | ) 10 | 11 | func TestResponseMarshal(t *testing.T) { 12 | testCases := []struct { 13 | Name string 14 | JSONRPC string 15 | ID interface{} 16 | Result interface{} 17 | Error Error 18 | 19 | ExpectedJSON string 20 | }{ 21 | { 22 | Name: "Error is nil", 23 | JSONRPC: "2.0", 24 | ID: 1, 25 | Result: struct { 26 | A string `json:"A"` 27 | }{"A"}, 28 | Error: nil, 29 | 30 | ExpectedJSON: "{\"jsonrpc\":\"2.0\",\"id\":1,\"result\":{\"A\":\"A\"}}", 31 | }, 32 | { 33 | Name: "Result is nil and Error is not nil", 34 | JSONRPC: "2.0", 35 | ID: 1, 36 | Result: nil, 37 | Error: NewRPCError(123, "m"), 38 | 39 | ExpectedJSON: "{\"jsonrpc\":\"2.0\",\"id\":1,\"error\":{\"code\":123,\"message\":\"m\"}}", 40 | }, 41 | { 42 | Name: "Result is not nil and Error is not nil", 43 | JSONRPC: "2.0", 44 | ID: 1, 45 | Result: struct { 46 | A string `json:"A"` 47 | }{"A"}, 48 | Error: NewRPCError(123, "m"), 49 | 50 | ExpectedJSON: "{\"jsonrpc\":\"2.0\",\"id\":1,\"error\":{\"code\":123,\"message\":\"m\"}}", 51 | }, 52 | } 53 | 54 | for _, testCase := range testCases { 55 | t.Run(testCase.Name, func(t *testing.T) { 56 | req := Request{ 57 | JSONRPC: testCase.JSONRPC, 58 | ID: testCase.ID, 59 | } 60 | var result []byte 61 | if testCase.Result != nil { 62 | r, err := json.Marshal(testCase.Result) 63 | require.NoError(t, err) 64 | result = r 65 | } 66 | 67 | res := NewResponse(req, result, testCase.Error) 68 | bytes, err := json.Marshal(res) 69 | require.NoError(t, err) 70 | assert.Equal(t, testCase.ExpectedJSON, string(bytes)) 71 | }) 72 | } 73 | } 74 | -------------------------------------------------------------------------------- /jsonrpcclient/types/errors.go: -------------------------------------------------------------------------------- 1 | package types 2 | 3 | import "fmt" 4 | 5 | const ( 6 | // DefaultErrorCode rpc default error code 7 | DefaultErrorCode = -32000 8 | // RevertedErrorCode error code for reverted txs 9 | RevertedErrorCode = 3 10 | // InvalidRequestErrorCode error code for invalid requests 11 | InvalidRequestErrorCode = -32600 12 | // NotFoundErrorCode error code for not found objects 13 | NotFoundErrorCode = -32601 14 | // InvalidParamsErrorCode error code for invalid parameters 15 | InvalidParamsErrorCode = -32602 16 | // ParserErrorCode error code for parsing errors 17 | ParserErrorCode = -32700 18 | ) 19 | 20 | var ( 21 | // ErrBatchRequestsDisabled returned by the server when a batch request 22 | // is detected and the batch requests are disabled via 
configuration 23 | ErrBatchRequestsDisabled = fmt.Errorf("batch requests are disabled") 24 | 25 | // ErrBatchRequestsLimitExceeded returned by the server when a batch request 26 | // is detected and the number of requests are greater than the configured limit. 27 | ErrBatchRequestsLimitExceeded = fmt.Errorf("batch requests limit exceeded") 28 | ) 29 | 30 | // Error interface 31 | type Error interface { 32 | Error() string 33 | ErrorCode() int 34 | ErrorData() []byte 35 | } 36 | 37 | // RPCError represents an error returned by a JSON RPC endpoint. 38 | type RPCError struct { 39 | err string 40 | code int 41 | data []byte 42 | } 43 | 44 | // NewRPCError creates a new error instance to be returned by the RPC endpoints 45 | func NewRPCError(code int, err string, args ...interface{}) *RPCError { 46 | return NewRPCErrorWithData(code, err, nil, args...) 47 | } 48 | 49 | // NewRPCErrorWithData creates a new error instance with data to be returned by the RPC endpoints 50 | func NewRPCErrorWithData(code int, err string, data []byte, args ...interface{}) *RPCError { 51 | var errMessage string 52 | if len(args) > 0 { 53 | errMessage = fmt.Sprintf(err, args...) 54 | } else { 55 | errMessage = err 56 | } 57 | return &RPCError{code: code, err: errMessage, data: data} 58 | } 59 | 60 | // Error returns the error message. 61 | func (e RPCError) Error() string { 62 | return e.err 63 | } 64 | 65 | // ErrorCode returns the error code. 66 | func (e *RPCError) ErrorCode() int { 67 | return e.code 68 | } 69 | 70 | // ErrorData returns the error data. 71 | func (e *RPCError) ErrorData() []byte { 72 | return e.data 73 | } 74 | -------------------------------------------------------------------------------- /jsonrpcclient/zkevm.go: -------------------------------------------------------------------------------- 1 | package client 2 | 3 | import ( 4 | "context" 5 | "encoding/json" 6 | 7 | "github.com/0xPolygonHermez/zkevm-bridge-service/jsonrpcclient/types" 8 | "github.com/ethereum/go-ethereum/common" 9 | ) 10 | 11 | // ExitRootsByGER returns the exit roots accordingly to the provided Global Exit Root 12 | func (c *Client) ExitRootsByGER(ctx context.Context, globalExitRoot common.Hash) (*types.ExitRoots, error) { 13 | response, err := JSONRPCCall(c.url, "zkevm_getExitRootsByGER", globalExitRoot.String()) 14 | if err != nil { 15 | return nil, err 16 | } 17 | 18 | if response.Error != nil { 19 | return nil, response.Error.RPCError() 20 | } 21 | 22 | var result *types.ExitRoots 23 | err = json.Unmarshal(response.Result, &result) 24 | if err != nil { 25 | return nil, err 26 | } 27 | 28 | return result, nil 29 | } 30 | 31 | // GetLatestGlobalExitRoot returns the latest global exit root 32 | func (c *Client) GetLatestGlobalExitRoot(ctx context.Context) (common.Hash, error) { 33 | response, err := JSONRPCCall(c.url, "zkevm_getLatestGlobalExitRoot") 34 | if err != nil { 35 | return common.Hash{}, err 36 | } 37 | 38 | if response.Error != nil { 39 | return common.Hash{}, response.Error.RPCError() 40 | } 41 | 42 | var result string 43 | err = json.Unmarshal(response.Result, &result) 44 | if err != nil { 45 | return common.Hash{}, err 46 | } 47 | 48 | return common.HexToHash(result), nil 49 | } 50 | -------------------------------------------------------------------------------- /log/config.go: -------------------------------------------------------------------------------- 1 | package log 2 | 3 | // Config for log 4 | type Config struct { 5 | // Environment defining the log format ("production" or "development"). 
6 | // In development mode enables development mode (which makes DPanicLevel logs panic), uses a console encoder, writes to standard error, and disables sampling. Stacktraces are automatically included on logs of WarnLevel and above. 7 | // Check [here](https://pkg.go.dev/go.uber.org/zap@v1.24.0#NewDevelopmentConfig) 8 | Environment LogEnvironment `mapstructure:"Environment" jsonschema:"enum=production,enum=development"` 9 | // Level of log. As lower value more logs are going to be generated 10 | Level string `mapstructure:"Level" jsonschema:"enum=debug,enum=info,enum=warn,enum=error,enum=dpanic,enum=panic,enum=fatal"` 11 | // Outputs 12 | Outputs []string `mapstructure:"Outputs"` 13 | } 14 | -------------------------------------------------------------------------------- /log/log_test.go: -------------------------------------------------------------------------------- 1 | package log 2 | 3 | import ( 4 | "testing" 5 | ) 6 | 7 | func TestLogNotInitialized(t *testing.T) { 8 | Info("Test log.Info", " value is ", 10) 9 | Infof("Test log.Infof %d", 10) 10 | Infow("Test log.Infow", "value", 10) 11 | Debugf("Test log.Debugf %d", 10) 12 | Error("Test log.Error", " value is ", 10) 13 | Errorf("Test log.Errorf %d", 10) 14 | Errorw("Test log.Errorw", "value", 10) 15 | Warnf("Test log.Warnf %d", 10) 16 | Warnw("Test log.Warnw", "value", 10) 17 | } 18 | 19 | func TestLog(t *testing.T) { 20 | cfg := Config{ 21 | Environment: EnvironmentDevelopment, 22 | Level: "debug", 23 | Outputs: []string{"stderr"}, //[]string{"stdout", "test.log"} 24 | } 25 | 26 | Init(cfg) 27 | 28 | Info("Test log.Info", " value is ", 10) 29 | Infof("Test log.Infof %d", 10) 30 | Infow("Test log.Infow", "value", 10) 31 | Debugf("Test log.Debugf %d", 10) 32 | Error("Test log.Error", " value is ", 10) 33 | Errorf("Test log.Errorf %d", 10) 34 | Errorw("Test log.Errorw", "value", 10) 35 | Warnf("Test log.Warnf %d", 10) 36 | Warnw("Test log.Warnw", "value", 10) 37 | } 38 | -------------------------------------------------------------------------------- /metrics/api.go: -------------------------------------------------------------------------------- 1 | package metrics 2 | 3 | const ( 4 | //Endpoint the endpoint for exposing the metrics 5 | Endpoint = "/metrics" 6 | ) 7 | -------------------------------------------------------------------------------- /metrics/config.go: -------------------------------------------------------------------------------- 1 | package metrics 2 | 3 | // Config represents the configuration of the metrics 4 | type Config struct { 5 | // Host is the address to bind the metrics server 6 | Host string `mapstructure:"Host"` 7 | // Port is the port to bind the metrics server 8 | Port int `mapstructure:"Port"` 9 | // Enabled is the flag to enable/disable the metrics server 10 | Enabled bool `mapstructure:"Enabled"` 11 | } 12 | -------------------------------------------------------------------------------- /packaging/deb/zkevm-bridge/DEBIAN/postinst: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # This is a postinstallation script so the service can be configured and started when requested 3 | # 4 | sudo adduser --disabled-password --disabled-login --shell /usr/sbin/nologin --quiet --system --no-create-home --home /nonexistent zkevm-bridge 5 | if [ -d "/opt/zkevm-bridge" ] 6 | then 7 | echo "Directory /opt/zkevm-bridge exists." 
8 | else 9 | sudo mkdir -p /opt/zkevm-bridge 10 | sudo chown -R zkevm-bridge /opt/zkevm-bridge 11 | fi 12 | sudo systemctl daemon-reload -------------------------------------------------------------------------------- /packaging/deb/zkevm-bridge/DEBIAN/postrm: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | ############### 4 | # Remove zkevm-bridge installs 5 | ############## 6 | sudo rm -rf /lib/systemd/system/zkevm-bridge.service 7 | sudo deluser zkevm-bridge 8 | sudo systemctl daemon-reload -------------------------------------------------------------------------------- /packaging/systemd/zkevm-bridge.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=zkevm-bridge 3 | StartLimitIntervalSec=500 4 | StartLimitBurst=5 5 | 6 | [Service] 7 | Restart=on-failure 8 | RestartSec=5s 9 | ExecStart=/usr/bin/zkevm-bridge 10 | Type=simple 11 | KillSignal=SIGINT 12 | User=zkevm-bridge 13 | TimeoutStopSec=120 14 | 15 | [Install] 16 | WantedBy=multi-user.target -------------------------------------------------------------------------------- /scripts/cmd/dependencies.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "github.com/0xPolygonHermez/zkevm-bridge-service/scripts/cmd/dependencies" 5 | cli "github.com/urfave/cli/v2" 6 | ) 7 | 8 | func updateDeps(ctx *cli.Context) error { 9 | cfg := &dependencies.Config{ 10 | Images: &dependencies.ImagesConfig{ 11 | Names: []string{"hermeznetwork/geth-zkevm-contracts", "hermeznetwork/zkprover-local"}, 12 | TargetFilePath: "../../../../docker-compose.yml", 13 | }, 14 | PB: &dependencies.PBConfig{ 15 | TargetDirPath: "../../../proto/src", 16 | SourceRepo: "https://github.com/0xPolygonHermez/zkevm-comms-protocol.git", 17 | }, 18 | TV: &dependencies.TVConfig{ 19 | TargetDirPath: "../../../test/vectors/src", 20 | SourceRepo: "https://github.com/0xPolygonHermez/zkevm-testvectors.git", 21 | }, 22 | } 23 | 24 | return dependencies.NewManager(cfg).Run() 25 | } 26 | -------------------------------------------------------------------------------- /scripts/cmd/dependencies/files.go: -------------------------------------------------------------------------------- 1 | package dependencies 2 | 3 | import ( 4 | "io" 5 | "os" 6 | "path" 7 | "runtime" 8 | "strings" 9 | 10 | "github.com/0xPolygonHermez/zkevm-bridge-service/log" 11 | "github.com/spf13/afero" 12 | ) 13 | 14 | func updateFiles(fs afero.Fs, sourceDir, targetDir string) error { 15 | const bufferSize = 20 16 | err := afero.Walk(fs, targetDir, func(wpath string, info os.FileInfo, err error) error { 17 | if err != nil { 18 | return err 19 | } 20 | if info == nil || info.IsDir() { 21 | return nil 22 | } 23 | relativePath := strings.ReplaceAll(wpath, targetDir, "") 24 | sourcePath := path.Join(sourceDir, relativePath) 25 | 26 | sourceFile, err := fs.Open(sourcePath) 27 | if os.IsNotExist(err) { 28 | // we allow source files to not exist, for instance, test vectors that we 29 | // have in zkevm-node but are not present in the upstream repo 30 | return nil 31 | } 32 | if err != nil { 33 | return err 34 | } 35 | defer func() { 36 | if err := sourceFile.Close(); err != nil { 37 | log.Errorf("Could not close %s: %v", sourceFile.Name(), err) 38 | } 39 | }() 40 | targetFile, err := fs.OpenFile(wpath, os.O_RDWR|os.O_TRUNC, 0644) //nolint:mnd 41 | if err != nil { 42 | return err 43 | } 44 | defer func() { 45 | if err := targetFile.Close(); 
err != nil { 46 | log.Errorf("Could not close %s: %v", targetFile.Name(), err) 47 | } 48 | }() 49 | buf := make([]byte, bufferSize) 50 | for { 51 | n, err := sourceFile.Read(buf) 52 | if err != nil && err != io.EOF { 53 | return err 54 | } 55 | if n == 0 { 56 | break 57 | } 58 | if _, err := targetFile.Write(buf[:n]); err != nil { 59 | return err 60 | } 61 | } 62 | return nil 63 | }) 64 | return err 65 | } 66 | 67 | func getTargetPath(targetPath string) string { 68 | if strings.HasPrefix(targetPath, "/") { 69 | return targetPath 70 | } 71 | _, filename, _, _ := runtime.Caller(1) 72 | 73 | return path.Join(path.Dir(filename), targetPath) 74 | } 75 | -------------------------------------------------------------------------------- /scripts/cmd/dependencies/github_test.go: -------------------------------------------------------------------------------- 1 | package dependencies 2 | 3 | import ( 4 | "path" 5 | "testing" 6 | 7 | "github.com/spf13/afero" 8 | "github.com/stretchr/testify/require" 9 | ) 10 | 11 | func Test_cloneTargetRepo(t *testing.T) { 12 | var appFs = afero.NewMemMapFs() 13 | 14 | gm := newGithubManager(appFs, "", "") 15 | 16 | tmpdir, err := gm.cloneTargetRepo("https://github.com/git-fixtures/basic.git") 17 | require.NoError(t, err) 18 | 19 | expectedChangelog := "Initial changelog\n" 20 | actualChangelog, err := afero.ReadFile(appFs, path.Join(tmpdir, "CHANGELOG")) 21 | require.NoError(t, err) 22 | 23 | require.Equal(t, expectedChangelog, string(actualChangelog)) 24 | } 25 | -------------------------------------------------------------------------------- /scripts/cmd/dependencies/manager.go: -------------------------------------------------------------------------------- 1 | package dependencies 2 | 3 | type dependency interface { 4 | update() error 5 | } 6 | 7 | // Manager is the type with knowledge about how to handle dependencies. 8 | type Manager struct { 9 | cfg *Config 10 | } 11 | 12 | // Config has the configurations options for all the updaters. 13 | type Config struct { 14 | Images *ImagesConfig 15 | PB *PBConfig 16 | TV *TVConfig 17 | } 18 | 19 | // NewManager is the Manager constructor. 20 | func NewManager(cfg *Config) *Manager { 21 | return &Manager{ 22 | cfg: cfg, 23 | } 24 | } 25 | 26 | // Run is the main entry point, it executes all the configured dependency 27 | // updates. 28 | func (m *Manager) Run() error { 29 | iu := newImageUpdater(m.cfg.Images.Names, m.cfg.Images.TargetFilePath) 30 | pb := newPBUpdater(m.cfg.PB.SourceRepo, m.cfg.PB.TargetDirPath) 31 | tv := newTestVectorUpdater(m.cfg.TV.SourceRepo, m.cfg.TV.TargetDirPath) 32 | 33 | for _, dep := range []dependency{iu, pb, tv} { 34 | if err := dep.update(); err != nil { 35 | return err 36 | } 37 | } 38 | return nil 39 | } 40 | -------------------------------------------------------------------------------- /scripts/cmd/dependencies/protobuffers.go: -------------------------------------------------------------------------------- 1 | package dependencies 2 | 3 | import ( 4 | "os" 5 | "os/exec" 6 | 7 | "github.com/0xPolygonHermez/zkevm-bridge-service/log" 8 | "github.com/spf13/afero" 9 | ) 10 | 11 | // PBConfig is the configuration for the protobuffers updater. 
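// Example values wired up by the updatedeps command (see scripts/cmd/dependencies.go above): SourceRepo "https://github.com/0xPolygonHermez/zkevm-comms-protocol.git" and TargetDirPath "../../../proto/src".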
12 | type PBConfig struct { 13 | SourceRepo string 14 | TargetDirPath string 15 | } 16 | 17 | type pbUpdater struct { 18 | fs afero.Fs 19 | 20 | gm *githubManager 21 | 22 | sourceRepo string 23 | targetDirPath string 24 | } 25 | 26 | func newPBUpdater(sourceRepo, targetDirPath string) *pbUpdater { 27 | aferoFs := afero.NewOsFs() 28 | 29 | gm := newGithubManager(aferoFs, os.Getenv("UPDATE_DEPS_SSH_PK"), os.Getenv("GITHUB_TOKEN")) 30 | 31 | return &pbUpdater{ 32 | fs: aferoFs, 33 | 34 | gm: gm, 35 | 36 | sourceRepo: sourceRepo, 37 | targetDirPath: targetDirPath, 38 | } 39 | } 40 | 41 | func (pb *pbUpdater) update() error { 42 | log.Infof("Cloning %q...", pb.sourceRepo) 43 | tmpdir, err := pb.gm.cloneTargetRepo(pb.sourceRepo) 44 | if err != nil { 45 | return err 46 | } 47 | 48 | targetDirPath := getTargetPath(pb.targetDirPath) 49 | 50 | log.Infof("Updating files %q...", pb.sourceRepo) 51 | err = updateFiles(pb.fs, tmpdir, targetDirPath) 52 | if err != nil { 53 | return err 54 | } 55 | 56 | log.Infof("Generating stubs from proto files...") 57 | 58 | c := exec.Command("make", "generate-code-from-proto") 59 | c.Dir = "." 60 | c.Stdout = os.Stdout 61 | c.Stderr = os.Stderr 62 | return c.Run() 63 | } 64 | -------------------------------------------------------------------------------- /scripts/cmd/dependencies/testvectors.go: -------------------------------------------------------------------------------- 1 | package dependencies 2 | 3 | import ( 4 | "os" 5 | 6 | "github.com/0xPolygonHermez/zkevm-bridge-service/log" 7 | "github.com/spf13/afero" 8 | ) 9 | 10 | // TVConfig is the configuration for the test vector updater. 11 | type TVConfig struct { 12 | TargetDirPath string 13 | SourceRepo string 14 | } 15 | 16 | type testVectorUpdater struct { 17 | fs afero.Fs 18 | 19 | gm *githubManager 20 | 21 | sourceRepo string 22 | targetDirPath string 23 | } 24 | 25 | func newTestVectorUpdater(sourceRepo, targetDirPath string) *testVectorUpdater { 26 | aferoFs := afero.NewOsFs() 27 | 28 | gm := newGithubManager(aferoFs, os.Getenv("UPDATE_DEPS_SSH_PK"), os.Getenv("GITHUB_TOKEN")) 29 | 30 | return &testVectorUpdater{ 31 | fs: aferoFs, 32 | 33 | gm: gm, 34 | 35 | sourceRepo: sourceRepo, 36 | targetDirPath: targetDirPath, 37 | } 38 | } 39 | 40 | func (tu *testVectorUpdater) update() error { 41 | log.Infof("Cloning %q...", tu.sourceRepo) 42 | tmpdir, err := tu.gm.cloneTargetRepo(tu.sourceRepo) 43 | if err != nil { 44 | return err 45 | } 46 | 47 | targetDirPath := getTargetPath(tu.targetDirPath) 48 | 49 | log.Infof("Updating files %q...", tu.sourceRepo) 50 | return updateFiles(tu.fs, tmpdir, targetDirPath) 51 | } 52 | -------------------------------------------------------------------------------- /scripts/cmd/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "os" 5 | 6 | "github.com/0xPolygonHermez/zkevm-bridge-service/log" 7 | cli "github.com/urfave/cli/v2" 8 | ) 9 | 10 | func main() { 11 | app := cli.NewApp() 12 | app.Name = "zkevm-bridge-scripts" 13 | app.Commands = []*cli.Command{ 14 | { 15 | Name: "updatedeps", 16 | Usage: "Updates external dependencies like images, test vectors or proto files", 17 | Action: updateDeps, 18 | Flags: []cli.Flag{}, 19 | }, 20 | } 21 | 22 | err := app.Run(os.Args) 23 | if err != nil { 24 | log.Fatal(err) 25 | os.Exit(1) 26 | } 27 | } 28 | -------------------------------------------------------------------------------- /scripts/generate-smartcontracts-bindings.sh: 
-------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # This script generates the smart contracts bindings for the smart contracts 4 | if ! command -v abigen &> /dev/null 5 | then 6 | echo "abigen binary could not be found" 7 | exit 1 8 | fi 9 | 10 | OUTPUT_BASE_DIR=../etherman/smartcontracts/ 11 | 12 | set -e 13 | 14 | gen() { 15 | local package=$1 16 | 17 | mkdir -p ${OUTPUT_BASE_DIR}/${package} 18 | 19 | abigen --bin ${OUTPUT_BASE_DIR}/bin/${package}.bin --abi ${OUTPUT_BASE_DIR}/abi/${package}.abi --pkg=${package} --out=${OUTPUT_BASE_DIR}/${package}/${package}.go 20 | } 21 | 22 | gen_from_json(){ 23 | local package=$1 24 | mkdir -p ${OUTPUT_BASE_DIR}/${package} 25 | abigen --combined-json ${OUTPUT_BASE_DIR}/json/${package}.json --pkg=${package} --out=${OUTPUT_BASE_DIR}/${package}/${package}.go 26 | } 27 | 28 | #gen_from_json pingreceiver 29 | gen claimcompressor -------------------------------------------------------------------------------- /server/config.go: -------------------------------------------------------------------------------- 1 | package server 2 | 3 | import "github.com/0xPolygonHermez/zkevm-bridge-service/db" 4 | 5 | // Config struct 6 | type Config struct { 7 | // GRPCPort is TCP port to listen by gRPC server 8 | GRPCPort string `mapstructure:"GRPCPort"` 9 | // HTTPPort is TCP port to listen by HTTP/REST gateway 10 | HTTPPort string `mapstructure:"HTTPPort"` 11 | // CacheSize is the buffer size of the lru-cache 12 | CacheSize int `mapstructure:"CacheSize"` 13 | // DefaultPageLimit is the default page limit for pagination 14 | DefaultPageLimit uint32 `mapstructure:"DefaultPageLimit"` 15 | // MaxPageLimit is the maximum page limit for pagination 16 | MaxPageLimit uint32 `mapstructure:"MaxPageLimit"` 17 | // Version is the version of the bridge service 18 | BridgeVersion string 19 | // FinalizedGEREnabled is used to compute the MT proofs based on the finalized GERs 20 | FinalizedGEREnabled bool `mapstructure:"FinalizedGEREnabled"` 21 | // DB is the database config 22 | DB db.Config `mapstructure:"DB"` 23 | } 24 | -------------------------------------------------------------------------------- /server/interfaces.go: -------------------------------------------------------------------------------- 1 | package server 2 | 3 | import ( 4 | "context" 5 | 6 | "github.com/0xPolygonHermez/zkevm-bridge-service/etherman" 7 | "github.com/ethereum/go-ethereum/common" 8 | ) 9 | 10 | type bridgeServiceStorage interface { 11 | Get(ctx context.Context, key []byte, dbTx interface{}) ([][]byte, error) 12 | GetRoot(ctx context.Context, depositCnt, network uint32, dbTx interface{}) ([]byte, error) 13 | GetDepositCountByRoot(ctx context.Context, root []byte, network uint32, dbTx interface{}) (uint32, error) 14 | GetLatestExitRoot(ctx context.Context, networkID, destNetwork uint32, dbTx interface{}) (*etherman.GlobalExitRoot, error) 15 | GetL1ExitRootByGER(ctx context.Context, ger common.Hash, dbTx interface{}) (*etherman.GlobalExitRoot, error) 16 | GetLatestTrustedExitRoot(ctx context.Context, networkID uint32, dbTx interface{}) (*etherman.GlobalExitRoot, error) 17 | GetClaim(ctx context.Context, index, originNetworkID, networkID uint32, dbTx interface{}) (*etherman.Claim, error) 18 | GetClaims(ctx context.Context, destAddr string, limit, offset uint32, dbTx interface{}) ([]*etherman.Claim, error) 19 | GetClaimCount(ctx context.Context, destAddr string, dbTx interface{}) (uint64, error) 20 | GetDeposit(ctx context.Context, depositCnt, networkID 
uint32, dbTx interface{}) (*etherman.Deposit, error) 21 | GetDeposits(ctx context.Context, destAddr string, limit, offset uint32, dbTx interface{}) ([]*etherman.Deposit, error) 22 | GetDepositCount(ctx context.Context, destAddr string, dbTx interface{}) (uint64, error) 23 | GetTokenWrapped(ctx context.Context, originalNetwork uint32, originalTokenAddress common.Address, dbTx interface{}) (*etherman.TokenWrapped, error) 24 | GetRollupExitLeavesByRoot(ctx context.Context, root common.Hash, dbTx interface{}) ([]etherman.RollupExitLeaf, error) 25 | GetPendingDepositsToClaim(ctx context.Context, destAddress common.Address, destNetwork, leafType, limit, offset uint32, fromNetwork int8, dbTx interface{}) ([]*etherman.Deposit, uint64, error) 26 | } 27 | -------------------------------------------------------------------------------- /server/service_test.go: -------------------------------------------------------------------------------- 1 | package server 2 | 3 | import ( 4 | "context" 5 | "testing" 6 | 7 | "github.com/0xPolygonHermez/zkevm-bridge-service/etherman" 8 | "github.com/ethereum/go-ethereum/common" 9 | "github.com/stretchr/testify/mock" 10 | "github.com/stretchr/testify/require" 11 | ) 12 | 13 | func TestGetClaimProofbyGER(t *testing.T) { 14 | cfg := Config{ 15 | CacheSize: 32, 16 | } 17 | mockStorage := newBridgeServiceStorageMock(t) 18 | sut := NewBridgeService(cfg, 32, []uint32{0, 1}, mockStorage) 19 | var ( 20 | depositCnt uint32 21 | networkID uint32 22 | ) 23 | GER := common.Hash{} 24 | deposit := ðerman.Deposit{} 25 | mockStorage.EXPECT().GetDeposit(mock.Anything, depositCnt, networkID, mock.Anything).Return(deposit, nil) 26 | exitRoot := etherman.GlobalExitRoot{ 27 | ExitRoots: []common.Hash{{}, {}}, 28 | } 29 | mockStorage.EXPECT().GetL1ExitRootByGER(mock.Anything, GER, mock.Anything).Return(&exitRoot, nil) 30 | node := [][]byte{{}, {}} 31 | mockStorage.EXPECT().Get(mock.Anything, mock.Anything, mock.Anything).Return(node, nil) 32 | smtProof, smtRollupProof, globaExitRoot, err := sut.GetClaimProofbyGER(context.Background(), depositCnt, networkID, GER, nil) 33 | require.NoError(t, err) 34 | require.NotNil(t, smtProof) 35 | require.NotNil(t, smtRollupProof) 36 | require.NotNil(t, globaExitRoot) 37 | } 38 | -------------------------------------------------------------------------------- /synchronizer/config.go: -------------------------------------------------------------------------------- 1 | package synchronizer 2 | 3 | import ( 4 | "github.com/0xPolygonHermez/zkevm-bridge-service/config/types" 5 | ) 6 | 7 | // Config represents the configuration of the synchronizer 8 | type Config struct { 9 | // SyncInterval is the delay interval between reading new rollup information 10 | SyncInterval types.Duration `mapstructure:"SyncInterval"` 11 | 12 | // SyncChunkSize is the number of blocks to sync on each chunk 13 | SyncChunkSize uint64 `mapstructure:"SyncChunkSize"` 14 | 15 | // ForceL2SyncChunk is a flag to force the L2 synchronizer to sync a chunk. 
This will disable part of the reorg protection 16 | ForceL2SyncChunk bool `mapstructure:"ForceL2SyncChunk"` 17 | } 18 | -------------------------------------------------------------------------------- /synchronizer/interfaces.go: -------------------------------------------------------------------------------- 1 | package synchronizer 2 | 3 | import ( 4 | "context" 5 | "math/big" 6 | 7 | "github.com/0xPolygonHermez/zkevm-bridge-service/etherman" 8 | rpcTypes "github.com/0xPolygonHermez/zkevm-bridge-service/jsonrpcclient/types" 9 | "github.com/ethereum/go-ethereum/common" 10 | "github.com/ethereum/go-ethereum/core/types" 11 | ) 12 | 13 | // ethermanInterface contains the methods required to interact with ethereum. 14 | type ethermanInterface interface { 15 | HeaderByNumber(ctx context.Context, number *big.Int) (*types.Header, error) 16 | GetRollupInfoByBlockRange(ctx context.Context, fromBlock uint64, toBlock *uint64) ([]etherman.Block, map[common.Hash][]etherman.Order, error) 17 | GetNetworkID() uint32 18 | } 19 | 20 | type storageInterface interface { 21 | GetLastBlock(ctx context.Context, networkID uint32, dbTx interface{}) (*etherman.Block, error) 22 | Rollback(ctx context.Context, dbTx interface{}) error 23 | BeginDBTransaction(ctx context.Context) (interface{}, error) 24 | Commit(ctx context.Context, dbTx interface{}) error 25 | AddBlock(ctx context.Context, block *etherman.Block, dbTx interface{}) (uint64, error) 26 | AddGlobalExitRoot(ctx context.Context, exitRoot *etherman.GlobalExitRoot, dbTx interface{}) error 27 | AddDeposit(ctx context.Context, deposit *etherman.Deposit, dbTx interface{}) (uint64, error) 28 | AddClaim(ctx context.Context, claim *etherman.Claim, dbTx interface{}) error 29 | AddTokenWrapped(ctx context.Context, tokenWrapped *etherman.TokenWrapped, dbTx interface{}) error 30 | Reset(ctx context.Context, blockNumber uint64, networkID uint32, dbTx interface{}) error 31 | GetPreviousBlock(ctx context.Context, networkID uint32, offset uint64, dbTx interface{}) (etherman.Block, error) 32 | GetNumberDeposits(ctx context.Context, origNetworkID uint32, blockNumber uint64, dbTx interface{}) (uint32, error) 33 | AddTrustedGlobalExitRoot(ctx context.Context, trustedExitRoot *etherman.GlobalExitRoot, dbTx interface{}) (bool, error) 34 | GetLatestL1SyncedExitRoot(ctx context.Context, dbTx interface{}) (*etherman.GlobalExitRoot, error) 35 | GetLatestTrustedExitRoot(ctx context.Context, networkID uint32, dbTx interface{}) (*etherman.GlobalExitRoot, error) 36 | CheckIfRootExists(ctx context.Context, root []byte, network uint32, dbTx interface{}) (bool, error) 37 | GetL1ExitRootByGER(ctx context.Context, ger common.Hash, dbTx interface{}) (*etherman.GlobalExitRoot, error) 38 | GetL2ExitRootsByGER(ctx context.Context, ger common.Hash, dbTx interface{}) ([]etherman.GlobalExitRoot, error) 39 | UpdateL2GER(ctx context.Context, ger etherman.GlobalExitRoot, dbTx interface{}) error 40 | AddRemoveL2GER(ctx context.Context, globalExitRoot etherman.GlobalExitRoot, dbTx interface{}) error 41 | } 42 | 43 | type bridgectrlInterface interface { 44 | AddDeposit(ctx context.Context, deposit *etherman.Deposit, dbTx interface{}) error 45 | ReorgMT(ctx context.Context, depositCount, networkID uint32, dbTx interface{}) error 46 | RollbackMT(ctx context.Context, networkID uint32, dbTx interface{}) error 47 | AddRollupExitLeaf(ctx context.Context, rollupLeaf etherman.RollupExitLeaf, dbTx interface{}) error 48 | } 49 | 50 | type zkEVMClientInterface interface { 51 | GetLatestGlobalExitRoot(ctx 
context.Context) (common.Hash, error) 52 | ExitRootsByGER(ctx context.Context, globalExitRoot common.Hash) (*rpcTypes.ExitRoots, error) 53 | } 54 | -------------------------------------------------------------------------------- /test/client/config.go: -------------------------------------------------------------------------------- 1 | package client 2 | 3 | import "github.com/ethereum/go-ethereum/common" 4 | 5 | // Config is a client config 6 | type Config struct { 7 | L1NodeURL string `mapstructure:"L1NodeURL"` 8 | L2NodeURL string `mapstructure:"L2NodeURL"` 9 | BridgeURL string `mapstructure:"BridgeURL"` 10 | L1BridgeAddr common.Address `mapstructure:"L1BridgeAddr"` 11 | L2BridgeAddr common.Address `mapstructure:"L2BridgeAddr"` 12 | } 13 | -------------------------------------------------------------------------------- /test/config/aggoracle/config.toml: -------------------------------------------------------------------------------- 1 | 2 | RPCURL = "http://localhost:8123" 3 | 4 | rollupCreationBlockNumber = "136" 5 | rollupManagerCreationBlockNumber = "1" 6 | genesisBlockNumber = "136" 7 | [L1Config] 8 | chainId = "1337" 9 | polygonZkEVMGlobalExitRootAddress = "0x8A791620dd6260079BF849Dc5567aDC3F2FdC318" 10 | polygonRollupManagerAddress = "0xB7f8BC63BbcaD18155201308C8f3540b07f84F5e" 11 | polTokenAddress = "0x5FbDB2315678afecb367f032d93F642f64180aa3" 12 | polygonZkEVMAddress = "0x8dAF17A20c9DBA35f005b6324F493785D239719d" 13 | 14 | [L2Config] 15 | GlobalExitRootAddr = "0x712516e61C8B383dF4A63CFe83d7701Bce54B03e" 16 | 17 | [Log] 18 | Environment = "development" # "production" or "development" 19 | Level = "debug" 20 | Outputs = ["stderr"] 21 | 22 | [AggOracle] 23 | TargetChainType="EVM" 24 | URLRPCL1="http://localhost:8545" 25 | BlockFinality="FinalizedBlock" 26 | WaitPeriodNextGER="1s" 27 | [AggOracle.EVMSender] 28 | GlobalExitRootL2="0x712516e61C8B383dF4A63CFe83d7701Bce54B03e" 29 | URLRPCL2="http://localhost:8123" # This can be replaced with desired L2 e.g. OP Geth RPC 30 | ChainIDL2=1001 31 | GasOffset=0 32 | WaitPeriodMonitorTx="1s" 33 | [AggOracle.EVMSender.EthTxManager] 34 | FrequencyToMonitorTxs = "1s" 35 | WaitTxToBeMined = "1s" 36 | GetReceiptMaxTime = "1s" 37 | GetReceiptWaitInterval = "1s" 38 | PrivateKeys = [ 39 | {Path = "/app/aggoracle.keystore", Password = "testonly"}, 40 | ] 41 | ForcedGas = 0 42 | GasPriceMarginFactor = 1 43 | MaxGasPriceLimit = 0 44 | StoragePath = "/tmp/ethtxmanager-sequencesender.json" 45 | ReadPendingL1Txs = false 46 | SafeStatusL1NumberOfBlocks = 5 47 | FinalizedStatusL1NumberOfBlocks = 10 48 | [AggOracle.EVMSender.EthTxManager.Etherman] 49 | URL = "http://localhost:8123" # This can be replaced with desired L2 e.g. 
OP Geth RPC 50 | MultiGasProvider = false 51 | L1ChainID = 1001 52 | HTTPHeaders = [] 53 | 54 | [LastGERSync] 55 | DBPath = "/tmp/lastgersync" 56 | BlockFinality = "LatestBlock" 57 | InitialBlockNum = 0 58 | GlobalExitRootL2Addr = "0x712516e61C8B383dF4A63CFe83d7701Bce54B03e" 59 | RetryAfterErrorPeriod = "1s" 60 | MaxRetryAttemptsAfterError = -1 61 | WaitForNewBlocksPeriod = "1s" 62 | DownloadBufferSize = 100 63 | 64 | [ReorgDetectorL1] 65 | DBPath = "/tmp/reorgdetectorl1.sqlite" 66 | 67 | [ReorgDetectorL2] 68 | DBPath = "/tmp/reorgdetectorl2.sqlite" 69 | 70 | [L1InfoTreeSync] 71 | DBPath = "/tmp/L1InfoTreeSync.sqlite" 72 | GlobalExitRootAddr="0x8A791620dd6260079BF849Dc5567aDC3F2FdC318" 73 | RollupManagerAddr = "0xB7f8BC63BbcaD18155201308C8f3540b07f84F5e" 74 | SyncBlockChunkSize=100 75 | BlockFinality="LatestBlock" 76 | URLRPCL1="http://localhost:8545" 77 | WaitForNewBlocksPeriod="100ms" 78 | InitialBlock=0 79 | RetryAfterErrorPeriod="1s" 80 | MaxRetryAttemptsAfterError=-1 81 | -------------------------------------------------------------------------------- /test/config/bridge_network_e2e/cardona.toml: -------------------------------------------------------------------------------- 1 | # You must set 2 | # 2. TestAddrPrivate 3 | # 3. L1NodeURL 4 | 5 | 6 | TestAddrPrivate="set private key" 7 | 8 | [ConnectionConfig] 9 | L1NodeURL="https://eth-sepolia.g.alchemy.com/v2/" 10 | L2NodeURL="https://rpc.cardona.zkevm-rpc.com" 11 | BridgeURL="https://bridge-api.cardona.zkevm-rpc.com" 12 | L1BridgeAddr="0x528e26b25a34a4a5d0dbda1d57d318153d2ed582" 13 | L2BridgeAddr="0x528e26b25a34a4a5d0dbda1d57d318153d2ed582" 14 | 15 | 16 | -------------------------------------------------------------------------------- /test/config/prover/config.prover.json: -------------------------------------------------------------------------------- 1 | { 2 | "runExecutorServer": true, 3 | "runExecutorClient": false, 4 | "runExecutorClientMultithread": false, 5 | 6 | "runHashDBServer": true, 7 | "runHashDBTest": false, 8 | 9 | "runAggregatorServer": false, 10 | "runAggregatorClient": false, 11 | "runAggregatorClientMock": true, 12 | "aggregatorClientMockTimeout": 1, 13 | "proverName": "test-prover", 14 | 15 | "runFileGenBatchProof": false, 16 | "runFileGenAggregatedProof": false, 17 | "runFileGenFinalProof": false, 18 | "runFileProcessBatch": false, 19 | "runFileProcessBatchMultithread": false, 20 | 21 | "runKeccakScriptGenerator": false, 22 | "runKeccakTest": false, 23 | "runStorageSMTest": false, 24 | "runBinarySMTest": false, 25 | "runMemAlignSMTest": false, 26 | "runSHA256Test": false, 27 | "runBlakeTest": false, 28 | 29 | "executeInParallel": true, 30 | "useMainExecGenerated": true, 31 | "saveRequestToFile": false, 32 | "saveInputToFile": false, 33 | "saveDbReadsToFile": false, 34 | "saveDbReadsToFileOnChange": false, 35 | "saveOutputToFile": true, 36 | "saveProofToFile": true, 37 | "saveResponseToFile": false, 38 | "loadDBToMemCache": true, 39 | "opcodeTracer": false, 40 | "logRemoteDbReads": false, 41 | "logExecutorServerResponses": false, 42 | 43 | "proverServerPort": 50051, 44 | "proverServerMockPort": 50052, 45 | "proverServerMockTimeout": 10000000, 46 | "proverClientPort": 50051, 47 | "proverClientHost": "127.0.0.1", 48 | 49 | "executorServerPort": 50071, 50 | "executorROMLineTraces": false, 51 | "executorClientPort": 50071, 52 | "executorClientHost": "127.0.0.1", 53 | 54 | "hashDBServerPort": 50061, 55 | "hashDBURL": "local", 56 | 57 | "aggregatorServerPort": 50081, 58 | "aggregatorClientPort": 50081, 59 | 
"aggregatorClientHost": "zkevm-node", 60 | 61 | "mapConstPolsFile": false, 62 | "mapConstantsTreeFile": false, 63 | 64 | "inputFile": "input_executor_0.json", 65 | "inputFile2": "input_executor_1.json", 66 | 67 | "keccakScriptFile": "config/scripts/keccak_script.json", 68 | "storageRomFile": "config/scripts/storage_sm_rom.json", 69 | 70 | "outputPath": "output", 71 | 72 | "databaseURL": "postgresql://prover_user:prover_pass@zkevm-state-db:5432/prover_db", 73 | "dbNodesTableName": "state.nodes", 74 | "dbProgramTableName": "state.program", 75 | "dbMultiWrite": true, 76 | "dbFlushInParallel": false, 77 | "dbMTCacheSize": 1024, 78 | "dbProgramCacheSize": 512, 79 | "dbNumberOfPoolConnections": 30, 80 | "dbGetTree": true, 81 | "cleanerPollingPeriod": 600, 82 | "requestsPersistence": 3600, 83 | "maxExecutorThreads": 20, 84 | "maxProverThreads": 8, 85 | "maxHashDBThreads": 8, 86 | "ECRecoverPrecalc": false, 87 | "ECRecoverPrecalcNThreads": 4, 88 | "stateManager": true, 89 | "useAssociativeCache" : false 90 | } 91 | 92 | -------------------------------------------------------------------------------- /test/config/prover/initproverdb.sql: -------------------------------------------------------------------------------- 1 | CREATE DATABASE prover_db; 2 | \connect prover_db; 3 | 4 | CREATE SCHEMA state; 5 | 6 | CREATE TABLE state.nodes (hash BYTEA PRIMARY KEY, data BYTEA NOT NULL); 7 | CREATE TABLE state.program (hash BYTEA PRIMARY KEY, data BYTEA NOT NULL); 8 | 9 | CREATE USER prover_user with password 'prover_pass'; 10 | ALTER DATABASE prover_db OWNER TO prover_user; 11 | ALTER SCHEMA state OWNER TO prover_user; 12 | ALTER SCHEMA public OWNER TO prover_user; 13 | ALTER TABLE state.nodes OWNER TO prover_user; 14 | ALTER TABLE state.program OWNER TO prover_user; 15 | ALTER USER prover_user SET SEARCH_PATH=state; 16 | -------------------------------------------------------------------------------- /test/mocksmartcontracts/BridgeMessageReceiver.sol: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: AGPL-3.0 2 | 3 | pragma solidity 0.8.20; 4 | 5 | /** 6 | * @dev Define interface for bridge message receiver 7 | */ 8 | interface IBridgeMessageReceiver { 9 | function onMessageReceived( 10 | address originAddress, 11 | uint32 originNetwork, 12 | bytes memory data 13 | ) external view returns (bool); 14 | } 15 | 16 | contract BridgeMessageReceiver is IBridgeMessageReceiver { 17 | function onMessageReceived( 18 | address originAddress, 19 | uint32 originNetwork, 20 | bytes memory data 21 | ) external view returns (bool) { 22 | if (data.length != 0) { 23 | return true; 24 | } 25 | return false; 26 | } 27 | } 28 | -------------------------------------------------------------------------------- /test/mocksmartcontracts/BridgeMessageReceiver/BridgeMessageReceiver.abi: -------------------------------------------------------------------------------- 1 | [{"inputs":[{"internalType":"address","name":"originAddress","type":"address"},{"internalType":"uint32","name":"originNetwork","type":"uint32"},{"internalType":"bytes","name":"data","type":"bytes"}],"name":"onMessageReceived","outputs":[{"internalType":"bool","name":"","type":"bool"}],"stateMutability":"view","type":"function"}] -------------------------------------------------------------------------------- /test/mocksmartcontracts/BridgeMessageReceiver/BridgeMessageReceiver.bin: -------------------------------------------------------------------------------- 1 | 
608060405234801561001057600080fd5b506101ac806100206000396000f3fe608060405234801561001057600080fd5b506004361061002b5760003560e01c80631806b5f214610030575b600080fd5b61004361003e36600461008b565b610057565b604051901515815260200160405180910390f35b6000815160001461006a5750600161006e565b5060005b9392505050565b634e487b7160e01b600052604160045260246000fd5b6000806000606084860312156100a057600080fd5b83356001600160a01b03811681146100b757600080fd5b9250602084013563ffffffff811681146100d057600080fd5b9150604084013567ffffffffffffffff808211156100ed57600080fd5b818601915086601f83011261010157600080fd5b81358181111561011357610113610075565b604051601f8201601f19908116603f0116810190838211818310171561013b5761013b610075565b8160405282815289602084870101111561015457600080fd5b826020860160208301376000602084830101528095505050505050925092509256fea2646970667358221220aae52164165586bb06a59933e81aa3e5f35e45cb8c87d85e2d4b95656df9d54b64736f6c63430008140033 -------------------------------------------------------------------------------- /test/mocksmartcontracts/BridgeMessageReceiver/IBridgeMessageReceiver.abi: -------------------------------------------------------------------------------- 1 | [{"inputs":[{"internalType":"address","name":"originAddress","type":"address"},{"internalType":"uint32","name":"originNetwork","type":"uint32"},{"internalType":"bytes","name":"data","type":"bytes"}],"name":"onMessageReceived","outputs":[{"internalType":"bool","name":"","type":"bool"}],"stateMutability":"view","type":"function"}] -------------------------------------------------------------------------------- /test/mocksmartcontracts/BridgeMessageReceiver/IBridgeMessageReceiver.bin: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/0xPolygon/zkevm-bridge-service/1a2a7222d2bc3691bdb4078b8b900e6f55d0cd17/test/mocksmartcontracts/BridgeMessageReceiver/IBridgeMessageReceiver.bin -------------------------------------------------------------------------------- /test/mocksmartcontracts/PingReceiver.sol: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: AGPL-3.0 2 | 3 | pragma solidity >=0.8.17; 4 | 5 | import "./polygonZKEVMContracts/interfaces/IBridgeMessageReceiver.sol"; 6 | import "./polygonZKEVMContracts/interfaces/IPolygonZkEVMBridge.sol"; 7 | import "@openzeppelin/contracts/access/Ownable.sol"; 8 | 9 | /** 10 | * ZkEVMNFTBridge is an example contract to use the message layer of the PolygonZkEVMBridge to bridge NFTs 11 | */ 12 | contract PingReceiver is IBridgeMessageReceiver, Ownable { 13 | // Global Exit Root address 14 | IPolygonZkEVMBridge public immutable polygonZkEVMBridge; 15 | 16 | // Current network identifier 17 | uint32 public immutable networkID; 18 | 19 | // Address in the other network that will send the message 20 | address public pingSender; 21 | 22 | // Value sent from the other network 23 | uint256 public pingValue; 24 | 25 | /** 26 | * @param _polygonZkEVMBridge Polygon zkevm bridge address 27 | */ 28 | constructor(IPolygonZkEVMBridge _polygonZkEVMBridge) { 29 | polygonZkEVMBridge = _polygonZkEVMBridge; 30 | networkID = polygonZkEVMBridge.networkID(); 31 | } 32 | 33 | /** 34 | * @dev Emitted when a message is received from another network 35 | */ 36 | event PingReceived(uint256 pingValue); 37 | 38 | /** 39 | * @dev Emitted when change the sender 40 | */ 41 | event SetSender(address newPingSender); 42 | 43 | /** 44 | * @notice Set the sender of the message 45 | * @param newPingSender Address of the sender in 
the other network 46 | */ 47 | function setSender(address newPingSender) external onlyOwner { 48 | pingSender = newPingSender; 49 | emit SetSender(newPingSender); 50 | } 51 | 52 | /** 53 | * @notice Handles the message received from the other network through the bridge 54 | * @param originAddress Origin address that sent the message 55 | * @param originNetwork Origin network from which the message was sent (not useful for this contract) 56 | * @param data ABI-encoded metadata 57 | */ 58 | function onMessageReceived( 59 | address originAddress, 60 | uint32 originNetwork, 61 | bytes memory data 62 | ) external payable override { 63 | // Can only be called by the bridge 64 | require( 65 | msg.sender == address(polygonZkEVMBridge), 66 | "PingReceiver::onMessageReceived: Not PolygonZkEVMBridge" 67 | ); 68 | 69 | // Can only be called by the sender on the other network 70 | require( 71 | pingSender == originAddress, 72 | "PingReceiver::onMessageReceived: Not ping Sender" 73 | ); 74 | 75 | // Decode data 76 | pingValue = abi.decode(data, (uint256)); 77 | 78 | emit PingReceived(pingValue); 79 | } 80 | } 81 | -------------------------------------------------------------------------------- /test/mocksmartcontracts/readme.md: -------------------------------------------------------------------------------- 1 | # Contracts 2 | 3 | ### PingReceiver 4 | - Original code: [PingReceiver.sol](https://github.com/0xPolygonHermez/code-examples/blob/main/pingPongExample/contracts/PingReceiver.sol) 5 | -------------------------------------------------------------------------------- /test/mocksmartcontracts/script.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | set -e 4 | 5 | dir=$(pwd) 6 | 7 | gen() { 8 | local package=$1 9 | 10 | abigen --bin bin/${package}.bin --abi abi/${package}.abi --pkg=${package} --out=${package}/${package}.go 11 | } 12 | 13 | 14 | compilegen() { 15 | local package=$1 16 | 17 | docker run --rm --user $(id -u) -v "${dir}:/contracts" ethereum/solc:0.8.20-alpine - "/contracts/${package}.sol" -o "/contracts/${package}" --abi --bin --overwrite --optimize --evm-version paris 18 | abigen --bin ${package}/${package}.bin --abi ${package}/${package}.abi --pkg=${package} --out=${package}/${package}.go 19 | } 20 | 21 | gen PingReceiver 22 | gen polygonzkevmbridge 23 | gen erc20permitmock 24 | compilegen BridgeMessageReceiver -------------------------------------------------------------------------------- /test/operations/interfaces.go: -------------------------------------------------------------------------------- 1 | package operations 2 | 3 | import ( 4 | "context" 5 | 6 | "github.com/0xPolygonHermez/zkevm-bridge-service/bridgectrl/pb" 7 | "github.com/0xPolygonHermez/zkevm-bridge-service/etherman" 8 | "github.com/ethereum/go-ethereum/common" 9 | ) 10 | 11 | // StorageInterface is a storage interface.
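// It groups the read helpers, the synchronizer write methods and the
// transaction primitives (BeginDBTransaction/Commit/Rollback) that the
// test operations package needs from the bridge storage.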
12 | type StorageInterface interface { 13 | GetLastBlock(ctx context.Context, networkID uint32, dbTx interface{}) (*etherman.Block, error) 14 | GetLatestExitRoot(ctx context.Context, networkID, destNetwork uint32, dbTx interface{}) (*etherman.GlobalExitRoot, error) 15 | GetLatestL1SyncedExitRoot(ctx context.Context, dbTx interface{}) (*etherman.GlobalExitRoot, error) 16 | GetLatestTrustedExitRoot(ctx context.Context, networkID uint32, dbTx interface{}) (*etherman.GlobalExitRoot, error) 17 | GetTokenWrapped(ctx context.Context, originalNetwork uint32, originalTokenAddress common.Address, dbTx interface{}) (*etherman.TokenWrapped, error) 18 | GetDepositCountByRoot(ctx context.Context, root []byte, network uint32, dbTx interface{}) (uint32, error) 19 | UpdateBlocksForTesting(ctx context.Context, networkID uint32, blockNum uint64, dbTx interface{}) error 20 | GetClaim(ctx context.Context, depositCount, origNetworkID, networkID uint32, dbTx interface{}) (*etherman.Claim, error) 21 | GetClaims(ctx context.Context, destAddr string, limit uint32, offset uint32, dbTx interface{}) ([]*etherman.Claim, error) 22 | UpdateDepositsStatusForTesting(ctx context.Context, dbTx interface{}) error 23 | GetLatestMonitoredTxGroupID(ctx context.Context, dbTx interface{}) (uint64, error) 24 | // synchronizer 25 | AddBlock(ctx context.Context, block *etherman.Block, dbTx interface{}) (uint64, error) 26 | AddGlobalExitRoot(ctx context.Context, exitRoot *etherman.GlobalExitRoot, dbTx interface{}) error 27 | AddTrustedGlobalExitRoot(ctx context.Context, trustedExitRoot *etherman.GlobalExitRoot, dbTx interface{}) (bool, error) 28 | AddDeposit(ctx context.Context, deposit *etherman.Deposit, dbTx interface{}) (uint64, error) 29 | AddClaim(ctx context.Context, claim *etherman.Claim, dbTx interface{}) error 30 | AddTokenWrapped(ctx context.Context, tokenWrapped *etherman.TokenWrapped, dbTx interface{}) error 31 | // atomic 32 | Rollback(ctx context.Context, dbTx interface{}) error 33 | BeginDBTransaction(ctx context.Context) (interface{}, error) 34 | Commit(ctx context.Context, dbTx interface{}) error 35 | } 36 | 37 | // BridgeServiceInterface is an interface for the bridge service. 
38 | type BridgeServiceInterface interface { 39 | GetBridges(ctx context.Context, req *pb.GetBridgesRequest) (*pb.GetBridgesResponse, error) 40 | GetClaims(ctx context.Context, req *pb.GetClaimsRequest) (*pb.GetClaimsResponse, error) 41 | GetProof(ctx context.Context, req *pb.GetProofRequest) (*pb.GetProofResponse, error) 42 | GetProofByGER(ctx context.Context, req *pb.GetProofByGERRequest) (*pb.GetProofResponse, error) 43 | } 44 | -------------------------------------------------------------------------------- /test/operations/mockserver.go: -------------------------------------------------------------------------------- 1 | package operations 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | 7 | "github.com/0xPolygonHermez/zkevm-bridge-service/bridgectrl" 8 | "github.com/0xPolygonHermez/zkevm-bridge-service/db/pgstorage" 9 | "github.com/0xPolygonHermez/zkevm-bridge-service/server" 10 | ) 11 | 12 | // RunMockServer runs mock server 13 | func RunMockServer(ctx context.Context, dbType string, height uint8, networks []uint32) (*bridgectrl.BridgeController, StorageInterface, error) { 14 | if dbType != "postgres" { 15 | return nil, nil, fmt.Errorf("not registered database") 16 | } 17 | 18 | dbCfg := pgstorage.NewConfigFromEnv() 19 | err := pgstorage.InitOrReset(ctx, dbCfg) 20 | if err != nil { 21 | return nil, nil, err 22 | } 23 | store, err := pgstorage.NewPostgresStorage(ctx, dbCfg) 24 | if err != nil { 25 | return nil, nil, err 26 | } 27 | 28 | btCfg := bridgectrl.Config{ 29 | Height: height, 30 | } 31 | bt, err := bridgectrl.NewBridgeController(ctx, btCfg, networks, store) 32 | if err != nil { 33 | return nil, nil, err 34 | } 35 | 36 | cfg := server.Config{ 37 | GRPCPort: "9090", 38 | HTTPPort: "8080", 39 | CacheSize: 100000, //nolint:mnd 40 | DefaultPageLimit: 25, //nolint:mnd 41 | MaxPageLimit: 100, //nolint:mnd 42 | BridgeVersion: "v1", 43 | } 44 | bridgeService := server.NewBridgeService(cfg, btCfg.Height, networks, store) 45 | return bt, store, server.RunServer(ctx, cfg, bridgeService) 46 | } 47 | -------------------------------------------------------------------------------- /test/scripts/claim/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "context" 5 | 6 | "github.com/0xPolygonHermez/zkevm-bridge-service/etherman" 7 | "github.com/0xPolygonHermez/zkevm-bridge-service/log" 8 | clientUtils "github.com/0xPolygonHermez/zkevm-bridge-service/test/client" 9 | "github.com/0xPolygonHermez/zkevm-bridge-service/utils" 10 | "github.com/ethereum/go-ethereum/common" 11 | ) 12 | 13 | const ( 14 | l2BridgeAddr = "0xFe12ABaa190Ef0c8638Ee0ba9F828BF41368Ca0E" 15 | 16 | l2AccHexAddress = "0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266" 17 | l2AccHexPrivateKey = "0xac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80" 18 | l2NetworkURL = "http://localhost:8123" 19 | bridgeURL = "http://localhost:8080" 20 | 21 | mtHeight = 32 22 | ) 23 | 24 | func main() { 25 | ctx := context.Background() 26 | c, err := utils.NewClient(ctx, l2NetworkURL, common.HexToAddress(l2BridgeAddr)) 27 | if err != nil { 28 | log.Fatal("Error: ", err) 29 | } 30 | auth, err := c.GetSigner(ctx, l2AccHexPrivateKey) 31 | if err != nil { 32 | log.Fatal("Error: ", err) 33 | } 34 | 35 | // Get Claim data 36 | cfg := clientUtils.Config{ 37 | L1NodeURL: l2NetworkURL, 38 | L2NodeURL: l2NetworkURL, 39 | BridgeURL: bridgeURL, 40 | L2BridgeAddr: common.HexToAddress(l2BridgeAddr), 41 | } 42 | client, err := clientUtils.NewClient(ctx, cfg) 43 | if err != nil { 44 
| log.Fatal("Error: ", err) 45 | } 46 | deposits, _, err := client.GetBridges(l2AccHexAddress, 0, 10) //nolint 47 | if err != nil { 48 | log.Fatal("Error: ", err) 49 | } 50 | bridgeData := deposits[0] 51 | proof, err := client.GetMerkleProof(deposits[0].NetworkId, deposits[0].DepositCnt) 52 | if err != nil { 53 | log.Fatal("error: ", err) 54 | } 55 | log.Debug("bridge: ", bridgeData) 56 | log.Debug("mainnetExitRoot: ", proof.MainExitRoot) 57 | log.Debug("rollupExitRoot: ", proof.RollupExitRoot) 58 | 59 | var smtProof, smtRollupProof [mtHeight][32]byte 60 | for i := 0; i < len(proof.MerkleProof); i++ { 61 | log.Debug("smtProof: ", proof.MerkleProof[i]) 62 | smtProof[i] = common.HexToHash(proof.MerkleProof[i]) 63 | log.Debug("smtRollupProof: ", proof.RollupMerkleProof[i]) 64 | smtRollupProof[i] = common.HexToHash(proof.RollupMerkleProof[i]) 65 | } 66 | globalExitRoot := ðerman.GlobalExitRoot{ 67 | ExitRoots: []common.Hash{common.HexToHash(proof.MainExitRoot), common.HexToHash(proof.RollupExitRoot)}, 68 | } 69 | log.Info("Sending claim tx...") 70 | err = c.SendClaim(ctx, bridgeData, smtProof, smtRollupProof, globalExitRoot, auth) 71 | if err != nil { 72 | log.Fatal("error: ", err) 73 | } 74 | log.Info("Success!") 75 | balance, err := c.BalanceAt(ctx, common.HexToAddress(l2AccHexAddress), nil) 76 | if err != nil { 77 | log.Fatal("error getting balance: ", err) 78 | } 79 | log.Info("L2 balance: ", balance) 80 | } 81 | -------------------------------------------------------------------------------- /test/scripts/deploytool/README.MD: -------------------------------------------------------------------------------- 1 | go run main.go deployClaimCompressor --url "http://localhost:8123" --bridgeAddress "0xFe12ABaa190Ef0c8638Ee0ba9F828BF41368Ca0E" --walletFile ../../test.keystore.claimtx --password "testonly" 2 | 3 | go run main.go sendETH --url "http://localhost:8123" --destAddress "0x70997970c51812dc3a010c7d01b50e0d17dc79c8" --walletFile ../../test.keystore.claimtx --password "testonly" 4 | 5 | go run main.go deploySovereignChainSMC --url "http://localhost:8123" --walletFile ../../test.keystore.aggregator --password "testonly" 6 | -------------------------------------------------------------------------------- /test/scripts/deposit/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "context" 5 | "math/big" 6 | 7 | "github.com/0xPolygonHermez/zkevm-bridge-service/log" 8 | "github.com/0xPolygonHermez/zkevm-bridge-service/utils" 9 | "github.com/ethereum/go-ethereum/common" 10 | ) 11 | 12 | const ( 13 | l1BridgeAddr = "0xFe12ABaa190Ef0c8638Ee0ba9F828BF41368Ca0E" 14 | 15 | l1AccHexAddress = "0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266" 16 | l1AccHexPrivateKey = "0xac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80" 17 | l1NetworkURL = "http://localhost:8545" 18 | 19 | funds = 90000000000000000 // nolint 20 | destNetwork uint32 = 1 21 | ) 22 | 23 | var tokenAddr = common.Address{} 24 | 25 | func main() { 26 | ctx := context.Background() 27 | client, err := utils.NewClient(ctx, l1NetworkURL, common.HexToAddress(l1BridgeAddr)) 28 | if err != nil { 29 | log.Fatal("Error: ", err) 30 | } 31 | auth, err := client.GetSigner(ctx, l1AccHexPrivateKey) 32 | if err != nil { 33 | log.Fatal("Error: ", err) 34 | } 35 | amount := big.NewInt(funds) 36 | emptyAddr := common.Address{} 37 | if tokenAddr == emptyAddr { 38 | auth.Value = amount 39 | } 40 | destAddr := common.HexToAddress(l1AccHexAddress) 41 | log.Info("Sending bridge 
tx...") 42 | err = client.SendBridgeAsset(ctx, tokenAddr, amount, destNetwork, &destAddr, []byte{}, auth) 43 | if err != nil { 44 | log.Fatal("Error: ", err) 45 | } 46 | log.Info("Success!") 47 | } 48 | -------------------------------------------------------------------------------- /test/scripts/forcebatchproposal/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "context" 5 | "time" 6 | 7 | "github.com/0xPolygonHermez/zkevm-bridge-service/etherman/smartcontracts/polygonrollupmanager" 8 | "github.com/0xPolygonHermez/zkevm-bridge-service/etherman/smartcontracts/polygonzkevm" 9 | "github.com/0xPolygonHermez/zkevm-bridge-service/log" 10 | "github.com/0xPolygonHermez/zkevm-bridge-service/utils" 11 | "github.com/ethereum/go-ethereum/accounts/abi/bind" 12 | "github.com/ethereum/go-ethereum/common" 13 | ) 14 | 15 | const ( 16 | l1AccHexPrivateKey = "0xac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80" 17 | 18 | l1NetworkURL = "http://localhost:8545" 19 | polygonZkEVMAddressHex = "0x8dAF17A20c9DBA35f005b6324F493785D239719d" 20 | polygonRollupManagerAddressHex = "0xB7f8BC63BbcaD18155201308C8f3540b07f84F5e" 21 | polTokenAddressHex = "0x5FbDB2315678afecb367f032d93F642f64180aa3" //nolint:gosec 22 | 23 | maxSequenceTimestamp = 1 24 | initSequencedBatch = 1 25 | ) 26 | 27 | func main() { 28 | ctx := context.Background() 29 | // Eth client 30 | log.Infof("Connecting to l1") 31 | client, err := utils.NewClient(ctx, l1NetworkURL, common.Address{}) 32 | if err != nil { 33 | log.Fatal("Error: ", err) 34 | } 35 | auth, err := client.GetSigner(ctx, l1AccHexPrivateKey) 36 | if err != nil { 37 | log.Fatal("Error: ", err) 38 | } 39 | polygonZkEVMAddress := common.HexToAddress(polygonZkEVMAddressHex) 40 | polygonZkEVM, err := polygonzkevm.NewPolygonzkevm(polygonZkEVMAddress, client) 41 | if err != nil { 42 | log.Fatal("Error: ", err) 43 | } 44 | polygonRollupManagerAddress := common.HexToAddress(polygonRollupManagerAddressHex) 45 | polygonRollupManager, err := polygonrollupmanager.NewPolygonrollupmanager(polygonRollupManagerAddress, client) 46 | if err != nil { 47 | log.Fatal("Error: ", err) 48 | } 49 | polAmount, err := polygonRollupManager.GetForcedBatchFee(&bind.CallOpts{Pending: false}) 50 | if err != nil { 51 | log.Fatal("Error getting collateral amount from smc: ", err) 52 | } 53 | err = client.ApproveERC20(ctx, common.HexToAddress(polTokenAddressHex), polygonZkEVMAddress, polAmount, auth) 54 | if err != nil { 55 | log.Fatal("Error approving pol: ", err) 56 | } 57 | tx, err := polygonZkEVM.SequenceBatches(auth, nil, maxSequenceTimestamp, initSequencedBatch, auth.From) 58 | if err != nil { 59 | log.Fatal("Error sending the batch: ", err) 60 | } 61 | 62 | // Wait for the tx to be mined 63 | log.Infof("Waiting tx to be mined") 64 | const txETHTransferTimeout = 60 * time.Second 65 | err = utils.WaitTxToBeMined(ctx, client.Client, tx, txETHTransferTimeout) 66 | if err != nil { 67 | log.Fatal("Error: ", err) 68 | } 69 | log.Info("Batch successfully sent!") 70 | } 71 | -------------------------------------------------------------------------------- /test/scripts/initialClaim/Readme.md: -------------------------------------------------------------------------------- 1 | # InitialClaim script 2 | This script allows creating the claim tx and including it in a forcedBatch. This is required when the L2 network is empty and there are no funds in L2.
3 | Typically this action is used to include the claim tx that fills the bridge autoclaim wallet with ether in L2, so that the service can send the claim txs for the users. 4 | 5 | ## Parameters 6 | At the beginning of the script there are the following constant variables that need to be reviewed. 7 | ``` 8 | l2BridgeAddr = "0xFe12ABaa190Ef0c8638Ee0ba9F828BF41368Ca0E" 9 | zkevmAddr = "0x8dAF17A20c9DBA35f005b6324F493785D239719d" 10 | 11 | accHexAddress = "0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266" 12 | accHexPrivateKey = "0xac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80" 13 | l1NetworkURL = "http://localhost:8545" 14 | l2NetworkURL = "http://localhost:8123" 15 | bridgeURL = "http://localhost:8080" 16 | ``` 17 | `l2BridgeAddr` is the bridge smart contract address in L2 18 | `zkevmAddr` is the polygonZkEVM contract address in L1 19 | `accHexAddress` is the wallet address used to send the claim in L2 and to send the forcedBatch in L1 20 | `accHexPrivateKey` is the wallet private key used to send the claim in L2 and to send the forcedBatch in L1 21 | `l1NetworkURL` is the URL of the L1 RPC 22 | `l2NetworkURL` is the URL of the L2 RPC 23 | `bridgeURL` is the URL of the bridge service -------------------------------------------------------------------------------- /test/scripts/isClaimed/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "context" 5 | 6 | "github.com/0xPolygonHermez/zkevm-bridge-service/log" 7 | "github.com/0xPolygonHermez/zkevm-bridge-service/utils" 8 | "github.com/ethereum/go-ethereum/accounts/abi/bind" 9 | "github.com/ethereum/go-ethereum/common" 10 | ) 11 | 12 | const ( 13 | bridgeAddr = "0xFe12ABaa190Ef0c8638Ee0ba9F828BF41368Ca0E" 14 | 15 | networkURL = "http://localhost:8123" 16 | 17 | depositCnt = 585 18 | originalNetwork = 0 19 | ) 20 | 21 | func main() { 22 | ctx := context.Background() 23 | client, err := utils.NewClient(ctx, networkURL, common.HexToAddress(bridgeAddr)) 24 | if err != nil { 25 | log.Fatal("Error: ", err) 26 | } 27 | 28 | isClaimed, err := client.Bridge.IsClaimed(&bind.CallOpts{Pending: false}, depositCnt, originalNetwork) 29 | if err != nil { 30 | log.Fatal("error calling IsClaimed. Error: ", err) 31 | } 32 | log.Info("IsClaimed: ", isClaimed) 33 | } 34 | -------------------------------------------------------------------------------- /test/scripts/readLatestLER/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "github.com/0xPolygonHermez/zkevm-bridge-service/etherman/smartcontracts/polygonzkevmglobalexitroot" 5 | "github.com/0xPolygonHermez/zkevm-bridge-service/log" 6 | "github.com/ethereum/go-ethereum/accounts/abi/bind" 7 | "github.com/ethereum/go-ethereum/common" 8 | "github.com/ethereum/go-ethereum/ethclient" 9 | ) 10 | 11 | const ( 12 | gerManAddr = "0xa40d5f56745a118d0906a34e69aec8c0db1cb8fa" 13 | 14 | nodeURL = "http://localhost:8124" 15 | ) 16 | 17 | func main() { 18 | client, err := ethclient.Dial(nodeURL) 19 | if err != nil { 20 | log.Fatal("error connecting to the node.
Error: ", err) 21 | } 22 | g, err := polygonzkevmglobalexitroot.NewPolygonzkevmglobalexitroot(common.HexToAddress(gerManAddr), client) 23 | if err != nil { 24 | log.Fatal("Error: ", err) 25 | } 26 | rollupExitRoot, err := g.LastRollupExitRoot(&bind.CallOpts{}) 27 | if err != nil { 28 | log.Fatal("Error: ", err) 29 | } 30 | // ger, err := g.GlobalExitRootMap(&bind.CallOpts{}) 31 | // if err != nil { 32 | // log.Fatal("Error: ", err) 33 | // } 34 | // log.Info("ger! ", common.BytesToAddress(ger[:])) 35 | log.Info("rollupExitRoot! ", common.BytesToHash(rollupExitRoot[:])) 36 | } 37 | -------------------------------------------------------------------------------- /test/test.keystore.aggregator: -------------------------------------------------------------------------------- 1 | {"version":3,"id":"71b028b6-9b1d-4f4c-9e66-31c94a6eb679","address":"70997970c51812dc3a010c7d01b50e0d17dc79c8","crypto":{"ciphertext":"985d5dc5f7750fc4ad0ad0d370486870016bb97e00ef1f7b146d6ad95d456861","cipherparams":{"iv":"f51b18b9f45872f71c3578513fca6cb0"},"cipher":"aes-128-ctr","kdf":"scrypt","kdfparams":{"dklen":32,"salt":"6253e2d8a71e4808dd11143329cfea467cabb37ac1e1e55dbc0dd90ff22524a7","n":8192,"r":8,"p":1},"mac":"922f741e84201fc7c17bbf9fae5dba6c04a2a99a7268998b5a0268aa690004be"}} -------------------------------------------------------------------------------- /test/test.keystore.aggregator-2: -------------------------------------------------------------------------------- 1 | {"address":"9965507d1a55bcc2695c58ba16fb37d819b0a4dc","crypto":{"cipher":"aes-128-ctr","ciphertext":"58c70f3dcd1082dde67139e35582e0f13ff8cdf29f99f85648fd44434ffaedd9","cipherparams":{"iv":"d80b9f93460e76fb1cc03c39fdb14ef5"},"kdf":"scrypt","kdfparams":{"dklen":32,"n":262144,"p":1,"r":8,"salt":"91423984e31be2541fb757593041c8c95dac2624f09040ce4189d15da19449bc"},"mac":"dea5f893609c1a46d86713c9031d99ea531a3b44cb5da666432f677a8e3d3fd9"},"id":"57648c58-6979-447d-a262-ecff68a2a456","version":3} -------------------------------------------------------------------------------- /test/test.keystore.claimtx: -------------------------------------------------------------------------------- 1 | {"address":"f39fd6e51aad88f6f4ce6ab8827279cfffb92266","crypto":{"cipher":"aes-128-ctr","ciphertext":"d005030a7684f3adad2447cbb27f63039eec2224c451eaa445de0d90502b9f3d","cipherparams":{"iv":"dc07a54bc7e388efa89c34d42f2ebdb4"},"kdf":"scrypt","kdfparams":{"dklen":32,"n":262144,"p":1,"r":8,"salt":"cf2ec55ecae11171de575112cfb16963570533a9c46fb774473ceb11519eb24a"},"mac":"3eb180d405a5da6e462b2adc00091c14856c91d574bf27348714506357d6e177"},"id":"035454db-6b6d-477f-8a79-ce24c10b185f","version":3} -------------------------------------------------------------------------------- /test/test.keystore.sequencer: -------------------------------------------------------------------------------- 1 | {"address":"f39fd6e51aad88f6f4ce6ab8827279cfffb92266","crypto":{"cipher":"aes-128-ctr","ciphertext":"d005030a7684f3adad2447cbb27f63039eec2224c451eaa445de0d90502b9f3d","cipherparams":{"iv":"dc07a54bc7e388efa89c34d42f2ebdb4"},"kdf":"scrypt","kdfparams":{"dklen":32,"n":262144,"p":1,"r":8,"salt":"cf2ec55ecae11171de575112cfb16963570533a9c46fb774473ceb11519eb24a"},"mac":"3eb180d405a5da6e462b2adc00091c14856c91d574bf27348714506357d6e177"},"id":"035454db-6b6d-477f-8a79-ce24c10b185f","version":3} -------------------------------------------------------------------------------- /test/test.keystore.sequencer-2: -------------------------------------------------------------------------------- 1 | 
{"address":"15d34aaf54267db7d7c367839aaf71a00a2c6a65","crypto":{"cipher":"aes-128-ctr","ciphertext":"44ae2b38312474970669d8c0dd146056f72c0f6d94c7c4ad5d8aad3782d15d0b","cipherparams":{"iv":"668c72a8a0d75d36df682d07ef4fcdec"},"kdf":"scrypt","kdfparams":{"dklen":32,"n":262144,"p":1,"r":8,"salt":"699fa8720a4d609cfa6bedb8fd35d3b8bab370d41419a856d593217168f0dc57"},"mac":"982606ec979042ab5fb755b6879ebefeef9c26ed855c04c0c6ea0b4195af4445"},"id":"786a9f61-dcdc-4e74-8f08-44807582dc9a","version":3} -------------------------------------------------------------------------------- /test/vectors/src/block-raw.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "blockNumber": 234, 4 | "blockHash": "0x29e885edaf8e4b51e1d2e05f9da28161d2fb4f6b1d53827d9b80a23cf2d7d9fc", 5 | "parentHash": "", 6 | "NetworkID": 0 7 | }, 8 | { 9 | "blockNumber": 235, 10 | "blockHash": "0xf17265729f396905b38adaca163d68afcbbb24714860701264d4ba4ad93c0d05", 11 | "parentHash": "0x29e885edaf8e4b51e1d2e05f9da28161d2fb4f6b1d53827d9b80a23cf2d7d9fc", 12 | "NetworkID": 0 13 | }, 14 | { 15 | "blockNumber": 236, 16 | "blockHash": "0x32566da09dbbc8e2639fb1b1f1dd91eae0751ae1e6976f435e9bd4e17def51cb", 17 | "parentHash": "0xf17265729f396905b38adaca163d68afcbbb24714860701264d4ba4ad93c0d05", 18 | "NetworkID": 0 19 | }, 20 | { 21 | "blockNumber": 237, 22 | "blockHash": "0x1b1c844f8fb44c18685dbb121642fa7fe6239b545399e3bd1ae6db6f8974f508", 23 | "parentHash": "0x32566da09dbbc8e2639fb1b1f1dd91eae0751ae1e6976f435e9bd4e17def51cb", 24 | "NetworkID": 0 25 | } 26 | ] -------------------------------------------------------------------------------- /test/vectors/src/claim-raw.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "index": 1, 4 | "origNetwork": 0, 5 | "token": "0x617b3a3528F9cDd6630fd3301B9c8911F7Bf063D", 6 | "amount": "1000000000000000001", 7 | "destNetwork": 1000, 8 | "destAddress": "0xd51a44d3fae010294c616388b506acda1bfaae46", 9 | "blockNumber": 234 10 | }, 11 | { 12 | "index": 2, 13 | "origNetwork": 0, 14 | "token": "0xeB17ce701E9D92724AA2ABAdA7E4B28830597Dd9", 15 | "amount": "100000000000000000", 16 | "destNetwork": 1001, 17 | "destAddress": "0xabCcEd19d7f290B84608feC510bEe872CC8F5112", 18 | "blockNumber": 235 19 | }, 20 | { 21 | "index": 3, 22 | "origNetwork": 0, 23 | "token": "0x187Bd40226A7073b49163b1f6c2b73d8F2aa8478", 24 | "amount": "1000000000000000002", 25 | "destNetwork": 1000, 26 | "destAddress": "0xd51a44d3fae010294c616388b506acda1bfaae46", 27 | "blockNumber": 236 28 | }, 29 | { 30 | "index": 4, 31 | "origNetwork": 0, 32 | "token": "0x617b3a3528F9cDd6630fd3301B9c8911F7Bf063D", 33 | "amount": "1000000000000000000", 34 | "destNetwork": 1000, 35 | "destAddress": "0xeB17ce701E9D92724AA2ABAdA7E4B28830597Dd9", 36 | "blockNumber": 237 37 | } 38 | ] -------------------------------------------------------------------------------- /test/vectors/src/deposit-raw.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "originNetwork": 0, 4 | "tokenAddress": "0x617b3a3528F9cDd6630fd3301B9c8911F7Bf063D", 5 | "amount": "1000000000000000001", 6 | "destinationNetwork": 1000, 7 | "destinationAddress": "0xd51a44d3fae010294c616388b506acda1bfaae46", 8 | "metadata": 
"0x000000000000000000000000000000000000000000000000000000000000006000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000c0000000000000000000000000000000000000000000000000000000000000005436f696e410000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003434f410000000000000000000000000000000000000000000000000000000000", 9 | "leafValue": "17f1cd46e37b6751f90292ce5292a70eec9b41fc308a48c450ad613be005f289" 10 | }, 11 | { 12 | "originNetwork": 0, 13 | "tokenAddress": "0xeB17ce701E9D92724AA2ABAdA7E4B28830597Dd9", 14 | "amount": "100000000000000000", 15 | "destinationNetwork": 1001, 16 | "destinationAddress": "0xabCcEd19d7f290B84608feC510bEe872CC8F5112", 17 | "metadata": "0x000000000000000000000000000000000000000000000000000000000000006000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000c0000000000000000000000000000000000000000000000000000000000000005436f696e410000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003434f410000000000000000000000000000000000000000000000000000000000", 18 | "leafValue": "71a27713829ffa95c1090990e6bdd444a6ee784e217159eeef4511389849d9dd" 19 | }, 20 | { 21 | "originNetwork": 0, 22 | "tokenAddress": "0x187Bd40226A7073b49163b1f6c2b73d8F2aa8478", 23 | "amount": "1000000000000000002", 24 | "destinationNetwork": 1000, 25 | "destinationAddress": "0xd51a44d3fae010294c616388b506acda1bfaae46", 26 | "metadata": "0x000000000000000000000000000000000000000000000000000000000000006000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000c0000000000000000000000000000000000000000000000000000000000000005436f696e410000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003434f410000000000000000000000000000000000000000000000000000000000", 27 | "leafValue": "4e07659ee1149f8a5cc94fa89f4d2a3f606337fe9b8e748120632d495c7b9c6c" 28 | }, 29 | { 30 | "originNetwork": 0, 31 | "tokenAddress": "0x617b3a3528F9cDd6630fd3301B9c8911F7Bf063D", 32 | "amount": "1000000000000000000", 33 | "destinationNetwork": 1000, 34 | "destinationAddress": "0xeB17ce701E9D92724AA2ABAdA7E4B28830597Dd9", 35 | "metadata": "0x000000000000000000000000000000000000000000000000000000000000006000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000c0000000000000000000000000000000000000000000000000000000000000005436f696e410000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003434f410000000000000000000000000000000000000000000000000000000000", 36 | "leafValue": "6613d41bf048126c53d04af0deac2d74f6610720c22c5e419010e95689d89594" 37 | } 38 | ] -------------------------------------------------------------------------------- /test/vectors/src/mt-bridge/leaf-vectors.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "originNetwork": 0, 4 | "tokenAddress": "0x6B175474E89094C44Da98b954EedeAC495271d0F", 5 | "amount": "0x8ac7230489e80000", 6 | "destinationNetwork": 1, 7 | "destinationAddress": "0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266", 8 | "metadata": "0x", 9 | "leafValue": "0xa4bfa0908dc7b06d98da4309f859023d6947561bc19bc00d77f763dea1a0b9f5" 10 | }, 11 | { 12 | 
"originNetwork": 1, 13 | "tokenAddress": "0x6B175474E89094C44Da98b954EedeAC495271d0F", 14 | "amount": "0x8ac7230489e80000", 15 | "destinationNetwork": 0, 16 | "destinationAddress": "0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266", 17 | "metadata": "0x12345670", 18 | "leafValue": "0x315fee1aa202bf4a6bd0fde560c89be90b6e6e2aaf92dc5e8d118209abc3410f" 19 | }, 20 | { 21 | "originNetwork": 0, 22 | "tokenAddress": "0x0000000000000000000000000000000000000000", 23 | "amount": "0x8ac7230489e80000", 24 | "destinationNetwork": 1, 25 | "destinationAddress": "0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266", 26 | "metadata": "0x12345678", 27 | "leafValue": "0xb598ce65aa15c08dda126a2985ba54f0559eaac562bb43ba430c7344261fbc5d" 28 | }, 29 | { 30 | "originNetwork": 10, 31 | "tokenAddress": "0x0000000000000000000000000000000000000000", 32 | "amount": "0x01", 33 | "destinationNetwork": 4, 34 | "destinationAddress": "0x0000000000000000000000000000000000000000", 35 | "metadata": "0xf39Fd6e51aad88F6F4ce6aB8827279cffFb922661234e51aad88F6F4ce6aB8827279cffFb92266", 36 | "leafValue": "0xe6585bdf74b6a46b9ede8b1b877e1232fb79ee93106c4db8ffd49cf1685bf242" 37 | } 38 | ] -------------------------------------------------------------------------------- /test/vectors/src/mt-bridge/root-vectors.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "previousLeafsValues": [], 4 | "currentRoot": "0x27ae5ba08d7291c96c8cbddcc148bf48a6d68c7974b94356f53754ef6171d757", 5 | "newLeaf": { 6 | "originNetwork": 0, 7 | "tokenAddress": "0x6B175474E89094C44Da98b954EedeAC495271d0F", 8 | "amount": "0x8ac7230489e80000", 9 | "destinationNetwork": 1, 10 | "destinationAddress": "0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266", 11 | "currentLeafValue": "0xa4bfa0908dc7b06d98da4309f859023d6947561bc19bc00d77f763dea1a0b9f5", 12 | "metadata": "0x" 13 | }, 14 | "newRoot": "0xbf7ddbb59aa018a4c74e061f5172973ff09e4cb7f58405af117fc521f1ca46aa" 15 | }, 16 | { 17 | "previousLeafsValues": [ 18 | "0xa4bfa0908dc7b06d98da4309f859023d6947561bc19bc00d77f763dea1a0b9f5" 19 | ], 20 | "currentRoot": "0xbf7ddbb59aa018a4c74e061f5172973ff09e4cb7f58405af117fc521f1ca46aa", 21 | "newLeaf": { 22 | "originNetwork": 1, 23 | "tokenAddress": "0x6B175474E89094C44Da98b954EedeAC495271d0F", 24 | "amount": "0x8ac7230489e80000", 25 | "destinationNetwork": 0, 26 | "destinationAddress": "0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266", 27 | "currentLeafValue": "0x315fee1aa202bf4a6bd0fde560c89be90b6e6e2aaf92dc5e8d118209abc3410f", 28 | "metadata": "0x12345670" 29 | }, 30 | "newRoot": "0xa7042a3ce14f384bbff63f1cee6ee5579193c2d7002e0034854963322cda6128" 31 | }, 32 | { 33 | "previousLeafsValues": [ 34 | "0xa4bfa0908dc7b06d98da4309f859023d6947561bc19bc00d77f763dea1a0b9f5", 35 | "0x315fee1aa202bf4a6bd0fde560c89be90b6e6e2aaf92dc5e8d118209abc3410f" 36 | ], 37 | "currentRoot": "0xa7042a3ce14f384bbff63f1cee6ee5579193c2d7002e0034854963322cda6128", 38 | "newLeaf": { 39 | "originNetwork": 0, 40 | "tokenAddress": "0x0000000000000000000000000000000000000000", 41 | "amount": "0x8ac7230489e80000", 42 | "destinationNetwork": 1, 43 | "destinationAddress": "0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266", 44 | "currentLeafValue": "0xb598ce65aa15c08dda126a2985ba54f0559eaac562bb43ba430c7344261fbc5d", 45 | "metadata": "0x12345678" 46 | }, 47 | "newRoot": "0x88e652896cb1de5962a0173a222059f51e6b943a2ba6dfc9acbff051ceb1abb5" 48 | }, 49 | { 50 | "previousLeafsValues": [ 51 | "0xa4bfa0908dc7b06d98da4309f859023d6947561bc19bc00d77f763dea1a0b9f5", 52 | 
"0x315fee1aa202bf4a6bd0fde560c89be90b6e6e2aaf92dc5e8d118209abc3410f", 53 | "0xb598ce65aa15c08dda126a2985ba54f0559eaac562bb43ba430c7344261fbc5d" 54 | ], 55 | "currentRoot": "0x88e652896cb1de5962a0173a222059f51e6b943a2ba6dfc9acbff051ceb1abb5", 56 | "newLeaf": { 57 | "originNetwork": 10, 58 | "tokenAddress": "0x0000000000000000000000000000000000000000", 59 | "amount": "0x01", 60 | "destinationNetwork": 4, 61 | "destinationAddress": "0x0000000000000000000000000000000000000000", 62 | "currentLeafValue": "0xe6585bdf74b6a46b9ede8b1b877e1232fb79ee93106c4db8ffd49cf1685bf242", 63 | "metadata": "0xf39Fd6e51aad88F6F4ce6aB8827279cffFb922661234e51aad88F6F4ce6aB8827279cffFb92266" 64 | }, 65 | "newRoot": "0x42d3339fe8eb57770953423f20a029e778a707e8d58aaf110b40d5eb4dd25721" 66 | } 67 | ] -------------------------------------------------------------------------------- /test/vectors/vectors.go: -------------------------------------------------------------------------------- 1 | package vectors 2 | 3 | // DepositVectorRaw represents the deposit vector 4 | type DepositVectorRaw struct { 5 | OriginalNetwork uint32 `json:"originNetwork"` 6 | TokenAddress string `json:"tokenAddress"` 7 | Amount string `json:"amount"` 8 | DestinationNetwork uint32 `json:"destinationNetwork"` 9 | DestinationAddress string `json:"destinationAddress"` 10 | ExpectedHash string `json:"leafValue"` 11 | CurrentHash string `json:"currentLeafValue"` 12 | Metadata string `json:"metadata"` 13 | } 14 | 15 | // MTRootVectorRaw represents the root of Merkle Tree 16 | type MTRootVectorRaw struct { 17 | ExistingLeaves []string `json:"previousLeafsValues"` 18 | CurrentRoot string `json:"currentRoot"` 19 | NewLeaf DepositVectorRaw `json:"newLeaf"` 20 | NewRoot string `json:"newRoot"` 21 | } 22 | 23 | // MTClaimVectorRaw represents the merkle proof 24 | type MTClaimVectorRaw struct { 25 | Deposits []DepositVectorRaw `json:"leafs"` 26 | Index uint32 `json:"index"` 27 | MerkleProof []string `json:"proof"` 28 | ExpectedRoot string `json:"root"` 29 | } 30 | 31 | // ClaimVectorRaw represents the claim vector 32 | type ClaimVectorRaw struct { 33 | Index uint32 `json:"index"` 34 | OriginalNetwork uint32 `json:"originNetwork"` 35 | Token string `json:"token"` 36 | Amount string `json:"amount"` 37 | DestinationNetwork uint32 `json:"destNetwork"` 38 | DestinationAddress string `json:"destAddress"` 39 | BlockNumber uint64 `json:"blockNumber"` 40 | } 41 | 42 | // BlockVectorRaw represents the block vector 43 | type BlockVectorRaw struct { 44 | BlockNumber uint64 `json:"blockNumber"` 45 | BlockHash string `json:"blockHash"` 46 | ParentHash string `json:"parentHash"` 47 | NetworkID uint32 `json:"networkID"` 48 | } 49 | -------------------------------------------------------------------------------- /third_party/google/api/annotations.proto: -------------------------------------------------------------------------------- 1 | // Copyright 2015 Google LLC 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 
14 | 15 | syntax = "proto3"; 16 | 17 | package google.api; 18 | 19 | import "google/api/http.proto"; 20 | import "google/protobuf/descriptor.proto"; 21 | 22 | option go_package = "google.golang.org/genproto/googleapis/api/annotations;annotations"; 23 | option java_multiple_files = true; 24 | option java_outer_classname = "AnnotationsProto"; 25 | option java_package = "com.google.api"; 26 | option objc_class_prefix = "GAPI"; 27 | 28 | extend google.protobuf.MethodOptions { 29 | // See `HttpRule`. 30 | HttpRule http = 72295728; 31 | } -------------------------------------------------------------------------------- /utils/gerror/error.go: -------------------------------------------------------------------------------- 1 | package gerror 2 | 3 | import "errors" 4 | 5 | var ( 6 | // ErrStorageNotFound is used when the object is not found in the storage 7 | ErrStorageNotFound = errors.New("not found in the Storage") 8 | // ErrStorageNotRegister is used when the object is not found in the synchronizer 9 | ErrStorageNotRegister = errors.New("not registered storage") 10 | // ErrNilDBTransaction indicates the db transaction has not been properly initialized 11 | ErrNilDBTransaction = errors.New("database transaction not properly initialized") 12 | // ErrRestServerHealth indicates the health check of rest server failed 13 | ErrRestServerHealth = errors.New("not ready for the rest server") 14 | // ErrDepositNotSynced is used when the deposit is not synchronized in nodes 15 | ErrDepositNotSynced = errors.New("not synchronized deposit") 16 | // ErrNetworkNotRegister is used when the networkID is not registered in the bridge 17 | ErrNetworkNotRegister = errors.New("not registered network") 18 | ) 19 | -------------------------------------------------------------------------------- /utils/helpers.go: -------------------------------------------------------------------------------- 1 | package utils 2 | 3 | import ( 4 | "crypto/sha256" 5 | "math/rand" 6 | ) 7 | 8 | func generateRandomString(length int) string { 9 | const charset = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789" 10 | b := make([]byte, length) 11 | for i := range b { 12 | b[i] = charset[rand.Intn(len(charset))] //nolint:gosec 13 | } 14 | return string(b) 15 | } 16 | 17 | // GenerateRandomHash generates a random hash. 
18 | func GenerateRandomHash() [sha256.Size]byte { 19 | rs := generateRandomString(10) //nolint:mnd 20 | return sha256.Sum256([]byte(rs)) 21 | } 22 | -------------------------------------------------------------------------------- /utils/time_provider.go: -------------------------------------------------------------------------------- 1 | package utils 2 | 3 | import ( 4 | "time" 5 | ) 6 | 7 | // TimeProvider is a interface for classes that needs time and we want to be able to unittest it 8 | type TimeProvider interface { 9 | // Now returns current time 10 | Now() time.Time 11 | } 12 | 13 | // TimeProviderSystemLocalTime is the default implementation of TimeProvider 14 | type TimeProviderSystemLocalTime struct{} 15 | 16 | func NewTimeProviderSystemLocalTime() *TimeProviderSystemLocalTime { 17 | return &TimeProviderSystemLocalTime{} 18 | } 19 | 20 | // Now returns current time 21 | func (d TimeProviderSystemLocalTime) Now() time.Time { 22 | return time.Now() 23 | } 24 | 25 | // TimeProviderFixedTime is a implementation that always returns the same time 26 | // that is useful for testing 27 | type TimeProviderFixedTime struct { 28 | FixedTime time.Time 29 | } 30 | 31 | // Now returns current time 32 | func (d TimeProviderFixedTime) Now() time.Time { 33 | return d.FixedTime 34 | } 35 | -------------------------------------------------------------------------------- /version.go: -------------------------------------------------------------------------------- 1 | package zkevmbridgeservice 2 | 3 | import ( 4 | "fmt" 5 | "io" 6 | "runtime" 7 | ) 8 | 9 | // Populated during build, don't touch! 10 | var ( 11 | Version = "v0.1.0" 12 | GitRev = "undefined" 13 | GitBranch = "undefined" 14 | BuildDate = "Fri, 17 Jun 1988 01:58:00 +0200" 15 | ) 16 | 17 | // PrintVersion prints version info into the provided io.Writer. 18 | func PrintVersion(w io.Writer) { 19 | _, _ = fmt.Fprintf(w, "Version: %s\n", Version) 20 | _, _ = fmt.Fprintf(w, "Git revision: %s\n", GitRev) 21 | _, _ = fmt.Fprintf(w, "Git branch: %s\n", GitBranch) 22 | _, _ = fmt.Fprintf(w, "Go version: %s\n", runtime.Version()) 23 | _, _ = fmt.Fprintf(w, "Built: %s\n", BuildDate) 24 | _, _ = fmt.Fprintf(w, "OS/Arch: %s/%s\n", runtime.GOOS, runtime.GOARCH) 25 | } 26 | -------------------------------------------------------------------------------- /version.mk: -------------------------------------------------------------------------------- 1 | VERSION := $(shell git describe --tags --always) 2 | GITREV := $(shell git rev-parse --short HEAD) 3 | GITBRANCH := $(shell git rev-parse --abbrev-ref HEAD) 4 | DATE := $(shell LANG=US date +"%a, %d %b %Y %X %z") 5 | --------------------------------------------------------------------------------