├── .fluence ├── aqua │ ├── deals.aqua │ ├── hosts.aqua │ └── services.aqua └── workers.yaml ├── .github ├── actionlint.yaml ├── release-please │ ├── config.json │ └── manifest.json ├── renovate.json └── workflows │ ├── lint-pr.yml │ ├── lint.yml │ ├── release.yml │ ├── run-tests.yml │ └── tests.yml ├── .gitignore ├── .vscode └── extensions.json ├── CHANGELOG.md ├── Cargo.lock ├── Cargo.toml ├── LICENSE ├── README.md ├── fluence-lock.yaml ├── fluence.yaml ├── gateway ├── README.md ├── aqua │ ├── balancer.aqua │ ├── counter.aqua │ ├── eth_rpc.aqua │ ├── logger.aqua │ ├── provider.aqua │ ├── quorum.aqua │ ├── random.aqua │ ├── rpc.aqua │ └── utils.aqua ├── configs │ └── quickstart_config.json ├── package-lock.json ├── package.json ├── src │ ├── arguments.js │ ├── config.js │ ├── index.js │ └── methods.js └── web3run.js ├── images └── metamask_tx_prompt.png ├── jest.config.js ├── package-lock.json ├── package.json ├── rust-toolchain.toml ├── src └── aqua │ └── main.aqua ├── test ├── config.ts ├── env.ts ├── fRPC.integration-test.ts ├── tsconfig.json └── utils.ts ├── tsconfig.json └── wasm-modules ├── curl-adapter ├── Cargo.toml ├── module.yaml └── src │ └── main.rs ├── eth-rpc ├── Cargo.toml ├── module.yaml └── src │ ├── curl_transport.rs │ ├── eth_call.rs │ ├── main.rs │ ├── typed.rs │ └── values.rs └── service.yaml /.fluence/aqua/deals.aqua: -------------------------------------------------------------------------------- 1 | aqua Deals declares * 2 | 3 | data Deal: 4 | definition: string 5 | timestamp: string 6 | dealIdOriginal: string 7 | dealId: string 8 | chainNetwork: string 9 | chainNetworkId: u64 10 | 11 | data Deals: 12 | defaultWorker: ?Deal 13 | 14 | func get() -> Deals: 15 | <- Deals( 16 | defaultWorker=?[Deal( 17 | definition="bafkreifncwgw7vgktf7j7qectsinrkhbhxjwfdvemmrshdsgi2ydelpvs4", 18 | timestamp="2023-09-27T12:11:50.777Z", 19 | dealIdOriginal="0x8d9C53312f0A4ad1Ba280ac197D371a33627E3bE", 20 | dealId="8d9c53312f0a4ad1ba280ac197d371a33627e3be", 21 | chainNetwork="testnet", 22 | chainNetworkId=80001 23 | )] 24 | ) 25 | -------------------------------------------------------------------------------- /.fluence/aqua/hosts.aqua: -------------------------------------------------------------------------------- 1 | aqua Hosts declares * 2 | 3 | data SpellLocation: 4 | hostId: string 5 | spellId: string 6 | workerId: string 7 | 8 | data Host: 9 | definition: string 10 | installationSpells: []SpellLocation 11 | relayId: string 12 | timestamp: string 13 | dummyDealId: string 14 | 15 | data Hosts: 16 | defaultWorker: ?Host 17 | 18 | func get() -> Hosts: 19 | <- Hosts( 20 | defaultWorker=nil 21 | ) 22 | -------------------------------------------------------------------------------- /.fluence/aqua/services.aqua: -------------------------------------------------------------------------------- 1 | data BytesValue: 2 | value: []u8 3 | success: bool 4 | error: string 5 | 6 | data JsonString: 7 | value: string 8 | success: bool 9 | error: string 10 | 11 | data U64Value: 12 | value: u64 13 | success: bool 14 | error: string 15 | 16 | service EthRpc("eth_rpc"): 17 | accounts(uri: string) -> []JsonString 18 | block_number(uri: string) -> U64Value 19 | call(uri: string, req: string, block: u64) -> BytesValue 20 | call_get_accounts(uri: string) -> [][]u8 21 | eth_call(uri: string, method: string, json_args: []string) -> JsonString 22 | -------------------------------------------------------------------------------- /.fluence/workers.yaml: 
-------------------------------------------------------------------------------- 1 | # yaml-language-server: $schema=schemas/workers.json 2 | 3 | # A result of app deployment. This file is created automatically after successful deployment using `fluence workers deploy` command 4 | 5 | # Documentation: https://github.com/fluencelabs/cli/tree/main/docs/configs/workers.md 6 | 7 | version: 0 8 | 9 | deals: 10 | defaultWorker: 11 | definition: "bafkreifncwgw7vgktf7j7qectsinrkhbhxjwfdvemmrshdsgi2ydelpvs4" 12 | timestamp: "2023-09-27T12:11:50.777Z" 13 | dealIdOriginal: "0x8d9C53312f0A4ad1Ba280ac197D371a33627E3bE" 14 | dealId: "8d9c53312f0a4ad1ba280ac197d371a33627e3be" 15 | chainNetwork: "testnet" 16 | chainNetworkId: 8000 17 | -------------------------------------------------------------------------------- /.github/actionlint.yaml: -------------------------------------------------------------------------------- 1 | self-hosted-runner: 2 | labels: 3 | - builder 4 | -------------------------------------------------------------------------------- /.github/release-please/config.json: -------------------------------------------------------------------------------- 1 | { 2 | "bootstrap-sha": "36767706f357c7c650b2a610a06cff6c9ebe2d07", 3 | "release-type": "simple", 4 | "bump-minor-pre-major": true, 5 | "bump-patch-for-minor-pre-major": true, 6 | "packages": { 7 | ".": {} 8 | } 9 | } 10 | -------------------------------------------------------------------------------- /.github/release-please/manifest.json: -------------------------------------------------------------------------------- 1 | { 2 | ".": "0.0.18" 3 | } 4 | -------------------------------------------------------------------------------- /.github/renovate.json: -------------------------------------------------------------------------------- 1 | { 2 | "$schema": "https://docs.renovatebot.com/renovate-schema.json", 3 | "extends": [ 4 | "config:base" 5 | ], 6 | "enabledManagers": ["cargo", "npm", "github-actions"], 7 | "rangeStrategy": "pin", 8 | "packageRules": [ 9 | { 10 | "matchDepTypes": ["devDependencies"], 11 | "prPriority": -1 12 | }, 13 | { 14 | "matchUpdateTypes": ["major"], 15 | "prConcurrentLimit": 1 16 | }, 17 | { 18 | "matchManagers": ["github-actions"], 19 | "prPriority": 1 20 | } 21 | ] 22 | } 23 | -------------------------------------------------------------------------------- /.github/workflows/lint-pr.yml: -------------------------------------------------------------------------------- 1 | name: lint PR 2 | 3 | on: 4 | pull_request: 5 | types: 6 | - opened 7 | - edited 8 | - synchronize 9 | 10 | concurrency: 11 | group: "${{ github.workflow }}-${{ github.ref }}" 12 | cancel-in-progress: true 13 | 14 | jobs: 15 | title: 16 | name: Validate PR title 17 | runs-on: ubuntu-latest 18 | steps: 19 | - uses: amannn/action-semantic-pull-request@v5 20 | env: 21 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 22 | -------------------------------------------------------------------------------- /.github/workflows/lint.yml: -------------------------------------------------------------------------------- 1 | name: lint 2 | 3 | on: 4 | pull_request: 5 | paths: 6 | - ".github/workflows/**" 7 | - ".github/renovate.json" 8 | 9 | concurrency: 10 | group: "${{ github.workflow }}-${{ github.ref }}" 11 | cancel-in-progress: true 12 | 13 | jobs: 14 | reviewdog: 15 | runs-on: ubuntu-latest 16 | steps: 17 | - name: Checkout 18 | uses: actions/checkout@v4 19 | 20 | - name: Lint actions 21 | uses: reviewdog/action-actionlint@v1 22 | env: 23 | SHELLCHECK_OPTS: 
"-e SC2086 -e SC2207 -e SC2128" 24 | with: 25 | reporter: github-pr-check 26 | fail_on_error: true 27 | 28 | renovate: 29 | runs-on: ubuntu-latest 30 | steps: 31 | - name: Checkout 32 | uses: actions/checkout@v4 33 | 34 | - name: Renovate Config Validator 35 | uses: tj-actions/renovate-config-validator@v2 36 | with: 37 | config_file: .github/renovate.json 38 | -------------------------------------------------------------------------------- /.github/workflows/release.yml: -------------------------------------------------------------------------------- 1 | name: "release-please" 2 | 3 | on: 4 | push: 5 | branches: 6 | - "main" 7 | 8 | concurrency: 9 | group: "${{ github.workflow }}-${{ github.ref }}" 10 | 11 | jobs: 12 | release-please: 13 | runs-on: ubuntu-latest 14 | 15 | outputs: 16 | release-created: ${{ steps.release.outputs['release_created'] }} 17 | tag-name: ${{ steps.release.outputs['tag_name'] }} 18 | version: ${{ steps.release.outputs['version'] }} 19 | pr: ${{ steps.release.outputs['pr'] }} 20 | 21 | steps: 22 | - name: Run release-please 23 | id: release 24 | uses: google-github-actions/release-please-action@v4 25 | with: 26 | token: ${{ secrets.FLUENCEBOT_RELEASE_PLEASE_PAT }} 27 | command: manifest 28 | config-file: .github/release-please/config.json 29 | manifest-file: .github/release-please/manifest.json 30 | 31 | - name: Show output from release-please 32 | if: steps.release.outputs.releases_created 33 | env: 34 | RELEASE_PLEASE_OUTPUT: ${{ toJSON(steps.release.outputs) }} 35 | run: echo "${RELEASE_PLEASE_OUTPUT}" | jq 36 | 37 | bump-version: 38 | if: needs.release-please.outputs.pr != null 39 | runs-on: ubuntu-latest 40 | needs: 41 | - release-please 42 | 43 | permissions: 44 | contents: write 45 | 46 | steps: 47 | - name: Checkout 48 | uses: actions/checkout@v4 49 | with: 50 | ref: ${{ fromJson(needs.release-please.outputs.pr).headBranchName }} 51 | token: ${{ secrets.FLUENCEBOT_RELEASE_PLEASE_PAT }} 52 | 53 | - name: Get version 54 | id: version 55 | run: | 56 | version="$(jq -r '.[]' .github/release-please/manifest.json)" 57 | echo "version=${version}" >> $GITHUB_OUTPUT 58 | 59 | - name: Setup node 60 | uses: actions/setup-node@v4 61 | with: 62 | node-version: "18" 63 | registry-url: "https://registry.npmjs.org" 64 | cache: "npm" 65 | cache-dependency-path: "gateway/package-lock.json" 66 | 67 | - name: Set gateway version 68 | run: npm version ${{ steps.version.outputs.version }} 69 | working-directory: gateway 70 | 71 | - name: Commit version bump 72 | uses: stefanzweifel/git-auto-commit-action@v5 73 | with: 74 | commit_message: "chore: Bump version to ${{ steps.version.outputs.version }}" 75 | branch: ${{ fromJson(needs.release-please.outputs.pr).headBranchName }} 76 | commit_user_name: fluencebot 77 | commit_user_email: devops@fluence.one 78 | commit_author: fluencebot 79 | 80 | publish: 81 | if: needs.release-please.outputs.release-created 82 | runs-on: ubuntu-latest 83 | needs: release-please 84 | 85 | permissions: 86 | contents: write 87 | id-token: write 88 | 89 | steps: 90 | - name: Checkout 91 | uses: actions/checkout@v4 92 | with: 93 | token: ${{ secrets.FLUENCEBOT_RELEASE_PLEASE_PAT }} 94 | 95 | - name: Import secrets 96 | uses: hashicorp/vault-action@v2.8.0 97 | with: 98 | url: https://vault.fluence.dev 99 | path: jwt/github 100 | role: ci 101 | method: jwt 102 | jwtGithubAudience: "https://github.com/fluencelabs" 103 | jwtTtl: 300 104 | exportToken: false 105 | secrets: | 106 | kv/npmjs/fluencebot token | NODE_AUTH_TOKEN 107 | 108 | - name: Setup node 109 | 
uses: actions/setup-node@v4 110 | with: 111 | node-version: "18" 112 | registry-url: "https://registry.npmjs.org" 113 | cache-dependency-path: "gateway/package-lock.json" 114 | cache: "npm" 115 | 116 | - run: npm i 117 | working-directory: gateway 118 | 119 | - name: Publish gateway to NPM registry 120 | run: npm publish --access public --tag unstable 121 | working-directory: gateway 122 | 123 | slack: 124 | if: always() 125 | name: "Notify" 126 | runs-on: ubuntu-latest 127 | 128 | needs: 129 | - release-please 130 | - publish 131 | 132 | permissions: 133 | contents: read 134 | id-token: write 135 | 136 | steps: 137 | - uses: lwhiteley/dependent-jobs-result-check@v1 138 | id: status 139 | with: 140 | statuses: failure 141 | dependencies: ${{ toJSON(needs) }} 142 | 143 | - name: Log output 144 | run: | 145 | echo "statuses:" "${{ steps.status.outputs.statuses }}" 146 | echo "jobs:" "${{ steps.status.outputs.jobs }}" 147 | echo "found any?:" "${{ steps.status.outputs.found }}" 148 | 149 | - name: Import secrets 150 | uses: hashicorp/vault-action@v2.8.0 151 | with: 152 | url: https://vault.fluence.dev 153 | path: jwt/github 154 | role: ci 155 | method: jwt 156 | jwtGithubAudience: "https://github.com/fluencelabs" 157 | jwtTtl: 300 158 | exportToken: false 159 | secrets: | 160 | kv/slack/release-please webhook | SLACK_WEBHOOK_URL 161 | 162 | - uses: ravsamhq/notify-slack-action@v2 163 | if: steps.status.outputs.found == 'true' 164 | with: 165 | status: "failure" 166 | notification_title: "*{workflow}* has {status_message}" 167 | message_format: "${{ steps.status.outputs.jobs }} {status_message} in <{repo_url}|{repo}>" 168 | footer: "<{run_url}>" 169 | -------------------------------------------------------------------------------- /.github/workflows/run-tests.yml: -------------------------------------------------------------------------------- 1 | name: "test" 2 | 3 | on: 4 | pull_request: 5 | paths-ignore: 6 | - "**.md" 7 | - ".github/**" 8 | - "!.github/workflows/run-tests.yml" 9 | - "!.github/workflows/tests.yml" 10 | 11 | concurrency: 12 | group: "${{ github.workflow }}-${{ github.ref }}" 13 | cancel-in-progress: true 14 | 15 | jobs: 16 | frpc: 17 | uses: ./.github/workflows/tests.yml 18 | with: 19 | ref: ${{ github.ref }} 20 | 21 | lints: 22 | runs-on: builder 23 | 24 | env: 25 | RUSTFLAGS: "-D warnings" 26 | 27 | steps: 28 | - name: Checkout 29 | uses: actions/checkout@v4 30 | 31 | - name: Setup Rust toolchain 32 | uses: dsherret/rust-toolchain-file@v1 33 | 34 | - name: Setup cache 35 | uses: Swatinem/rust-cache@v2 36 | with: 37 | shared-key: frpc 38 | save-if: false 39 | 40 | - name: Run cargo check 41 | run: cargo check 42 | 43 | - name: Run cargo clippy 44 | uses: giraffate/clippy-action@v1 45 | with: 46 | reporter: github-pr-review 47 | clippy_flags: -Z unstable-options --all 48 | fail_on_error: true 49 | 50 | - name: Run cargo fmt 51 | uses: actions-rust-lang/rustfmt@v1 52 | -------------------------------------------------------------------------------- /.github/workflows/tests.yml: -------------------------------------------------------------------------------- 1 | name: "Run tests with workflow_call" 2 | 3 | on: 4 | workflow_call: 5 | inputs: 6 | ref: 7 | description: "git ref to checkout to" 8 | type: string 9 | default: "main" 10 | nox-image: 11 | description: "nox image tag" 12 | type: string 13 | default: "null" 14 | fcli-version: 15 | description: "fcli version to use" 16 | type: string 17 | # default: "unstable" 18 | default: "stage" 19 | js-client-version: 20 | 
description: "@fluencelabs/js-client version" 21 | type: string 22 | default: "null" 23 | fluence-env: 24 | description: "env to run tests against" 25 | type: string 26 | default: "local" 27 | 28 | jobs: 29 | tests: 30 | name: "Run tests" 31 | runs-on: builder 32 | 33 | env: 34 | FLUENCE_USER_DIR: "${{ github.workspace }}/tmp/.fluence" 35 | CI: true 36 | # FORCE_COLOR: true 37 | 38 | permissions: 39 | contents: read 40 | id-token: write 41 | 42 | steps: 43 | - name: Checkout repository 44 | uses: actions/checkout@v4 45 | with: 46 | repository: fluencelabs/frpc 47 | ref: ${{ inputs.ref }} 48 | 49 | - name: Import secrets 50 | uses: hashicorp/vault-action@v2.8.0 51 | with: 52 | url: https://vault.fluence.dev 53 | path: jwt/github 54 | role: ci 55 | method: jwt 56 | jwtGithubAudience: "https://github.com/fluencelabs" 57 | jwtTtl: 300 58 | secrets: | 59 | kv/hub.docker.com/fluencebot username | DOCKER_HUB_USERNAME ; 60 | kv/hub.docker.com/fluencebot password | DOCKER_HUB_PASSWORD ; 61 | kv/docker-registry/basicauth/ci username | DOCKER_USERNAME ; 62 | kv/docker-registry/basicauth/ci password | DOCKER_PASSWORD ; 63 | kv/npm-registry/basicauth/ci token | NODE_AUTH_TOKEN ; 64 | kv/ci/frpc chain_urls | RPC_PROVIDERS ; 65 | kv/ci/frpc private_key | FLUENCE_CHAIN_PRIVATE_KEY 66 | 67 | - name: Login to Docker Hub 68 | uses: docker/login-action@v3 69 | with: 70 | username: ${{ env.DOCKER_HUB_USERNAME }} 71 | password: ${{ env.DOCKER_HUB_PASSWORD }} 72 | 73 | - name: Login to private registry 74 | uses: docker/login-action@v3 75 | with: 76 | registry: docker.fluence.dev 77 | username: ${{ env.DOCKER_USERNAME }} 78 | password: ${{ env.DOCKER_PASSWORD }} 79 | 80 | - name: Setup Rust toolchain 81 | uses: dsherret/rust-toolchain-file@v1 82 | 83 | - name: Setup fcli 84 | uses: fluencelabs/setup-fluence@v1 85 | with: 86 | artifact: fcli 87 | version: ${{ inputs.fcli-version }} 88 | 89 | - name: Init local env with fcli 90 | run: fluence local init --no-input 91 | 92 | - name: Replace nox image in docker-compose 93 | if: inputs.nox-image != 'null' 94 | working-directory: .fluence 95 | run: | 96 | sed -i'' -e '/nox-/!b;n;s|image: fluencelabs/nox:.*$|image: ${{ inputs.nox-image }}|' docker-compose.yaml 97 | 98 | - name: Run local env 99 | run: fluence local up 100 | 101 | - name: Setup node with self-hosted registry 102 | uses: actions/setup-node@v4 103 | with: 104 | node-version: "18" 105 | registry-url: "https://npm.fluence.dev" 106 | cache: "npm" 107 | 108 | - name: Run npm i 109 | run: npm i 110 | 111 | - name: Run npm i in gateway 112 | working-directory: gateway 113 | run: npm i 114 | 115 | - name: Set js-client version 116 | if: inputs.js-client-version != 'null' 117 | uses: fluencelabs/github-actions/npm-set-dependency@main 118 | with: 119 | package: "@fluencelabs/js-client" 120 | version: "${{ inputs.js-client-version }}" 121 | working-directory: gateway 122 | 123 | - name: Run npm run build 124 | run: npm run build 125 | 126 | - name: Run tests 127 | env: 128 | FLUENCE_ENV: ${{ inputs.fluence-env }} 129 | run: npm run test -- -t deploy 130 | 131 | - name: Dump container logs 132 | if: always() 133 | uses: jwalton/gh-docker-logs@v2 134 | 135 | - name: Cleanup 136 | if: always() 137 | run: | 138 | fluence local down 139 | rm -rf tmp ${{ env.FLUENCE_USER_DIR }} 140 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .idea 2 | .DS_Store 3 | **/node_modules 4 | **/target/ 5 | 
.repl_history 6 | .vscode/settings.json 7 | src/ts/src/aqua 8 | src/js/src/aqua 9 | aqua-compiled 10 | 11 | # recommended by Fluence Labs: 12 | .fluence/project-secrets.yaml 13 | .fluence/docker-compose.yaml 14 | .fluence/schemas 15 | .fluence/secrets 16 | .fluence/tmp 17 | 18 | # to avoid accidental publish of RPC URLs 19 | gateway/configs/quickstart_config.json 20 | -------------------------------------------------------------------------------- /.vscode/extensions.json: -------------------------------------------------------------------------------- 1 | { 2 | "recommendations": [ 3 | "redhat.vscode-yaml", 4 | "FluenceLabs.aqua" 5 | ] 6 | } 7 | -------------------------------------------------------------------------------- /CHANGELOG.md: -------------------------------------------------------------------------------- 1 | # Changelog 2 | 3 | ## [0.0.18](https://github.com/fluencelabs/fRPC-Substrate/compare/v0.0.17...v0.0.18) (2023-10-14) 4 | 5 | 6 | ### Bug Fixes 7 | 8 | * **deps:** update rust crate serde to v1.0.189 ([#114](https://github.com/fluencelabs/fRPC-Substrate/issues/114)) ([32b8972](https://github.com/fluencelabs/fRPC-Substrate/commit/32b8972a8f1137f742657e47871f82842e0db12c)) 9 | 10 | ## [0.0.17](https://github.com/fluencelabs/fRPC-Substrate/compare/v0.0.16...v0.0.17) (2023-10-11) 11 | 12 | 13 | ### Bug Fixes 14 | 15 | * **aqua:** fix compilation under fcli 0.8.x ([#96](https://github.com/fluencelabs/fRPC-Substrate/issues/96)) ([d88bfc1](https://github.com/fluencelabs/fRPC-Substrate/commit/d88bfc143ce294db5f10ee67b74447698e6e20c0)) 16 | * **deps:** pin rust crate marine-rs-sdk to =0.10.0 ([#93](https://github.com/fluencelabs/fRPC-Substrate/issues/93)) ([18502b9](https://github.com/fluencelabs/fRPC-Substrate/commit/18502b9ebf175ae9d827d80a27c6dde74eaa2408)) 17 | * **deps:** update dependency @fluencelabs/marine-worker to v0.3.3 ([#94](https://github.com/fluencelabs/fRPC-Substrate/issues/94)) ([41bec37](https://github.com/fluencelabs/fRPC-Substrate/commit/41bec3789c8d6846050c4e92bc7c8236ee3639eb)) 18 | * **deps:** update dependency web3 to v4.1.2 ([#92](https://github.com/fluencelabs/fRPC-Substrate/issues/92)) ([ead9943](https://github.com/fluencelabs/fRPC-Substrate/commit/ead994324f301f495e0ccae9a5f211666f99a775)) 19 | * **deps:** update rust crate serde_json to v1.0.107 ([#88](https://github.com/fluencelabs/fRPC-Substrate/issues/88)) ([e7b5cef](https://github.com/fluencelabs/fRPC-Substrate/commit/e7b5cefac867abc8b24fc85657212b131cb79f76)) 20 | * **deps:** update rust crate tokio to v1.33.0 ([#103](https://github.com/fluencelabs/fRPC-Substrate/issues/103)) ([bb41bb4](https://github.com/fluencelabs/fRPC-Substrate/commit/bb41bb4b49c987bd2bc94af015c16e35b13477ce)) 21 | * **quorum:** fix parsing quorum result ([#98](https://github.com/fluencelabs/fRPC-Substrate/issues/98)) ([34d6340](https://github.com/fluencelabs/fRPC-Substrate/commit/34d6340c5efc890ee334711154c0bd02959fe038)) 22 | * **readme:** Fix RPC links ([#108](https://github.com/fluencelabs/fRPC-Substrate/issues/108)) ([cf9f338](https://github.com/fluencelabs/fRPC-Substrate/commit/cf9f3384c6f07fcbc5253e120bfa5c149a4de30e)) 23 | * **readme:** Fix spelling ([#109](https://github.com/fluencelabs/fRPC-Substrate/issues/109)) ([110dc59](https://github.com/fluencelabs/fRPC-Substrate/commit/110dc595cd99af142ac50cbc537662c4d995a346)) 24 | 25 | ## [0.0.12](https://github.com/fluencelabs/fRPC-Substrate/compare/v0.0.11...v0.0.12) (2023-10-09) 26 | 27 | 28 | ### Bug Fixes 29 | 30 | * **aqua:** fix compilation under fcli 0.8.x 
([#96](https://github.com/fluencelabs/fRPC-Substrate/issues/96)) ([d88bfc1](https://github.com/fluencelabs/fRPC-Substrate/commit/d88bfc143ce294db5f10ee67b74447698e6e20c0)) 31 | * **deps:** pin rust crate marine-rs-sdk to =0.10.0 ([#93](https://github.com/fluencelabs/fRPC-Substrate/issues/93)) ([18502b9](https://github.com/fluencelabs/fRPC-Substrate/commit/18502b9ebf175ae9d827d80a27c6dde74eaa2408)) 32 | * **deps:** update dependency @fluencelabs/marine-worker to v0.3.3 ([#94](https://github.com/fluencelabs/fRPC-Substrate/issues/94)) ([41bec37](https://github.com/fluencelabs/fRPC-Substrate/commit/41bec3789c8d6846050c4e92bc7c8236ee3639eb)) 33 | * **deps:** update dependency web3 to v4.1.2 ([#92](https://github.com/fluencelabs/fRPC-Substrate/issues/92)) ([ead9943](https://github.com/fluencelabs/fRPC-Substrate/commit/ead994324f301f495e0ccae9a5f211666f99a775)) 34 | * **deps:** update rust crate serde_json to v1.0.107 ([#88](https://github.com/fluencelabs/fRPC-Substrate/issues/88)) ([e7b5cef](https://github.com/fluencelabs/fRPC-Substrate/commit/e7b5cefac867abc8b24fc85657212b131cb79f76)) 35 | * **quorum:** fix parsing quorum result ([#98](https://github.com/fluencelabs/fRPC-Substrate/issues/98)) ([34d6340](https://github.com/fluencelabs/fRPC-Substrate/commit/34d6340c5efc890ee334711154c0bd02959fe038)) 36 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [workspace] 2 | resolver = "2" 3 | members = [ "wasm-modules/eth-rpc", "wasm-modules/curl-adapter" ] 4 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 
39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. 
You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Hacking Decentralized RPC with Fluence 2 | 3 | ## Overview 4 | 5 | Running blockchain nodes to support your dApps' read and write requirements to/from a node tends to be rather resource intensive. Not surprisingly, Web3 developers have been flocking toward integrating their dApps with hosted blockchain JSON-RPC gateways. 
Alas, centralized "RPC as SaaS" introduces bottlenecks challenging the availability, reliability and Web3 ethos of dApps while quite often raising the exit barriers by providing custom API overlays to the EVM JSON-RPC API convention. 6 | 7 | To accelerate dApp developers' ability to utilize decentralized RPC in their dApps, Fluence is providing a decentralized RPC (fRPC) substrate, i.e., a starter kit that includes a gateway to bridge HTTP and Aqua, a Wasm service to connect to RPC endpoints and Aqua scripts implementing basic availability, failover and verification algorithms. See Figure 1. 8 | 9 | Figure 1: Stylized fRPC Workflow With dApp 10 | 11 | ```mermaid 12 | sequenceDiagram 13 | 14 | participant A as dApp 15 | participant G as fRPC Gateway 16 | participant N as Fluence network 17 | participant R as RPC endpoints 18 | 19 | A ->> G: dApp HTTP request 20 | G ->> G: map HTTP to Aqua request 21 | G ->> G: select algorithm (failover, round robin, quorum, etc.) 22 | G ->> N: Aqua call to network peer(s) 23 | N ->> R: HTTP call to RPC endpoint(s) 24 | R ->> N: Response or timeout 25 | alt response 26 | N ->> G: response to gateway 27 | G ->> A: response to dApp 28 | else timeout 29 | loop over endpoint urls 30 | N ->> R: try another request 31 | alt response 32 | N ->> G: response to gateway 33 | G ->> A: response to dApp -- break 34 | end 35 | end 36 | G ->> G: timeout 37 | G ->> A: no response error 38 | end 39 | ``` 40 | 41 | fRPC substrate allows existing dApps to be upgraded to decentralized RPC while not requiring any changes to their frontend other than changing the HTTP transport url and making it easy to implement more complex control algorithms. Moreover, fRPC substrate components are highly customizable allowing developers to quickly and easily extend the substrate to fit their dApps' needs and to improve the fRPC ecosystem with improved services and algorithms. 42 | 43 | ## Quickstart 44 | 45 | Clone the repo if you haven't done so already, and in the *gateway* directory, install the dependencies: 46 | 47 | ```bash 48 | npm i 49 | ``` 50 | 51 | If you don't have Fluence CLI installed, do: 52 | 53 | ```bash 54 | npm -g i @fluencelabs/cli@unstable 55 | ``` 56 | 57 | Before you proceed, you should have, say, three RPC endpoint urls, e.g., Infura, Alchemy and Ankr, for the same EVM-based chain you are using in your dApp. Update the `configs/quickstart_config.json` by providing your endpoint urls and ignore the rest of the parameters for now: 58 | 59 | ```json 60 | { 61 | "providers": [ 62 | "", // <- replace 63 | "", // <- replace 64 | "" // <- replace and maybe add more 65 | ], 66 | "mode": "round-robin", 67 | "relay": "/dns4/stage.fluence.dev/tcp/19002/wss/p2p/12D3KooWMigkP4jkVyufq5JnDJL6nXvyjeaDNpRfEZqQhsG3sYCU", 68 | "serviceId": "e9e32b0b-3b19-4bdd-b1da-f5ff9cc0357f", 69 | "port": 3000, 70 | "counterServiceId": null, 71 | "counterPeerId": null, 72 | "quorumServiceId": null, 73 | "quorumPeerId": null, 74 | "quorumNumber": null 75 | } 76 | ``` 77 | 78 | Now start the gateway: 79 | 80 | **Command**: 81 | 82 | ```bash 83 | npm -C gateway run run configs/quickstart_config.json 84 | ``` 85 | 86 | **Output**: 87 | 88 | ```bash 89 | > @fluencelabs/aqua-eth-gateway@0.0.11 run 90 | > fluence aqua -i aqua/ -o aqua-compiled/ --js && node src/index.js configs/my_quickstart_config.json 91 | 92 | # Compiling... 
93 | Result /Users/bebo/localdev/fRPC-Substrate/gateway/aqua-compiled/rpc.js: compilation OK (10 functions, 4 services) 94 | Result /Users/bebo/localdev/fRPC-Substrate/gateway/aqua-compiled/rpc.d.ts: compilation OK (10 functions, 4 services) 95 | Result /Users/bebo/localdev/fRPC-Substrate/gateway/aqua-compiled/rpc.js: compilation OK (10 functions, 4 services) 96 | Result /Users/bebo/localdev/fRPC-Substrate/gateway/aqua-compiled/rpc.d.ts: compilation OK (10 functions, 4 services) 97 | 98 | Running server... 99 | Server was started on port 3000 100 | 101 | ``` 102 | 103 | With the gateway ready for action, all you have to do is change your dApps HTTP transport url to `http://127.0.0.1:3000` and keep using your dApp as usual. In the absence of a dApp, we can interact with the gateway from the command line: 104 | 105 | **Command**: 106 | 107 | ```bash 108 | curl http://127.0.0.1:3000 \ 109 | -X POST \ 110 | -H "Content-Type: application/json" \ 111 | -d '{"jsonrpc":"2.0","method":"eth_blockNumber","params": [],"id":100}' 112 | ``` 113 | 114 | **Output**: 115 | 116 | ```bash 117 | {"jsonrpc":"2.0","id":100,"result":"0x82b950"} 118 | 119 | # with the corresponding gateway log output 120 | Receiving request 'eth_blockNumber' 121 | peerId: 12D3KooWKDnWpCLPJrycSevracdEgGznfDPwG1g5CWbt8uccdL79 122 | Counter: 1 123 | Worker used: "12D3KooWKPcNwR6EMq3sqm4sKtUKmZbMhPQ2dk1zr8YNgjdu9Xqn" 124 | Call will be to : https://eth-goerli.g.alchemy.com/v2/ 125 | ``` 126 | 127 | Since we have specified *round-robin* in our config file and have more than one endpoint url in play, re-running the json-rpc call should result in a different endpoint selection: 128 | 129 | **Command**: 130 | 131 | ```bash 132 | curl http://127.0.0.1:3000 \ 133 | -X POST \ 134 | -H "Content-Type: application/json" \ 135 | -d '{"jsonrpc":"2.0","method":"eth_blockNumber","params": [],"id":100}' 136 | ``` 137 | 138 | **Output**: 139 | 140 | ```bash 141 | {"jsonrpc":"2.0","id":100,"result":"0x82b956"} 142 | 143 | # with the corresponding gateway log output 144 | 145 | Receiving request 'eth_blockNumber' 146 | peerId: 12D3KooWKDnWpCLPJrycSevracdEgGznfDPwG1g5CWbt8uccdL79 147 | Counter: 2 148 | Worker used: "12D3KooWKPcNwR6EMq3sqm4sKtUKmZbMhPQ2dk1zr8YNgjdu9Xqn" 149 | Call will be to : https://frequent-sleek-river.ethereum-goerli.discover.quiknode.pro// 150 | ``` 151 | 152 | Success! Go ahead and replace the `round-robin` mode with the `random` mode in your config file, stop and start the gateway and have a look at the different endpoint management. All available algorithms and their parameters are described in the [fRPC Algorithms](#fRPC-Algorithms) section below. 153 | 154 | Congrats, you just took a major step toward keeping your dApp decentralized, available and performant! Now it's time to dive into the Fluence protocol and technology stack to learn how to improve upon the basic substrate. 155 | 156 | ## Developing With Fluence 157 | 158 | Fluence's decentralized serverless protocol and solution stack allows developers to quickly create decentralized applications and protocols by distributing services for subsequent execution to peers of the open and permissionless Fluence peer-to-peer compute network. 
Specifically, developers: 159 | 160 | * express their business logic in Rust code compiled to wasm32-wasi 161 | * create a [Deal](https://fluence.dev/docs/build/glossary#deal), i.e., a construct that links on-chain contract economics and off-chain resources necessary for peers to run a service, which entails escrowing stablecoin, currently limited to (testnet) USDC, to the Deal contract 162 | * deploy their Wasm modules plus linking instructions as a uniquely addressable *service* to p2p network storage, i.e., IPFS 163 | 164 | With a Deal in place, resource owners, i.e., owner/operators of one or more peers, make a decision whether to host the service and if so, participate in the Deal by providing a stake to the Deal contract and pulling the corresponding service assets required for hosting from IPFS. As a matter of fact, peers utilize [Workers](https://fluence.dev/docs/build/glossary#worker), omitted from Figure 2 for simplicity reasons, to implement their side of a Deal. See Figure 2. 165 | 166 | ```mermaid 167 | 168 | sequenceDiagram 169 | title: Figure 2: Stylized Deal Creation For Service Deployment 170 | 171 | actor D as Developer 172 | participant CF as Contract factory 173 | participant C as Contract 174 | participant N as Network storage (IPFS) 175 | actor R as Resource owner 176 | participant P as Peer i owned by resource owner 177 | 178 | D ->> D: Business logic to Rust To Wasm 179 | D ->> CF: request deal contract for service 180 | CF ->> C: generate Deal contract for service 181 | par 182 | D ->> C: escrow funds 183 | D ->> N: upload service package 184 | end 185 | R ->> CF: listen for new contracts 186 | loop listen to on-chain events 187 | alt new contract 188 | R ->> C: evaluate deal 189 | alt like deal 190 | R ->> C: join deal with stake 191 | P ->> N: request service package 192 | P ->> P: host service 193 | P ->> P: wait for service request 194 | alt get request 195 | R ->> C: claim payment 196 | end 197 | end 198 | end 199 | end 200 | 201 | ``` 202 | 203 | While this sounds, and is, elaborate, *Fluence CLI*, see below, takes care of most of the scaffolding and workflow management for you. 204 | 205 | > At this point, the marketplace for Fluence's decentralized serverless isn't quite finished. The supply side has not been enabled and on the demand side, parameters are fixed for the testnet. That is, developers are not able to provide custom Deal parameters, such as willingness to pay for service execution. Instead, these parameters, i.e. price of execution per epoch and epoch duration, are hard-coded and used by Fluence CLI to create the corresponding Deal contract and transaction for you to sign. Moreover, economics are limited to the testnet using testnet tokens. 206 | 207 | ## Setting Up For Developing With Fluence 208 | 209 | To get going, you need to install and set up a few dependencies. 210 | 211 | > Fluence tooling works on most \*nix systems including OSX and Windows Linux Subsystem. At this time, Windows is not supported. 212 | 213 | ### Off-chain Dependencies 214 | 215 | * [node](https://nodejs.org/en/) 18 LTS 216 | * [Fluence CLI](https://github.com/fluencelabs/fluence-cli) 217 | * [Rust](https://www.rust-lang.org/tools/install) (optional; Fluence CLI will install if not already in your environment) 218 | * For VSCode, there is a helpful [Aqua language support](https://marketplace.visualstudio.com/items?itemName=FluenceLabs.aqua) package available 219 | 220 | > This Fluence CLI installs missing dependencies as needed ("lazy install"). 
If you want all your dependencies installed at once, use the `fluence dependencies i` command. 221 | 222 | ### On-chain Dependencies 223 | 224 | * Wallectconnect compatible wallet, e.g., MetaMask, set for and funded with: 225 | * [Mumbai testnet](https://chainlist.org/chain/80001) parameters 226 | * [Mumbai faucet](https://mumbaifaucet.com/) 227 | * [Fluence USDC testnet faucet](https://faucet.fluence.dev/) 228 | 229 | You will need Mumbai MATIC and Fluence (testnet) USDC. This is as good a time as any to head over to those faucets and get your allocations. As an experienced Web3 dev, you know it's good hygiene to set up a new account, say, fRPC-dev, for the Mumbai testnet and testnet tokens. 230 | 231 | ### RPC Endpoints 232 | 233 | Since fRPC works with existing centralized or self-hosted RPC providers, you want at least three provider urls with appended API keys to *the* chain of your choice. Multi-chain support is currently not supported by fRPC Substrate. For Ethereum's Goerli testnet, for example: 234 | 235 | * Infura: https://goerli.infura.io/v3/\/ 236 | * Alchemy: https://eth-goerli.g.alchemy.com/v2/\/ 237 | * Ankr: https://rpc.ankr.com/eth_goerli/\ 238 | 239 | Each of the listed providers has a free account option and supports the API key in the url style, rather than the header, which is the current gateway implementation choice; a choice you should feel free to override and customize to your needs. 240 | 241 | ### Tools And Tooling 242 | 243 | The most prominent developer's helper is [Fluence CLI](https://github.com/fluencelabs/fluence-cli), which allows you to manage the entire lifecycle of a project including Rust and Aqua code as well as Deals. From scaffolding your project, services and modules to Deal creation and service deployment, Fluence CLI has you covered. Moreover, Fluence CLI can scaffold JS projects using [js-client](https://github.com/fluencelabs/js-client) allowing you to create, or integrate, Fluence projects for the browser or node app. See Figure 3 for a quick overview of workflows managed by Fluence CLI and the associated commands. If you have Fluence CLI installed, use `fluence --help` to get a more complete overview of topics and commands. 244 | 245 | Figure 3: Stylized Project Creation And Deployment Workflow With Fluence CLI 246 | 247 | ```mermaid 248 | 249 | stateDiagram 250 | 251 | [*] --> InitProject: fluence init 252 | InitProject --> CreateNewService: fluence service new 253 | InitProject --> AddExistingService: fluence service add 254 | CreateNewService --> Service 255 | AddExistingService --> Service 256 | Service --> AddNewModules: fluence module new 257 | Service --> AddExistingModules: fluence module add 258 | Service --> LocalTesting: fluence service repl, cargo test 259 | Service --> DeployedService: fluence deal deploy 260 | DeployedService --> RunService: fluence run 261 | ``` 262 | 263 | Fluence CLI uses multiple *yaml* config files. You can find the schemas in the [schemas](./.fluence/schemas) directory. Note that Fluence CLI creates config files lazily, i.e., as needed. 264 | 265 | See [FLuence CLI](https://github.com/fluencelabs/fluence-cli) for more details. For implementing your business logic with Rust and compiling it to wasm32-wasi, aka Wasm, module(s), see the [Marine book](https://fluence.dev/docs/marine-book/introduction). To learn more about distributed choreography and composition of services, see the [Aqua book](https://fluence.dev/docs/aqua-book/introduction). 
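As a quick orientation, the Figure 3 workflow maps to a command sequence roughly like the one below. This is a minimal sketch: the project, service and module names are illustrative, and several of these commands prompt interactively for any values not passed as arguments.

```bash
# scaffold a new project (all names below are illustrative)
fluence init my-project
cd my-project

# create a service, create a module, and link the module to the service
fluence service new my_service
fluence module new my_module
fluence module add

# build the Wasm modules and explore them locally in the REPL
fluence build
fluence service repl

# deploy via a Deal, then run Aqua against the deployment
fluence deal deploy
fluence run
```
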
266 | 267 | ## Hacking On fRPC Substrate 268 | 269 | Fluence's *fRPC Substrate* is a starter kit that includes all the components you need to quickly enable your dApp with decentralized RPC using existing centralized RPC providers, e.g., Infura, Alchemy, Ankr, etc., without touching your existing frontend Web3 code. fRPC substrate consists of the following code components, see Figure 4: 270 | 271 | * RPC API "adapter" code written in Rust and compiled to wasm32-wasi modules that are deployable to any peer in the Fluence p2p network 272 | * Aqua code for distributed algorithms, such as Random and Round Robin selection, using the distributed Wasm connectors for request-response handling over libp2p 273 | * A gateway app server that bridges libp2p transport to the HTTP transport expected by your dApps' Web3 SDK, such as web3js, ethers, etc. Note that the expectation at this point is for you to *self-host* the gateway at a location of your choosing. 274 | 275 | Figure 4: Stylized fRPC Use With dApps 276 | 277 | ```mermaid 278 | sequenceDiagram 279 | 280 | participant D as dApp 281 | participant G as Gateway 282 | participant N as Fluence p2p network 283 | participant R as Centralized RPC providers 284 | 285 | G ->> G: Configure and start Gateway 286 | D ->> D: Use gateway Address:port in web3 sdk setup 287 | D ->> G: Make Web3 request 288 | G ->> N: Call one or more Fluence services 289 | N ->> R: Call one or more different RPC providers 290 | R ->> N: Services processes response based on specified algo 291 | N ->> G: Gateway receives "curated" response 292 | G ->> D: dApp receives response 293 | D ->> D: dApp does its thing 294 | ``` 295 | 296 | ### fRPC Wasm Components 297 | 298 | fRPC Substrate comes with one *service* comprised of two Wasm modules, which you can find in the [wasm-modules]("./wasm-modules/") directory. The service is called 'eth_rpc' and the included modules are a [curl_adapater]("./../wasm-modules/curl-adapter") and [eth_rpc]("./../wasm-modules/eth-rpc"). The *curl_adapter* module is a generic, re-usable module allowing access to a peer's curl binary, if permissioned by the peer, and exposes the *curl_request* function. Any modules requiring curl access may use the curl_adapter modules via [FFI linking](https://doc.rust-lang.org/nomicon/ffi.html) with the *curl_request* function. 299 | 300 | The *eth_rpc* module manages the json-rpc requests and responses initiated and consumed by Aqua scripts as the result of some frontend event, e.g. our dApp or curl request. Once available on peers of the Fluence p2p network, the *eth-rpc* services, aka RPC endpoint adapter, allows us to call one or more RPC endpoints using Aqua for choreography and composition of services. 301 | 302 | Before you can deploy your service, use `fluence build` in the root dir to compile each module's Rust code to wasm32-wasi output: 303 | 304 | **Command**: 305 | 306 | ```bash 307 | fluence build 308 | ``` 309 | 310 | **Output**: 311 | 312 | ```bash 313 | # Making sure all services are downloaded... 314 | # Making sure all services are built... 315 | Finished release [optimized] target(s) in 0.61s 316 | ``` 317 | 318 | See [target dir]("./target/wasm32-wasi/release") for *curl_adapter.wasm* and *eth_erpc.wasm*, respectively. 
With the wasm modules available, you can locally interact with them using [Marine REPL](https://crates.io/crates/mrepl): 319 | 320 | `fluence service repl` 321 | 322 | **Command**: 323 | 324 | ```bash 325 | fluence service repl 326 | ``` 327 | 328 | **Output**: 329 | 330 | ```bash 331 | ? Enter service name from fluence.yaml, path to a service or url to .tar.gz archive wasm-modules 332 | # Making sure service and modules are downloaded and built... 333 | Finished release [optimized] target(s) in 0.18s 334 | 335 | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ 336 | 337 | Execute help inside repl to see available commands. 338 | Current service is: eth_rpc 339 | Call eth_rpc service functions in repl like this: 340 | 341 | call eth_rpc [, ] 342 | 343 | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ 344 | 345 | Welcome to the Marine REPL (version 0.19.1) 346 | Minimal supported versions 347 | sdk: 0.6.0 348 | interface-types: 0.20.0 349 | 350 | app service was created with service id = f0fc66d9-1fc6-494f-bcc1-104970875730 351 | elapsed time 254.67135ms 352 | 353 | 1> i 354 | Loaded modules interface: 355 | exported data types (combined from all modules): 356 | data MountedBinaryResult: 357 | ret_code: i32 358 | error: string 359 | stdout: []u8 360 | stderr: []u8 361 | data U64Value: 362 | value: u64 363 | success: bool 364 | error: string 365 | data BytesValue: 366 | value: []u8 367 | success: bool 368 | error: string 369 | data JsonString: 370 | value: string 371 | success: bool 372 | error: string 373 | 374 | exported functions: 375 | curl_adapter: 376 | func curl_request(cmd: []string) -> MountedBinaryResult 377 | eth_rpc: 378 | func block_number(uri: string) -> U64Value 379 | func call_get_accounts(uri: string) -> [][]u8 380 | func accounts(uri: string) -> []JsonString 381 | func call(uri: string, req: string, block: u64) -> BytesValue 382 | func eth_call(uri: string, method: string, json_args: []string) -> JsonString 383 | 384 | 2>call eth_rpc eth_call ["https://.infura.io/v3/", "eth_blockNumber", []] 385 | result: { 386 | "error": "", 387 | "success": true, 388 | "value": "\"0x82a08d\"" 389 | } 390 | elapsed time: 588.092888ms 391 | 392 | 3> 393 | ``` 394 | 395 | The *i* command lists all the exported interfaces from the wasm modules in Aqua instead of Rust notation. In *exported* functions you see the module namespace, e.g., *curl_adapter*, and exported functions, e.g., *curl_request*. To execute a function, use `call []`. 396 | 397 | ### Adding Modules To A Service 398 | 399 | Regardless of your customization requirements, you probably will have no reason to modify the *curl_adapter* and *eth_rpc* modules. However, you may want to add new modules, or even services, to handle your additional business logic requirements. For example, you may want to capture RPC endpoint performance data, such as response times and availability, to some Web3 storage, e.g., IPFS or Ceramic, for further analysis to, say, derive a weighting scheme for endpoint selection. 400 | 401 | Fluence CLI allows you to quickly create a new, or add an existing, module to your project. For example, 402 | 403 | **Command**: 404 | 405 | ```bash 406 | fluence module new --path ./wasm-modules demo-module 407 | ``` 408 | 409 | **Output**: 410 | 411 | ```bash 412 | Successfully generated template for new module at demo-module 413 | ``` 414 | 415 | Which created a Rust project in the *wasm-module/demo-module* directory ready for you to customize. 
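The scaffolded module should mirror the layout of the two modules already present under *wasm-modules*; the exact template contents may differ, but roughly:

```bash
tree wasm-modules/demo-module
# wasm-modules/demo-module
# ├── Cargo.toml    crate manifest for the wasm32-wasi build
# ├── module.yaml   Fluence module configuration
# └── src
#     └── main.rs   business logic exposed to Aqua
```
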
When you're done, you add the new module to your service config, service.yaml: 416 | 417 | **Command**: 418 | 419 | ```bash 420 | fluence module add 421 | ``` 422 | 423 | **Output**: 424 | 425 | ```bash 426 | ? Enter path to a module or url to .tar.gz archive wasm-modules/demo 427 | ? Enter service name from fluence.yaml or path to the service directory wasm-modules 428 | Added demo to ~/localdev/fRPC-Substrate/wasm-modules/service.yaml 429 | ``` 430 | 431 | The demo module is now part of the service and `fluence build`, for example, now compiles the *demo* module as part of the project build. You can create a new service with the `fluence service new` command. Note that the implication of creating a new service, possibly in a new project directory, is that you intend to deploy that service separately from the *eth-rpc* service. Of course, you will need to write Aqua code to be able to interact with your new module. 432 | 433 | To get rid of the demo project for now, use `fluence module remove` to unlink the module from the *fluence.yaml* and *service.yaml* files; the old *rm -r * gets rid of the code template. 434 | 435 | ### Deploying Services With A Deal 436 | 437 | > :warning: **Warning**: For quickstart, services are already deployed for you. But if you want to deploy services in this repository yourself, you should remove old deployment information first: 438 | > ```bash 439 | > mv .fluence/workers.yaml .fluence/workers.yaml.backup 440 | > ``` 441 | > `fluence deal deploy` is capable of redeploying services, so you don't have to do this manual management every time you want to (re)deploy deal. But one can't redeploy a deal he doesn't own, so you will get a error unless you are working to modify a Deal you created. 442 | 443 | 444 | With a service, in this case the *eth-rpc* service, ready for deployment, we simply use the `fluence deal deploy`: 445 | 446 | **Command**: 447 | 448 | ```bash 449 | fluence deal deploy 450 | ``` 451 | 452 | **Output**: 453 | 454 | ```bash 455 | Using kras environment to sign contracts 456 | Finished release [optimized] target(s) in 0.05s # (1) 457 | Connecting to kras relay: /dns4/7-kras.fluence.dev/tcp/9000/wss/p2p/12D3KooWDUszU2NeWyUVjCXhGEt1MoZrhvdmaQQwtZUriuGN1jTr 458 | Connected 459 | 460 | Creating deal for worker defaultWorker # (2) 461 | 462 | To approve transactions to your wallet using metamask, open the following url: # (3) 463 | 464 | https://cli-connector.fluence.dev/?wc=3df74b36a4459be644172d82e114297a65330ada4e77bc8afba67688064f033e%402&relay-protocol=irn&symKey=5b9b1773203cfe98c86bb8d611ff7945173430f5fb056fcd95f79919adbb0bae 465 | 466 | or go to https://cli-connector.fluence.dev and enter the following connection string there: 467 | 468 | wc:3df74b36a4459be644172d82e114297a65330ada4e77bc8afba67688064f033e@2?relay-protocol=irn&symKey=5b9b1773203cfe98c86bb8d611ff7945173430f5fb056fcd95f79919adbb0bae 469 | 470 | Confirm transaction in your wallet... 471 | # Waiting for transaction to be mined...... 
472 | To approve transactions to your wallet using metamask, open the following url: # (4)
473 | 
474 | https://cli-connector.fluence.dev/?wc=4c6084bf73667a0f02795048002dfdaff8e6b1be22495f989e6d04995ad2e8ba%402&relay-protocol=irn&symKey=8b19aba8c445bd37819f60ef0bafe2e4098424dc5d570a8dfeaf4b57cc1e794d
475 | 
476 | or go to https://cli-connector.fluence.dev and enter the following connection string there:
477 | 
478 | wc:4c6084bf73667a0f02795048002dfdaff8e6b1be22495f989e6d04995ad2e8ba@2?relay-protocol=irn&symKey=8b19aba8c445bd37819f60ef0bafe2e4098424dc5d570a8dfeaf4b57cc1e794d
479 | 
480 | Confirm transaction in your wallet...
481 | # Waiting for transaction to be mined......
482 | 3 workers joined the deal 0x06AAe83F938890c47FA7C667392e01D9E3052961 # (5)
483 | 
484 | 
485 | Success!
486 | 
487 | created deals: # (6)
488 |   defaultWorker:
489 |     deal: https://mumbai.polygonscan.com/address/0x06AAe83F938890c47FA7C667392e01D9E3052961
490 |     worker definition: bafkreigzfyfis2pmfr425dwpeql4hsrat5d7hpdthlxwzhefd23kw7gtey
491 |     timestamp: 2023-10-12T14:39:21.570Z
492 | ```
493 | 
494 | This one little command does quite a bit so you don't have to. Let's work through the process:
495 | 
496 | * to make sure everything is up to date, all service assets, i.e., modules, are (re-)compiled (1)
497 | * a (new) Deal, involving both on-chain and off-chain activities, is created (2)
498 | * the wasm modules and config are uploaded to an IPFS node, where the workers of deal-participating peers can fetch the package by CID
499 | * now you have to get involved! You are presented with two transactions, one after the other, for you to sign. To sign a transaction, copy and paste the URI into your browser; MetaMask should then pop up with a signing request. Before signing a transaction, carefully inspect its origin and content. Different wallets have different security features. If you are using MetaMask, carefully review your Security & Privacy settings. This is what you should see when you sign a transaction:
500 | ![Sign TX](./images/metamask_tx_prompt.png)
501 | * the first transaction is for the Deal's on-chain creation (3)
502 | * the second transaction is for the Deal's on-chain matching (4)
503 | * once you sign the transactions, the deal is created and workers join it, deploying your services (5)
504 | * finally, deployment information is saved for future use in Aqua scripts (6)
505 | 
506 | Fluence CLI did a lot of work for us behind the scenes, and signing the transactions is a lot quicker than entering (virtual) credit card information. Fluence CLI also organized the set of parameters needed by our Aqua scripts in [deals.aqua](./.fluence/aqua/deals.aqua) for easy importing.
507 | 
508 | Note that the `deals` section in [fluence.yaml](./fluence.yaml) specifies the minimum and target number of workers to be deployed under the *defaultWorker* namespace. Fluence CLI currently provides default values of one (1) and three (3), respectively. In the near future, you will be able to provide your spot price for service execution, hosting targets in the form of named capacity providers, and more.
509 | 
510 | After a successful deal deployment, you can retrieve logs for the deployed deal.
511 | 
512 | **Command**:
513 | 
514 | ```bash
515 | fluence deal logs
516 | ```
517 | 
518 | **Output**:
519 | 
520 | ```bash
521 | Connecting to random stage relay: /dns4/0-stage.fluence.dev/tcp/9000/wss/p2p/12D3KooWDcpWuyrMTDinqNgmXAuRdfd2mTdY9VoXZSAet2pDzh6r
522 | Connected
523 | defaultWorker (host_id: 12D3KooWMMGdfVEJ1rWe1nH1nehYDzNEHhg5ogdfiGk88AupCMnf, worker_id: 12D3KooWGctQEUKcgWBetu9aiR3owMZcBGNcpDC5ZE3H6dL16uSP, spell_id: 679acf1c-57e2-4dd7-aa78-bb181df7a00a):
524 | 
525 | 2023-10-25 14:41:48 Installing worker for deal 0x02ab47b7b2737e16a516421c1b8ad36475e0f7ce
526 | 2023-10-25 14:41:48 parsed worker definition bafkreifp4gbp3emepswptldwlpbhpybt47uy2c3ksm3y7rut6cmcdnljwa {
527 |   "services": [
528 |     {
529 |       "modules": [
530 |         {
531 |           "config": "bafkreia2wftbxfd4blycnvlxw2yl7ibhan2g7vauexv7fspibodlu34que",
532 |           "wasm": "bafkreiarl3nin4jtauc52k76h4ze7yekvc5d2uno5fkgpotmcekwm7cnqa"
533 |         },
534 |         {
535 |           "config": "bafkreiaclbxbmtydpwdcpoh2yggcd6uimicmbb6rxzab7bgp342w5vcz2m",
536 |           "wasm": "bafybeieeemeldllgokrkgybbrrjqeehyin3blv5cgehhdp3nlrfyj4eqoa"
537 |         }
538 |       ],
539 |       "name": "eth_rpc"
540 |     }
541 |   ],
542 |   "spells": []
543 | }
544 | 2023-10-25 14:41:50 Created service eth_rpc 2b5967ae-e5f9-4929-8668-d2039593af28
545 | 2023-10-25 14:41:50 Installation finished
546 | 2023-10-25 14:41:50 Worker installation finished with status {
547 |   "message": "",
548 |   "state": "INSTALLATION_SUCCESSFUL",
549 |   "timestamp": 1698244910
550 | }
551 | 
552 | defaultWorker (host_id: 12D3KooWJ4bTHirdTFNZpCS72TAzwtdmavTBkkEXtzo6wHL25CtE, worker_id: unknown, spell_id: unknown): Worker is not installed yet
553 | 
554 | defaultWorker (host_id: 12D3KooWAKNos2KogexTXhrkMZzFYpLHuWJ4PgoAhurSAv7o5CWA, worker_id: unknown, spell_id: unknown): Worker is not installed yet
555 | ```
556 | 
557 | In the example output above, we see that the worker for the *defaultWorker* namespace was installed successfully on one peer and is ready to receive requests from the gateway. The other two workers are not installed yet; they should be installed shortly, and you can check the logs again to follow the progress.
558 | 
559 | ### fRPC Aqua Code
560 | 
561 | Now that we have our services deployed and ready for action, it's time to look at Aqua, which the gateway uses to bridge HTTP to/from libp2p. Let's have a look at the Aqua code and its structure.
562 | 
563 | [rpc.aqua](./gateway/aqua/rpc.aqua) is the file where the fRPC algorithms and the entry points used by the gateway are defined.
564 | 
565 | ```aqua
566 | -- rpc.aqua
567 | aqua RPC
568 | 
569 | import "@fluencelabs/aqua-lib/builtin.aqua"
570 | import Subnet, Worker from "@fluencelabs/aqua-lib/subnet.aqua"
571 | 
572 | import "services.aqua"
573 | use "deals.aqua"
574 | ```
575 | 
576 | Two of the dependencies should stand out: *deals.aqua* and *services.aqua*, as they are local files located in the project's *.fluence* directory: *services.aqua* contains the interface exports from the *eth-rpc* wasm module, and *deals.aqua* maps the values from *workers.yaml* to data structures usable by your Aqua code. Since these files are dynamically generated by Fluence CLI, you need to recompile your Aqua after every change to your Wasm code or after deal deploy updates.
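The gateway already wraps this compilation step in an npm script (see `compile` in [gateway/package.json](./gateway/package.json)), so regenerating the compiled Aqua is a one-liner; the underlying CLI invocation is shown for comparison:

```bash
# from the gateway/ directory: compile aqua/ into aqua-compiled/ as JS
npm run compile

# which is equivalent to
fluence aqua -i aqua/ -o aqua-compiled/ --js --no-input
```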
For further details and examples, consult the [Aqua book](https://fluence.dev/docs/aqua-book/introduction), explore the [aqua playground](https://github.com/fluencelabs/aqua-playground) and visit the relevant repositories: [aqua-lib](https://github.com/fluencelabs/aqua-lib), [registry](https://github.com/fluencelabs/registry), [spell](https://github.com/fluencelabs/spell).
577 | 
578 | ### fRPC Gateway Configuration
579 | 
580 | The gateway config file, e.g., [quickstart_config.json](./gateway/configs/quickstart_config.json), contains the parameters for the Fluence p2p network connection and the gateway's behavior. Key parameters include:
581 | 
582 | * *providers*: an array of RPC endpoint URLs, e.g., Infura, Alchemy, Ankr, etc.
583 | * *mode*: one of "random", "round-robin" or "quorum" to specify the endpoint selection algorithm
584 | * *relay*: the Fluence p2p network relay address for [js-client](https://github.com/fluencelabs/js-client) to use
585 | * *port*: the port the gateway listens on
586 | * other parameters related to the fRPC algorithms, discussed below
587 | 
588 | ### fRPC Algorithms
589 | 
590 | The fRPC substrate offers basic algorithms to enhance reliability, addressing issues related to RPC endpoint availability and trustworthiness.
591 | 
592 | Let's first examine *balancedEthCall* in [rpc.aqua](./gateway/aqua/rpc.aqua):
593 | 
594 | ```aqua
595 | -- Call RPC method with load balancing
596 | func balancedEthCall{Logger, Balancer}(method: string, jsonArgs: []string) -> JsonString: -- (1)
597 |     on HOST_PEER_ID: -- (2)
598 |         worker, provider <- Balancer.next() -- (3)
599 |         Logger.logWorker(worker) -- (4)
600 |         Logger.logCall(provider) -- (4)
601 |         rpc <- fromWorkerProvider(worker, provider) -- (5)
602 |         result <- rpcCall{rpc}(method, jsonArgs) -- (6)
603 |     <- result -- (7)
604 | ```
605 | 
606 | This function is a building block for the other algorithms; it makes a call to an RPC endpoint using some balancing logic. Let's go through the code line by line:
607 | 
608 | * (1) The function declaration states that two abilities are required to execute it: *Logger* and *Balancer*. To learn more about abilities, see [Abilities](https://fluence.dev/docs/aqua-book/language/abilities).
609 | * (2) The function is executed on the host peer, i.e., the relay peer we used to connect to the Fluence p2p network.
610 | * (3) A worker and an RPC provider are selected by *Balancer*.
611 | * (4) The worker and provider are logged for debugging purposes.
612 | * (5) An RPC ability is created from the worker and provider with the helper function *fromWorkerProvider*.
613 | * (6) The RPC ability is passed to the *rpcCall* function to make the actual call.
614 | * (7) The result of the call is returned.
615 | 
616 | #### Random
617 | 
618 | **Use: Set `mode` to "random" in your gateway config file**
619 | 
620 | Randomizing the selection of one out of many RPC endpoints is, by itself, a weak way to mitigate a single point of failure or byzantine behavior. However, it can be an important building block for more effective algorithms, such as failover and quorum/consensus across both RPC providers and network peers.
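In the gateway config, selecting this strategy only requires setting the *mode* field. A minimal sketch, where the provider URLs and the relay address are placeholders you need to fill in (the counter- and quorum-related fields can stay `null` for this mode):

```json
{
  "providers": [
    "https://your-first-rpc-endpoint",
    "https://your-second-rpc-endpoint"
  ],
  "mode": "random",
  "relay": "/dns4/your-relay-host/tcp/9000/wss/p2p/your-relay-peer-id",
  "port": 3000,
  "counterServiceId": null,
  "counterPeerId": null,
  "quorumServiceId": null,
  "quorumPeerId": null,
  "quorumNumber": null
}
```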
621 | 
622 | The fRPC substrate implementation is very basic from a business logic perspective but illustrates how to randomly choose both a worker, which represents the deployed service on a particular peer, and an RPC endpoint:
623 | 
624 | ```aqua
625 | func randomLoadBalancingEth(uris: []string, method: string, jsonArgs: []string) -> JsonString:
626 |     result: ?JsonString
627 | 
628 |     workers, error <- getWorkers() -- (1)
629 |     if error != nil:
630 |         result <- errorJsonString(error!)
631 |     else:
632 |         log <- initPeerLogger() -- (2)
633 |         random <- timeRandom() -- (2)
634 |         balancer <- randomBalancer{random}(workers, uris) -- (2)
635 |         result <- balancedEthCall{log, balancer}(method, jsonArgs) -- (3)
636 | 
637 |     <- result!
638 | ```
639 | 
640 | The *randomLoadBalancingEth* function is built upon *balancedEthCall*:
641 | * (1) The workers that are part of the deal are fetched from the network.
642 | * (2) The logger and the random balancer are initialized.
643 | * (3) *balancedEthCall* is called with the logger and balancer.
644 | 
645 | Note that the peer's timestamp service is used to generate the random numbers. This is not suitable for production, but it is good enough for a demonstration.
646 | 
647 | #### Round robin
648 | 
649 | **Use: Set `mode` to "round-robin" in your gateway config file**
650 | 
651 | [Config Parameters](#fRPC-Gateway-Configuration):
652 | * *counterServiceId*: the service id of the counter service
653 | * *counterPeerId*: the peer id of the counter service
654 | 
655 | A round-robin algorithm cycles through the available options in a predictable order. This substrate implementation is no different:
656 | 
657 | ```aqua
658 | func roundRobinEth(uris: []string, method: string, jsonArgs: []string, counterServiceId: string, counterPeerId: string) -> JsonString:
659 |     result: ?JsonString
660 | 
661 |     workers, error <- getWorkers()
662 |     if error != nil:
663 |         result <- errorJsonString(error!)
664 |     else:
665 |         log <- initPeerLogger()
666 |         counter <- onPeerCounter(counterPeerId, counterServiceId) -- (1)
667 |         balancer <- cycleBalancer{counter}(workers, uris) -- (2)
668 |         result <- balancedEthCall{log, balancer}(method, jsonArgs)
669 | 
670 |     <- result!
671 | ```
672 | 
673 | The *roundRobinEth* function is very similar to *randomLoadBalancingEth*, except for the balancer:
674 | * (1) A Counter ability is created from the peer id and service id.
675 | * (2) A cycle balancer is created from the counter, the workers and the provider URIs.
676 | 
677 | To keep the state of the *cycle index*, we use a counter implemented as a local, [js-client](https://github.com/fluencelabs/js-client)-based [service](./gateway/src/index.js). The peer executing the *Counter* service is the (local) client peer implemented by the gateway. Note that the counter's state is limited to the lifetime of the gateway.
678 | 
679 | #### Quorum
680 | 
681 | **Use: Set `mode` to "quorum" in your gateway config file**
682 | 
683 | [Config Parameters](#fRPC-Gateway-Configuration):
684 | * *quorumServiceId*: the service id of the quorum service
685 | * *quorumPeerId*: the peer id of the quorum service
686 | * *quorumNumber*: the number of results that must be equal to determine a quorum result
687 | 
688 | A quorum, aka "off-chain consensus", determines a result from a ranked frequency distribution of the results pool and accepts it only if it meets a quorum threshold, e.g., 2/3 of the items in the results pool must be equal for a quorum result to be accepted. Moreover, additional parameters, such as the minimum number of items in the results pool, may be added.
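Before looking at the Aqua side, it may help to see the check itself. The gateway's local *QuorumChecker* service (see [index.js](./gateway/src/index.js)) performs this frequency count; a simplified sketch of the idea:

```js
// Count how often each successful value occurs and accept the most frequent
// one only if it reaches the quorum threshold; otherwise report no consensus.
function checkQuorum(results, quorumNumber) {
  const counts = {};
  for (const r of results) {
    if (!r.success) continue;
    counts[r.value] = (counts[r.value] || 0) + 1;
  }

  const best = Object.entries(counts).sort((a, b) => b[1] - a[1])[0];
  if (best !== undefined && best[1] >= quorumNumber) {
    return { value: best[0], results: [], error: "" };
  }
  return { value: "", results, error: "No consensus in results" };
}

// Example: three workers, one provider returning a diverging block number.
checkQuorum(
  [
    { success: true, value: '"0x82a08d"', error: "" },
    { success: true, value: '"0x82a08d"', error: "" },
    { success: true, value: '"0x82a08c"', error: "" },
  ],
  2,
); // -> { value: '"0x82a08d"', results: [], error: "" }
```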
Depending on your trust in the peers processing the endpoint requests, or even in the peer executing the quorum algorithm, additional verification steps may have to be added. There is one more pertinent consideration when designing quorum algorithms: the differentiation between (on-chain) read and write operations.
689 | 
690 | In the fRPC substrate implementation, we provide a basic quorum algorithm:
691 | 
692 | ```aqua
693 | func quorum{ProviderBalancer, QuorumChecker}(workers: []Worker, quorumNumber: u32, timeout: u32, method: string, jsonArgs: []string) -> QuorumResult: -- (1)
694 |     results: *JsonString
695 |     on HOST_PEER_ID: -- (2)
696 |         for worker <- workers par: -- (3)
697 |             provider <- ProviderBalancer.nextProvider() -- (4)
698 |             rpc <- fromWorkerProvider(worker, provider) -- (5)
699 |             results <- rpcCall{rpc}(method, jsonArgs) -- (6)
700 | 
701 |         -- wait all results from all workers with timeout
702 |         join results[workers.length - 1] -- (7)
703 |         par Peer.timeout(timeout, "Workers timeout") -- (7)
704 | 
705 |     <- QuorumChecker.check(results, quorumNumber) -- (8)
706 | ```
707 | 
708 | Let's examine the code line by line:
709 | * (1) The function declaration states that two abilities are required to execute it: *ProviderBalancer* and *QuorumChecker*. To learn more about abilities, see [Abilities](https://fluence.dev/docs/aqua-book/language/abilities).
710 | * (2) The function is executed on the host peer, i.e., the relay peer we used to connect to the Fluence p2p network.
711 | * (3) For each worker, in parallel:
712 | * (4) A provider is selected by *ProviderBalancer*.
713 | * (5) An RPC ability is created from the worker and provider with the helper function *fromWorkerProvider*.
714 | * (6) The RPC ability is passed to the *rpcCall* function to make the actual call. The result is saved in the *results* stream variable.
715 | * (7) The results from all workers are awaited with a timeout. For more information, see [Timeout and race patterns](https://fluence.dev/docs/aqua-book/language/flow/parallel#timeout-and-race-patterns).
716 | * (8) The final result is determined by *QuorumChecker* based on all results gathered at this point.
717 | 
718 | As evidenced by the code, no provision is made to differentiate between read and write operations, which might prove disadvantageous when submitting, for example, a signed transaction.
719 | 
720 | The actual entry point, *quorumEth*, is a wrapper around *quorum*:
721 | 
722 | ```aqua
723 | func quorumEth(uris: []string, quorumNumber: u32, timeout: u32, method: string, jsonArgs: []string, quorumServiceId: string, quorumPeerId: string) -> QuorumResult:
724 |     result: *QuorumResult
725 | 
726 |     workers, error <- getWorkers()
727 |     if error != nil:
728 |         result <- errorQuorumResult(error!)
729 |     else:
730 |         random <- timeRandom()
731 |         balancer <- randomBalancer{random}(workers, uris)
732 |         quorumChecker <- onPeerQuorumChecker(quorumPeerId, quorumServiceId)
733 |         result <- quorum{balancer, quorumChecker}(workers, quorumNumber, timeout, method, jsonArgs)
734 | 
735 |     <- result!
736 | ```
737 | 
738 | It is very similar to *randomLoadBalancingEth* and *roundRobinEth*, except for the balancer and quorum checker initialization. To determine the quorum result, we use a local, [js-client](https://github.com/fluencelabs/js-client)-based [service](./gateway/src/index.js). The peer executing the *QuorumChecker* service is the (local) client peer implemented by the gateway.
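Whichever mode you pick, the gateway exposes a plain JSON-RPC endpoint, so you can smoke-test it with curl once it is running (port 3000 is the quickstart default; the exact block number in the response will of course differ):

```bash
curl -s -X POST http://localhost:3000 \
  -H 'Content-Type: application/json' \
  -d '{"jsonrpc": "2.0", "id": 1, "method": "eth_blockNumber", "params": []}'
# => roughly: {"jsonrpc":"2.0","id":1,"result":"0x..."}
```

This is essentially the same request the Web3 example in [web3run.js](./gateway/web3run.js) issues under the hood.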
739 | 
740 | ## Summary
741 | 
742 | fRPC is a design pattern that uses Fluence's decentralized serverless compute protocol to efficiently mitigate the risks centralized RPC providers pose to dApps. fRPC Substrate is a basic implementation of the fRPC design pattern that dApp users can use out of the box with no changes to their frontend. Once you have tried fRPC, feel free to experiment with the code and let us know about your journey in the [Fluence Discord](https://fluence.chat) developer channel.
743 | 
744 | For support, to discuss your ideas, or to schedule a presentation of your solution to the Fluence and fRPC community at large, reach out on [Discord](https://fluence.chat) or [Telegram](https://t.me/fluence_project).
745 | 
746 | Happy Hacking!
747 | 
748 | ## Contribution
749 | 
750 | Found a mistake or an inaccuracy, or have other improvement suggestions? Open an issue or a pull request! Note that submitted contributions will be licensed according to the terms of the [LICENSE](./LICENSE).
751 | 
--------------------------------------------------------------------------------
/fluence-lock.yaml:
--------------------------------------------------------------------------------
1 | # yaml-language-server: $schema=.fluence/schemas/fluence-lock.yaml.json
2 | 
3 | # Defines a lock file for Fluence Project dependencies. When dependencies are installed - their exact versions are saved here.
4 | 
5 | # Documentation: https://github.com/fluencelabs/fluence-cli/tree/main/docs/configs/fluence-lock.md
6 | 
7 | version: 0
8 | cargo:
9 |   marine: 0.14.1
10 |   mrepl: 0.21.3
11 | npm:
12 |   "@fluencelabs/aqua-lib": 0.6.0
13 |   "@fluencelabs/aqua": 0.10.3
14 |   "@fluencelabs/registry": 0.8.3
15 |   "@fluencelabs/spell": 0.5.7
16 | 
--------------------------------------------------------------------------------
/fluence.yaml:
--------------------------------------------------------------------------------
1 | # yaml-language-server: $schema=.fluence/schemas/fluence.json
2 | 
3 | # Defines Fluence Project, most importantly - what exactly you want to deploy and how. You can use `fluence init` command to generate a template for new Fluence project
4 | 
5 | # Documentation: https://github.com/fluencelabs/fluence-cli/tree/main/docs/configs/fluence.md
6 | 
7 | version: 4
8 | 
9 | aquaInputPath: src/aqua/main.aqua
10 | 
11 | workers:
12 |   defaultWorker:
13 |     services: [ eth_rpc ]
14 | 
15 | deals:
16 |   defaultWorker:
17 |     minWorkers: 3
18 |     targetWorkers: 3
19 | 
20 | relays: stage
21 | 
22 | services:
23 |   eth_rpc:
24 |     get: wasm-modules
25 | 
--------------------------------------------------------------------------------
/gateway/README.md:
--------------------------------------------------------------------------------
1 | # Aqua Ethereum Gateway
2 | 
3 | The gateway provides access to different Ethereum API providers (Infura, Alchemy, etc.). It can be used with a Web3 client and load-balances requests across the configured providers.
4 | 5 | 6 | ## Installation 7 | 8 | ```shell 9 | npm install -g @fluencelabs/aqua-eth-gateway 10 | ``` 11 | 12 | ## Example 13 | 14 | ```shell 15 | aqua-eth-gateway path/to/config.json 16 | ``` 17 | 18 | where config is: 19 | 20 | ```json 21 | { 22 | "providers": [ 23 | "https://goerli.infura.io/v3/your-api-key", 24 | "https://eth-goerli.g.alchemy.com/v2/your-api-key" 25 | ], 26 | "mode": "random", 27 | "relay": "fluence/peer/address", 28 | "serviceId": "eth-rpc serviceId", 29 | "port": 3000, 30 | "counterServiceId": null, 31 | "counterPeerId": null, 32 | "quorumServiceId": null, 33 | "quorumPeerId": null, 34 | "quorumNumber": null 35 | } 36 | ``` 37 | 38 | `counterServiceId` and `counterPeerId` is credentials to counter service for `round-robin` mode. Will be used local counter if undefined. 39 | `quorumServiceId` and `quorumPeerId` is credentials to counter service for `round-robin` mode. Will be used local counter if undefined. 40 | `quorumNumber` is `3` by default. 41 | 42 | ## Mode 43 | 44 | `random` - choose workers and providers randomly 45 | `round-robin` - choose workers and providers in circle order 46 | `quorum` - call all workers and choose the result that is the same from `>= quorumNumber` providers. Or return an error. 47 | -------------------------------------------------------------------------------- /gateway/aqua/balancer.aqua: -------------------------------------------------------------------------------- 1 | module Balancer declares Balancer, ProviderBalancer, WorkersBalancer, randomBalancer, cycleBalancer 2 | 3 | import Worker from "@fluencelabs/aqua-lib/subnet.aqua" 4 | 5 | import Counter from "counter.aqua" 6 | import Random from "random.aqua" 7 | import Provider from "provider.aqua" 8 | 9 | ability WorkersBalancer: 10 | nextWorker() -> Worker 11 | 12 | ability ProviderBalancer: 13 | nextProvider() -> Provider 14 | 15 | ability Balancer: 16 | nextWorker() -> Worker 17 | nextProvider() -> Provider 18 | next() -> Worker, Provider 19 | 20 | -- Create balancer that returns 21 | -- workers and providers in random order 22 | func randomBalancer{Random}(workers: []Worker, providers: []Provider) -> Balancer: 23 | -- closures do not capture topology here 24 | nextWorker = func () -> Worker: 25 | rand <- Random.next() 26 | idx = rand % workers.length 27 | <- workers[idx] 28 | 29 | nextProvider = func () -> Provider: 30 | rand <- Random.next() 31 | idx = rand % providers.length 32 | <- providers[idx] 33 | 34 | next = func () -> Worker, Provider: 35 | <- nextWorker(), nextProvider() 36 | 37 | <- Balancer(next=next, nextWorker=nextWorker, nextProvider=nextProvider) 38 | 39 | -- Create balancer that returns 40 | -- workers and providers in cycle order 41 | func cycleBalancer{Counter}(workers: []Worker, providers: []Provider) -> Balancer: 42 | next = func () -> Worker, Provider: 43 | n <- Counter.incrementAndReturn() 44 | idx = n % workers.length 45 | <- workers[idx], providers[idx] 46 | 47 | nextWorker = func () -> Worker: 48 | w, p <- next() 49 | <- w 50 | 51 | nextProvider = func () -> Provider: 52 | w, p <- next() 53 | <- p 54 | 55 | <- Balancer(next=next, nextWorker=nextWorker, nextProvider=nextProvider) -------------------------------------------------------------------------------- /gateway/aqua/counter.aqua: -------------------------------------------------------------------------------- 1 | module Counter declares Counter, onPeerCounter 2 | 3 | export CounterSrv 4 | 5 | ability Counter: 6 | incrementAndReturn() -> u32 7 | 8 | service CounterSrv("counter"): 9 | 
incrementAndReturn() -> u32 10 | 11 | -- Create Counter ability that 12 | -- counts on peer through CounterSrv(id) 13 | func onPeerCounter(peer: string, id: string) -> Counter: 14 | -- closure does not capture topology here 15 | incAndReturn = func () -> u32: 16 | on peer: 17 | CounterSrv id 18 | res <- CounterSrv.incrementAndReturn() 19 | <- res 20 | 21 | <- Counter(incrementAndReturn = incAndReturn) 22 | 23 | -------------------------------------------------------------------------------- /gateway/aqua/eth_rpc.aqua: -------------------------------------------------------------------------------- 1 | module RPCEth declares RPCEth, fromWorkerProvider 2 | 3 | import Worker from "@fluencelabs/aqua-lib/subnet.aqua" 4 | 5 | import "services.aqua" 6 | 7 | import Provider from "provider.aqua" 8 | 9 | -- Ability to call Ethereum JSON RPC methods 10 | ability RPCEth: 11 | call(method: string, jsonArgs: []string) -> JsonString 12 | 13 | -- Create RPCEth ability from Worker and Provider 14 | func fromWorkerProvider(worker: Worker, provider: Provider) -> RPCEth: 15 | -- closure does not capture topology here 16 | call = func (method: string, jsonArgs: []string) -> JsonString: 17 | -- TODO: Handle worker_id == nil? 18 | on worker.worker_id! via worker.host_id: 19 | res <- EthRpc.eth_call(provider, method, jsonArgs) 20 | <- res 21 | 22 | <- RPCEth(call = call) 23 | -------------------------------------------------------------------------------- /gateway/aqua/logger.aqua: -------------------------------------------------------------------------------- 1 | module Logger declares Logger, initPeerLogger 2 | 3 | export LoggerSrv 4 | 5 | import Worker from "@fluencelabs/aqua-lib/subnet.aqua" 6 | 7 | ability Logger: 8 | log(s: []string) 9 | logNum(n: u32) 10 | logCall(s: string) 11 | logWorker(w: Worker) 12 | 13 | service LoggerSrv("logger"): 14 | log(s: []string) 15 | logNum(n: u32) 16 | logCall(s: string) 17 | logWorker(w: Worker) 18 | 19 | -- Create Logger ability that logs 20 | -- on INIT_PEER_ID via HOST_PEER_ID 21 | -- through LoggerSrv 22 | func initPeerLogger() -> Logger: 23 | -- closures do not capture topology here 24 | 25 | log = func (s: []string): 26 | on INIT_PEER_ID via HOST_PEER_ID: 27 | LoggerSrv.log(s) 28 | logNum = func (n: u32): 29 | on INIT_PEER_ID via HOST_PEER_ID: 30 | LoggerSrv.logNum(n) 31 | logCall = func (s: string): 32 | on INIT_PEER_ID via HOST_PEER_ID: 33 | LoggerSrv.logCall(s) 34 | logWorker = func (w: Worker): 35 | on INIT_PEER_ID via HOST_PEER_ID: 36 | LoggerSrv.logWorker(w) 37 | 38 | <- Logger(log=log, logNum=logNum, logCall=logCall, logWorker=logWorker) -------------------------------------------------------------------------------- /gateway/aqua/provider.aqua: -------------------------------------------------------------------------------- 1 | module Provider declares Provider 2 | 3 | alias Provider: string -------------------------------------------------------------------------------- /gateway/aqua/quorum.aqua: -------------------------------------------------------------------------------- 1 | module Quorum declares QuorumChecker, QuorumResult, onPeerQuorumChecker 2 | 3 | import JsonString from "services.aqua" 4 | 5 | export QuorumCheckerSrv 6 | 7 | data QuorumResult: 8 | value: string 9 | results: []JsonString 10 | error: string 11 | 12 | -- Ability to check if a quorum on results is reached 13 | ability QuorumChecker: 14 | check(results: []JsonString, minResults: u32) -> QuorumResult 15 | 16 | service QuorumCheckerSrv("quorum"): 17 | check(results: []JsonString, 
minResults: u32) -> QuorumResult 18 | 19 | -- Create a QuorumChecker ability 20 | -- that checks quorum on peer through QuorumCheckerSrv(id) 21 | func onPeerQuorumChecker(peer: string, id: string) -> QuorumChecker: 22 | -- closure does not capture topology here 23 | check = func (results: []JsonString, minResults: u32) -> QuorumResult: 24 | on peer: 25 | QuorumCheckerSrv id 26 | res <- QuorumCheckerSrv.check(results, minResults) 27 | <- res 28 | 29 | <- QuorumChecker(check = check) -------------------------------------------------------------------------------- /gateway/aqua/random.aqua: -------------------------------------------------------------------------------- 1 | module Random declares Random, timeRandom 2 | 3 | import Peer from "@fluencelabs/aqua-lib/builtin.aqua" 4 | 5 | import NumOp from "utils.aqua" 6 | 7 | ability Random: 8 | next() -> i64 9 | 10 | -- Create random from timestamp 11 | func timeRandom() -> Random: 12 | -- closure does not capture topology here 13 | next = func () -> i64: 14 | t <- Peer.timestamp_sec() 15 | n <- NumOp.identity(t) 16 | <- n 17 | 18 | <- Random(next = next) -------------------------------------------------------------------------------- /gateway/aqua/rpc.aqua: -------------------------------------------------------------------------------- 1 | aqua RPC 2 | 3 | import "@fluencelabs/aqua-lib/builtin.aqua" 4 | import Subnet, Worker from "@fluencelabs/aqua-lib/subnet.aqua" 5 | 6 | import "services.aqua" 7 | use "deals.aqua" 8 | 9 | import Logger, initPeerLogger from "logger.aqua" 10 | import Balancer, ProviderBalancer, randomBalancer, cycleBalancer from "balancer.aqua" 11 | import onPeerCounter from "counter.aqua" 12 | import QuorumChecker, QuorumResult, onPeerQuorumChecker from "quorum.aqua" 13 | import timeRandom from "random.aqua" 14 | import RPCEth, fromWorkerProvider from "eth_rpc.aqua" 15 | 16 | import NumOp from "utils.aqua" 17 | 18 | export randomLoadBalancingEth, roundRobinEth, quorumEth 19 | 20 | func errorQuorumResult(msg: string) -> QuorumResult: 21 | <- QuorumResult(value = "", results = [], error = msg) 22 | 23 | func errorJsonString(msg: string) -> JsonString: 24 | <- JsonString(value = "", success = false, error = msg) 25 | 26 | -- Get workers participating in deal 27 | func getWorkers() -> []Worker, ?string: 28 | on INIT_PEER_ID via HOST_PEER_ID: 29 | deals <- Deals.get() 30 | dealId = deals.defaultWorker!.dealIdOriginal 31 | on HOST_PEER_ID: 32 | result <- Subnet.resolve(dealId) 33 | <- result.workers, result.error 34 | 35 | -- Call RPC method through ability 36 | func rpcCall{RPCEth}(method: string, jsonArgs: []string) -> JsonString: 37 | <- RPCEth.call(method, jsonArgs) 38 | 39 | -- Call RPC method with load balancing 40 | func balancedEthCall{Logger, Balancer}(method: string, jsonArgs: []string) -> JsonString: 41 | on HOST_PEER_ID: 42 | worker, provider <- Balancer.next() 43 | Logger.logWorker(worker) 44 | Logger.logCall(provider) 45 | rpc <- fromWorkerProvider(worker, provider) 46 | result <- rpcCall{rpc}(method, jsonArgs) 47 | <- result 48 | 49 | -- Call RPC method with random load balancing 50 | func randomLoadBalancingEth(uris: []string, method: string, jsonArgs: []string) -> JsonString: 51 | result: ?JsonString 52 | 53 | workers, error <- getWorkers() 54 | if error != nil: 55 | result <- errorJsonString(error!) 56 | else: 57 | log <- initPeerLogger() 58 | random <- timeRandom() 59 | balancer <- randomBalancer{random}(workers, uris) 60 | result <- balancedEthCall{log, balancer}(method, jsonArgs) 61 | 62 | <- result! 
63 | 64 | -- Call RPC method with round-robin load balancing 65 | func roundRobinEth(uris: []string, method: string, jsonArgs: []string, counterServiceId: string, counterPeerId: string) -> JsonString: 66 | result: ?JsonString 67 | 68 | workers, error <- getWorkers() 69 | if error != nil: 70 | result <- errorJsonString(error!) 71 | else: 72 | log <- initPeerLogger() 73 | counter <- onPeerCounter(counterPeerId, counterServiceId) 74 | balancer <- cycleBalancer{counter}(workers, uris) 75 | result <- balancedEthCall{log, balancer}(method, jsonArgs) 76 | 77 | <- result! 78 | 79 | -- Call RPC method with workers quorum and provider load balancing 80 | func quorum{ProviderBalancer, QuorumChecker}(workers: []Worker, quorumNumber: u32, timeout: u32, method: string, jsonArgs: []string) -> QuorumResult: 81 | results: *JsonString 82 | on HOST_PEER_ID: 83 | for worker <- workers par: 84 | provider <- ProviderBalancer.nextProvider() 85 | rpc <- fromWorkerProvider(worker, provider) 86 | results <- rpcCall{rpc}(method, jsonArgs) 87 | 88 | -- wait all results from all workers with timeout 89 | join results[workers.length - 1] 90 | par Peer.timeout(timeout, "Workers timeout") 91 | 92 | <- QuorumChecker.check(results, quorumNumber) 93 | 94 | -- Call RPC method with workers quorum and provider load balancing 95 | func quorumEth(uris: []string, quorumNumber: u32, timeout: u32, method: string, jsonArgs: []string, quorumServiceId: string, quorumPeerId: string) -> QuorumResult: 96 | result: *QuorumResult 97 | 98 | workers, error <- getWorkers() 99 | if error != nil: 100 | result <- errorQuorumResult(error!) 101 | else: 102 | random <- timeRandom() 103 | balancer <- randomBalancer{random}(workers, uris) 104 | quorumChecker <- onPeerQuorumChecker(quorumPeerId, quorumServiceId) 105 | result <- quorum{balancer, quorumChecker}(workers, quorumNumber, timeout, method, jsonArgs) 106 | 107 | <- result! 
-------------------------------------------------------------------------------- /gateway/aqua/utils.aqua: -------------------------------------------------------------------------------- 1 | module Utils declares NumOp 2 | 3 | -- Used to coerce types 4 | service NumOp("op"): 5 | identity(n: u64) -> i64 -------------------------------------------------------------------------------- /gateway/configs/quickstart_config.json: -------------------------------------------------------------------------------- 1 | { 2 | "providers": [ 3 | "", 4 | "", 5 | "" 6 | ], 7 | "mode": "round-robin", 8 | "relay": "/dns4/0-stage.fluence.dev/tcp/9000/wss/p2p/12D3KooWDcpWuyrMTDinqNgmXAuRdfd2mTdY9VoXZSAet2pDzh6r", 9 | "port": 3000, 10 | "counterServiceId": null, 11 | "counterPeerId": null, 12 | "quorumServiceId": null, 13 | "quorumPeerId": null, 14 | "quorumNumber": null 15 | } 16 | -------------------------------------------------------------------------------- /gateway/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "@fluencelabs/aqua-eth-gateway", 3 | "version": "0.0.18", 4 | "description": "", 5 | "main": "src/index.js", 6 | "type": "module", 7 | "scripts": { 8 | "run": "npm run compile && node src/index.js", 9 | "compile": "fluence aqua -i aqua/ -o aqua-compiled/ --js --no-input", 10 | "req": "node web3run.js" 11 | }, 12 | "bin": { 13 | "aqua-eth-gateway": "src/index.js" 14 | }, 15 | "files": [ 16 | "src/*", 17 | "aqua/*", 18 | "aqua-compiled/*" 19 | ], 20 | "author": "Fluence Labs", 21 | "license": "Apache-2.0", 22 | "dependencies": { 23 | "@fluencelabs/js-client": "0.4.2", 24 | "@fluencelabs/marine-worker": "0.5.0", 25 | "body-parser": "1.20.2", 26 | "express": "4.18.2", 27 | "json-rpc-2.0": "1.7.0", 28 | "web3": "4.3.0" 29 | }, 30 | "devDependencies": { 31 | "@fluencelabs/aqua-lib": "0.7.7" 32 | } 33 | } 34 | -------------------------------------------------------------------------------- /gateway/src/arguments.js: -------------------------------------------------------------------------------- 1 | import {configHelp} from "./config.js"; 2 | 3 | export function readArguments(args) { 4 | const configPath = args[0] 5 | 6 | let errors = [] 7 | 8 | if (!configPath) { 9 | errors.push("Specify config with uri to ethereum RPC providers") 10 | } 11 | 12 | return { 13 | configPath, errors, 14 | help: "Example: aqua-eth-gateway \n" + configHelp 15 | } 16 | } -------------------------------------------------------------------------------- /gateway/src/config.js: -------------------------------------------------------------------------------- 1 | import fs from "fs"; 2 | 3 | export const configHelp = 4 | "Config structure: { port, relay, serviceId, providers, mode, counterServiceId?, counterPeerId?}\n" + 5 | "Where 'mode' can be: 'random' (default), 'round-robin' or 'quorum',\n" + 6 | "'counterServiceId' and 'counterPeerId' will use local service if undefined.\n"; 7 | ("'quorumServiceId' and 'quorumPeerId' will use local service if undefined.\n"); 8 | 9 | export function readConfig(path) { 10 | const rawdata = fs.readFileSync(path); 11 | const config = JSON.parse(rawdata); 12 | 13 | let errors = []; 14 | if (!config.port) { 15 | errors.push("Specify port ('port') in config"); 16 | } 17 | if (!config.relay) { 18 | errors.push("Specify Fluence peer address ('relay') in config"); 19 | } 20 | if ( 21 | !!config.mode && 22 | !["random", "round-robin", "quorum"].includes(config.mode) 23 | ) { 24 | errors.push( 25 | `Incorrect mode '${config.mode}' in 
config. Should be 'random', 'round-robin' or 'quorum'` 26 | ); 27 | } 28 | 29 | return { 30 | config, 31 | errors, 32 | help: configHelp, 33 | }; 34 | } 35 | -------------------------------------------------------------------------------- /gateway/src/index.js: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env node 2 | 3 | "use strict"; 4 | 5 | import express from "express"; 6 | import bodyParser from "body-parser"; 7 | import { JSONRPCServer } from "json-rpc-2.0"; 8 | import { Fluence } from "@fluencelabs/js-client"; 9 | import { 10 | quorumEth, 11 | randomLoadBalancingEth, 12 | roundRobinEth, 13 | } from "../aqua-compiled/rpc.js"; 14 | import { registerLoggerSrv } from "../aqua-compiled/logger.js"; 15 | import { registerCounterSrv } from "../aqua-compiled/counter.js"; 16 | import { registerQuorumCheckerSrv } from "../aqua-compiled/quorum.js"; 17 | import { readArguments } from "./arguments.js"; 18 | import { readConfig } from "./config.js"; 19 | import { methods } from "./methods.js"; 20 | 21 | const args = readArguments(process.argv.slice(2)); 22 | 23 | if (args.errors.length > 0) { 24 | console.log(args.help); 25 | args.errors.forEach((err) => console.log(err)); 26 | process.exit(1); 27 | } 28 | 29 | const { config, errors, help } = readConfig(args.configPath); 30 | 31 | if (errors.length > 0) { 32 | errors.forEach((err) => console.log(err)); 33 | console.log(help); 34 | process.exit(1); 35 | } 36 | 37 | console.log("Running server..."); 38 | 39 | const route = "/"; 40 | 41 | const server = new JSONRPCServer(); 42 | 43 | // initialize fluence client 44 | await Fluence.connect(config.relay, {}); 45 | const peerId = (await Fluence.getClient()).getPeerId(); 46 | 47 | // handler for logger 48 | registerLoggerSrv({ 49 | log: (s) => { 50 | console.log("log: " + s); 51 | }, 52 | logCall: (s) => { 53 | console.log("Call will be to : " + s); 54 | }, 55 | logWorker: (s) => { 56 | console.log("Worker used: " + JSON.stringify(s)); 57 | }, 58 | logNum: (s) => { 59 | console.log("Number: " + s); 60 | }, 61 | }); 62 | 63 | let counter = 0; 64 | registerCounterSrv("counter", { 65 | incrementAndReturn: () => { 66 | counter++; 67 | console.log("Counter: " + counter); 68 | return counter; 69 | }, 70 | }); 71 | 72 | function findSameResults(results, minNum) { 73 | const resultCounts = results 74 | .filter((obj) => obj.success) 75 | .map((obj) => obj.value) 76 | .reduce(function (i, v) { 77 | if (i[v] === undefined) { 78 | i[v] = 1; 79 | } else { 80 | i[v] = i[v] + 1; 81 | } 82 | return i; 83 | }, {}); 84 | 85 | const getMaxRepeated = Math.max(...Object.values(resultCounts)); 86 | if (getMaxRepeated >= minNum) { 87 | console.log(resultCounts); 88 | const max = Object.entries(resultCounts).find( 89 | (kv) => kv[1] === getMaxRepeated, 90 | ); 91 | return { 92 | value: max[0], 93 | results: [], 94 | error: "", 95 | }; 96 | } else { 97 | return { 98 | error: "No consensus in results", 99 | results: results, 100 | value: "", 101 | }; 102 | } 103 | } 104 | 105 | registerQuorumCheckerSrv("quorum", { 106 | check: (ethResults, minQuorum) => { 107 | console.log("Check quorum for:"); 108 | console.log(ethResults); 109 | return findSameResults(ethResults, minQuorum); 110 | }, 111 | }); 112 | 113 | const counterServiceId = config.counterServiceId || "counter"; 114 | const counterPeerId = config.counterPeerId || peerId; 115 | const quorumServiceId = config.quorumServiceId || "quorum"; 116 | const quorumPeerId = config.quorumPeerId || peerId; 117 | const quorumNumber = 
config.quorumNumber || 2; 118 | const mode = config.mode || "random"; 119 | 120 | console.log(`Using mode '${mode}'`); 121 | 122 | async function methodHandler(reqRaw, method) { 123 | const req = reqRaw.map((s) => JSON.stringify(s)); 124 | console.log(`Receiving request '${method}'`); 125 | let result; 126 | if (mode === "random") { 127 | result = await randomLoadBalancingEth(config.providers, method, req); 128 | } else if (mode === "round-robin") { 129 | result = await roundRobinEth( 130 | config.providers, 131 | method, 132 | req, 133 | counterServiceId, 134 | counterPeerId, 135 | config.serviceId, 136 | ); 137 | } else if (mode === "quorum") { 138 | const quorumResult = await quorumEth( 139 | config.providers, 140 | quorumNumber, 141 | 10000, 142 | method, 143 | req, 144 | quorumServiceId, 145 | quorumPeerId, 146 | { ttl: 20000 }, 147 | ); 148 | 149 | if (quorumResult.error) { 150 | console.error( 151 | `quorum failed: ${quorumResult.error}\n${JSON.stringify( 152 | quorumResult.results, 153 | )}`, 154 | ); 155 | result = { success: false, error: quorumResult.error }; 156 | } else { 157 | result = { 158 | success: true, 159 | error: quorumResult.error, 160 | value: quorumResult.value, 161 | }; 162 | } 163 | } 164 | 165 | if (!result.success) { 166 | throw new Error(result.error); 167 | } 168 | 169 | return JSON.parse(result.value || "{}"); 170 | } 171 | 172 | function addMethod(op) { 173 | server.addMethod(op, async (req) => methodHandler(req, op)); 174 | } 175 | 176 | // register all eth methods 177 | methods.forEach((m) => { 178 | addMethod(m); 179 | }); 180 | 181 | const app = express(); 182 | app.use(bodyParser.json()); 183 | 184 | // register JSON-RPC handler 185 | app.post(route, (req, res) => { 186 | const jsonRPCRequest = req.body; 187 | server.receive(jsonRPCRequest).then((jsonRPCResponse) => { 188 | if (jsonRPCResponse) { 189 | res.json(jsonRPCResponse); 190 | } else { 191 | res.sendStatus(204); 192 | } 193 | }); 194 | }); 195 | 196 | app.listen(config.port); 197 | 198 | console.log("Server was started on port " + config.port); 199 | -------------------------------------------------------------------------------- /gateway/src/methods.js: -------------------------------------------------------------------------------- 1 | export const methods = ['eth_accounts', 2 | 'eth_blockNumber', 3 | 'eth_call', 4 | 'eth_chainId', 5 | 'eth_estimateGas', 6 | 'eth_getBalance', 7 | 'eth_getBlockByHash', 8 | 'eth_getBlockByNumber', 9 | 'eth_getBlockTransactionCountByHash', 10 | 'eth_getBlockTransactionCountByNumber', 11 | 'eth_getCode', 12 | 'eth_getLogs', 13 | 'eth_getStorageAt', 14 | 'eth_getTransactionByBlockHashAndIndex', 15 | 'eth_getTransactionByBlockNumberAndIndex', 16 | 'eth_getTransactionByHash', 17 | 'eth_getTransactionCount', 18 | 'eth_getTransactionReceipt', 19 | 'eth_sendTransaction', 20 | 'net_version', 21 | 'web3_sha3', 22 | 'eth_sendRawTransaction', 23 | 'eth_subscribe', 24 | 'eth_maxPriorityFeePerGas', 25 | 'eth_getUncleCountByBlockHash', 26 | 'eth_getUncleCountByBlockNumber', 27 | 'net_listening', 28 | 'net_peerCount', 29 | 'eth_protocolVersion', 30 | 'eth_syncing', 31 | 'eth_coinbase', 32 | 'eth_mining', 33 | 'eth_hashrate', 34 | 'eth_gasPrice', 35 | 'eth_getStorageAt', 36 | 'eth_sign', 37 | 'eth_getCompilers', 38 | 'eth_newBlockFilter', 39 | 'eth_newPendingTransactionFilter', 40 | 'eth_uninstallFilter', 41 | 'eth_getFilterChanges', 42 | 'eth_getWork', 43 | 'eth_submitWork', 44 | 'eth_submitHashrate', 45 | 'db_putString', 46 | 'db_getString', 47 | 'db_putHex', 48 | 
'db_getHex', 49 | 'shh_post', 50 | 'shh_version', 51 | 'shh_newIdentity', 52 | 'shh_hasIdentity', 53 | 'shh_newGroup', 54 | 'shh_addToGroup', 55 | 'shh_newFilter', 56 | 'shh_uninstallFilter', 57 | 'shh_getFilterChanges', 58 | 'shh_getMessages'] -------------------------------------------------------------------------------- /gateway/web3run.js: -------------------------------------------------------------------------------- 1 | import Web3 from 'web3'; 2 | 3 | const web3 = new Web3("http://localhost:3000"); 4 | 5 | async function main() { 6 | try { 7 | console.log("Trying to fetch the block number..."); 8 | const bn = await web3.eth.getBlockNumber(); 9 | console.log("Block number is: ", bn); 10 | } catch (e) { 11 | console.error("Error requesting block number\n", e); 12 | } 13 | 14 | try { 15 | console.log("Trying to fetch the transaction information..."); 16 | const resp = await web3.eth.getTransaction("0x7bfa7c9812c67af61872c66f3af13bb65ad0f81b7a44bcf4a11c11900be16409"); 17 | console.log("Transaction is", resp); 18 | } catch (e) { 19 | console.log("Error requesting transaction info!\n", e) 20 | } 21 | } 22 | 23 | main(); 24 | -------------------------------------------------------------------------------- /images/metamask_tx_prompt.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/fluencelabs/fRPC-Substrate/b3f76a0103a3f37f371fc3b936b476373ff2be03/images/metamask_tx_prompt.png -------------------------------------------------------------------------------- /jest.config.js: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright 2023 Fluence Labs Limited 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 
15 | */ 16 | 17 | /** @type {import('ts-jest').JestConfigWithTsJest} */ 18 | export default { 19 | testEnvironment: "node", 20 | testTimeout: 1000 * 60 * 10, // 10 minutes in milliseconds 21 | projects: [ 22 | { 23 | // Uses the serial runner for integration test files 24 | displayName: "INTEGRATION", 25 | runner: "jest-serial-runner", 26 | testMatch: ["/test/**/*.integration-test.ts"], 27 | extensionsToTreatAsEsm: [".ts"], 28 | transform: { 29 | "^.+\\.tsx?$": [ 30 | "ts-jest", 31 | { 32 | useESM: true, 33 | tsconfig: "test/tsconfig.json", 34 | }, 35 | ], 36 | }, 37 | }, 38 | ], 39 | }; 40 | -------------------------------------------------------------------------------- /package.json: -------------------------------------------------------------------------------- 1 | { 2 | "author": "Fluence Labs", 3 | "license": "Apache-2.0", 4 | "type": "module", 5 | "devDependencies": { 6 | "@multiformats/multiaddr": "^12.1.7", 7 | "@tsconfig/node18-strictest-esm": "^1.0.1", 8 | "@types/jest": "^29.5.5", 9 | "jest": "^29.7.0", 10 | "jest-serial-runner": "^1.2.1", 11 | "prettier": "^3.0.3", 12 | "tree-kill": "^1.2.2", 13 | "ts-jest": "^29.1.1", 14 | "ts-node": "^10.9.1", 15 | "typescript": "^5.2.2" 16 | }, 17 | "scripts": { 18 | "build": "tsc", 19 | "test": "node --no-warnings --experimental-vm-modules node_modules/jest/bin/jest.js --verbose" 20 | }, 21 | "prettier": {} 22 | } 23 | -------------------------------------------------------------------------------- /rust-toolchain.toml: -------------------------------------------------------------------------------- 1 | [toolchain] 2 | channel = "nightly-2023-06-14" 3 | components = [ 4 | "rustfmt", 5 | "clippy", 6 | ] 7 | targets = [ 8 | "x86_64-unknown-linux-gnu", 9 | ] 10 | -------------------------------------------------------------------------------- /src/aqua/main.aqua: -------------------------------------------------------------------------------- 1 | aqua Main 2 | 3 | import "@fluencelabs/aqua-lib/builtin.aqua" 4 | import "@fluencelabs/aqua-lib/subnet.aqua" 5 | 6 | use "deals.aqua" 7 | use "hosts.aqua" 8 | import "services.aqua" 9 | 10 | -- IMPORTANT: Add exports for all functions that you want to run 11 | export showSubnet, helloWorld, helloWorldRemote, getInfo, getInfos 12 | 13 | -- DOCUMENTATION: 14 | -- https://fluence.dev 15 | 16 | 17 | 18 | -- example of running services deployed using `fluence deal deploy` 19 | -- with worker 'defaultWorker' which has service 'MyService' with method 'greeting' 20 | 21 | export showSubnet 22 | 23 | data WorkerServices: 24 | host_id: string 25 | worker_id: ?string 26 | services: ?[]string 27 | 28 | func showSubnet() -> []WorkerServices: 29 | deals <- Deals.get() 30 | dealId = deals.defaultWorker!.dealIdOriginal 31 | on HOST_PEER_ID: 32 | subnet <- Subnet.resolve(dealId) 33 | if subnet.success == false: 34 | Console.print(["Failed to resolve subnet: ", subnet.error]) 35 | 36 | services: *WorkerServices 37 | for w <- subnet.workers: 38 | if w.worker_id != nil: 39 | on w.worker_id! 
via w.host_id: 40 | -- get list of all services on this worker 41 | srvs <- Srv.list() 42 | 43 | -- gather aliases 44 | aliases: *string 45 | for s <- srvs: 46 | if s.aliases.length != 0: 47 | aliases <<- s.aliases[0] 48 | 49 | services <<- WorkerServices(host_id=w.host_id, worker_id=w.worker_id, services=?[aliases]) 50 | else: 51 | services <<- WorkerServices(host_id=w.host_id, worker_id=nil, services=nil) 52 | 53 | <- services 54 | 55 | 56 | -- local 57 | func helloWorld(name: string) -> string: 58 | <- Op.concat_strings("Hello, ", name) 59 | 60 | -- remote 61 | func helloWorldRemote(name: string) -> string: 62 | on HOST_PEER_ID: 63 | hello_msg <- helloWorld(name) 64 | from_msg <- Op.concat_strings(hello_msg, "! From ") 65 | from_peer_msg <- Op.concat_strings(from_msg, HOST_PEER_ID) 66 | <- from_peer_msg 67 | 68 | -- request response 69 | func getInfo() -> Info, PeerId: 70 | on HOST_PEER_ID: 71 | info <- Peer.identify() 72 | <- info, HOST_PEER_ID 73 | 74 | -- iterate through several peers 75 | func getInfos(peers: []PeerId) -> []Info: 76 | infos: *Info 77 | for p <- peers: 78 | on p: 79 | infos <- Peer.identify() 80 | <- infos 81 | -------------------------------------------------------------------------------- /test/config.ts: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright 2023 Fluence Labs Limited 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | 17 | import { promises as fs } from "fs"; 18 | 19 | export const CONFIG_PATH = "./gateway/configs/quickstart_config.json"; 20 | 21 | /** 22 | * Part of the config that is used for the integration tests. 23 | */ 24 | export interface GatewayConfig { 25 | providers: string[]; 26 | relay: string; 27 | port: number; 28 | mode: string; 29 | } 30 | 31 | export async function readConfig(): Promise { 32 | const file = await fs.readFile(CONFIG_PATH); 33 | return JSON.parse(file.toString()); 34 | } 35 | 36 | export async function updateConfig( 37 | update: Partial, 38 | ): Promise { 39 | const current = await readConfig(); 40 | const newConfig = { ...current, ...update }; 41 | await fs.writeFile(CONFIG_PATH, JSON.stringify(newConfig, null, 2)); 42 | return newConfig; 43 | } 44 | -------------------------------------------------------------------------------- /test/env.ts: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright 2023 Fluence Labs Limited 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | 17 | function getOrFail(envVar: string): string { 18 | const value = process.env[envVar]; 19 | if (value === undefined) { 20 | throw new Error(`${envVar} is not set`); 21 | } 22 | 23 | return value; 24 | } 25 | 26 | export const FLUENCE_ENV = getOrFail("FLUENCE_ENV"); 27 | export const FLUENCE_CHAIN_PRIVATE_KEY = getOrFail("FLUENCE_CHAIN_PRIVATE_KEY"); 28 | export const RPC_PROVIDERS = getOrFail("RPC_PROVIDERS").split(","); 29 | -------------------------------------------------------------------------------- /test/fRPC.integration-test.ts: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright 2023 Fluence Labs Limited 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | 17 | import { multiaddr } from "@multiformats/multiaddr"; 18 | 19 | import { FLUENCE_CHAIN_PRIVATE_KEY, FLUENCE_ENV, RPC_PROVIDERS } from "./env"; 20 | import { 21 | startGateway, 22 | fluence, 23 | subnet, 24 | backupFile, 25 | randomElement, 26 | } from "./utils"; 27 | import { updateConfig } from "./config"; 28 | 29 | function throwError(msg: string): never { 30 | throw new Error(msg); 31 | } 32 | 33 | /** 34 | * Start gateway and test requests to it 35 | * @param mode mode to start gateway in 36 | * @param times how many times to send request 37 | */ 38 | async function testGateway(mode?: string, times = 6) { 39 | const gateway = await startGateway(mode); 40 | try { 41 | for (let i = 0; i < times; ++i) { 42 | const id = 100 + i; 43 | const request = { 44 | jsonrpc: "2.0", 45 | method: "eth_blockNumber", 46 | params: [], 47 | id, 48 | }; 49 | const response = await gateway.request(request); 50 | expect(response).toMatchObject({ 51 | jsonrpc: "2.0", 52 | id, 53 | result: expect.any(String), 54 | }); 55 | } 56 | } finally { 57 | expect(gateway.stop()).toBeTruthy(); 58 | } 59 | } 60 | 61 | /** 62 | * Run fluence CLI with env and private key 63 | */ 64 | async function fluenceKeyEnv(...args: string[]) { 65 | return fluence( 66 | ...args, 67 | "--env", 68 | FLUENCE_ENV, 69 | "--priv-key", 70 | FLUENCE_CHAIN_PRIVATE_KEY, 71 | ); 72 | } 73 | 74 | /** 75 | * WARNING: This tests are not isolated 76 | * They modify fs state 77 | */ 78 | describe("fRPC", () => { 79 | /** 80 | * - Setup RPC providers 81 | */ 82 | beforeAll(async () => { 83 | await updateConfig({ 84 | providers: RPC_PROVIDERS, 85 | }); 86 | }); 87 | 88 | describe("quickstart", () => { 89 | [undefined, "random", "round-robin", "quorum"].forEach((mode) => { 90 | it(`should run ${mode ? 
`(mode: ${mode})` : ""}`, async () => { 91 | await testGateway(mode); 92 | }); 93 | }); 94 | }); 95 | 96 | /** 97 | * WARNING: This tests should be run in order 98 | * As gateway tests need to have deal deployed 99 | */ 100 | describe("deploy", () => { 101 | /** 102 | * - Setup relay 103 | * - Register provider and add peers only for local env 104 | */ 105 | beforeAll(async () => { 106 | const [getPeers, stderrPeers] = await fluence( 107 | "default", 108 | "peers", 109 | FLUENCE_ENV, 110 | ); 111 | 112 | const peers = getPeers 113 | .split("\n") 114 | .map((p) => p.trim()) 115 | .filter((p) => p.length > 0); 116 | 117 | if (peers.length === 0) { 118 | throw new Error(`Failed to get default peers: 119 | stdout: ${getPeers} 120 | stderr: ${stderrPeers}`); 121 | } 122 | 123 | const relay = randomElement(peers) ?? throwError("Empty peers"); 124 | 125 | await updateConfig({ relay }); 126 | 127 | if (FLUENCE_ENV !== "local") return; 128 | 129 | const [register, stderrReg] = await fluenceKeyEnv( 130 | "provider", 131 | "register", 132 | // TODO: Those values are moved 133 | // to provider config in newer cli version 134 | "--max-collateral", 135 | "100", 136 | "--price-per-epoch", 137 | "1", 138 | ); 139 | 140 | // Here CLI writes success to stdout 141 | if (!register.includes("Successfully")) { 142 | throw new Error(`Failed to register provider: 143 | stdout: ${register} 144 | stderr: ${stderrReg}`); 145 | } 146 | 147 | const providerPeers = peers 148 | .slice(0, RPC_PROVIDERS.length) 149 | .map((p) => multiaddr(p).getPeerId() ?? throwError("Empty peer id")); 150 | 151 | const [stdoutAdd, addPeers] = await fluenceKeyEnv( 152 | "provider", 153 | "add-peer", 154 | ...providerPeers.flatMap((id) => ["--peer-id", id]), 155 | "--compute-units", 156 | "1", 157 | ); 158 | 159 | // Here CLI writes results to stderr 160 | const added = addPeers.match(/Added/g)?.length ?? 0; 161 | if (added != 3) { 162 | throw new Error(`Failed to add peers: 163 | stdout: ${stdoutAdd} 164 | stderr: ${addPeers}`); 165 | } 166 | }); 167 | 168 | it("should deploy the deal", async () => { 169 | // Remove previous deployment info 170 | await backupFile(".fluence/workers.yaml"); 171 | 172 | const [stdout, stderr] = await fluenceKeyEnv( 173 | "deal", 174 | "deploy", 175 | // TODO: Those values are moved 176 | // to deals in fluence config in newer cli version 177 | "--collateral-per-worker", 178 | "1", 179 | "--max-workers-per-provider", 180 | "3", 181 | "--min-workers", 182 | "3", 183 | "--target-workers", 184 | "3", 185 | "--price-per-worker-epoch", 186 | "1", 187 | ); 188 | 189 | expect(stdout.includes("Success!")).toBeTruthy(); 190 | 191 | const workersMatch = stderr.match(/(\d+)\s*workers/); 192 | const workers = 193 | workersMatch?.[1] ?? throwError(`Failed to parse workers.`); 194 | const workersNum = parseInt(workers); 195 | 196 | expect(workersNum).toBeGreaterThanOrEqual(3); 197 | 198 | /** 199 | * Wait for workers to deploy 200 | */ 201 | const DEPLOY_TIMEOUT = 60_000; 202 | const deadline = Date.now() + DEPLOY_TIMEOUT; 203 | for (;;) { 204 | const workers = await subnet(FLUENCE_ENV); 205 | const deployed = workers.filter( 206 | (w) => w.worker_id !== undefined && w.services?.includes("eth_rpc"), 207 | ); 208 | if (deployed.length === workersNum) { 209 | break; 210 | } 211 | if (Date.now() > deadline) { 212 | throw new Error( 213 | `Deployment timeout: ${workersNum} workers expected, 214 | ${deployed.length} deployed. 
215 | workers: ${JSON.stringify(workers)} 216 | `, 217 | ); 218 | } 219 | } 220 | }); 221 | 222 | ["random", "round-robin", "quorum"].forEach((mode) => { 223 | it(`should run ${mode ? `(mode: ${mode})` : ""}`, async () => { 224 | await testGateway(mode); 225 | }); 226 | }); 227 | }); 228 | }); 229 | -------------------------------------------------------------------------------- /test/tsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | "extends": "../tsconfig.json", 3 | "compilerOptions": { 4 | "noEmit": true 5 | } 6 | } 7 | -------------------------------------------------------------------------------- /test/utils.ts: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright 2023 Fluence Labs Limited 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | 17 | import { relative } from "path"; 18 | import { promises as fs } from "fs"; 19 | import { ChildProcess, execFile } from "child_process"; 20 | 21 | import treeKill from "tree-kill"; 22 | 23 | import { CONFIG_PATH, readConfig, updateConfig } from "./config"; 24 | 25 | export async function backupFile(path: string): Promise<void> { 26 | await fs.rename(path, `${path}.back`).catch((err) => { 27 | if (err.code !== "ENOENT") { 28 | throw err; 29 | } 30 | }); 31 | } 32 | 33 | export async function execute( 34 | cmd: string, 35 | ...args: string[] 36 | ): Promise<[string, string]> { 37 | return new Promise((resolve, reject) => { 38 | execFile(cmd, args, (error, stdout, stderr) => { 39 | if (error) { 40 | reject(error); 41 | } 42 | 43 | resolve([stdout, stderr]); 44 | }); 45 | }); 46 | } 47 | 48 | export async function fluence(...args: string[]): Promise<[string, string]> { 49 | return execute("fluence", ...args, "--no-input"); 50 | } 51 | 52 | interface Worker { 53 | host_id: string; 54 | worker_id: string | undefined; 55 | services: string[] | undefined; 56 | } 57 | 58 | /** 59 | * Run the `showSubnet` function and return the workers 60 | * @param env Fluence environment 61 | * @returns Subnet workers 62 | */ 63 | export async function subnet(env: string): Promise<Worker[]> { 64 | const [stdout, _] = await fluence( 65 | "run", 66 | "-f", 67 | "showSubnet()", 68 | "-i", 69 | "src/aqua/main.aqua", 70 | "--env", 71 | env, 72 | ); 73 | 74 | return JSON.parse(stdout); 75 | } 76 | 77 | export class Gateway { 78 | private stdout: string = ""; 79 | private stderr: string = ""; 80 | 81 | constructor( 82 | private readonly gateway: ChildProcess, 83 | private readonly port: number, 84 | ) { 85 | gateway.stdout?.on("data", (data: any) => { 86 | this.stdout += data; 87 | }); 88 | gateway.stderr?.on("data", (data: any) => { 89 | this.stderr += data; 90 | }); 91 | } 92 | 93 | public async stop(): Promise<boolean> { 94 | if (this.gateway.stdin) { 95 | this.gateway.stdin.end(); 96 | } 97 | if (this.gateway.stdout) { 98 | this.gateway.stdout.destroy(); 99 | } 100 | if (this.gateway.stderr) { 101 | this.gateway.stderr.destroy(); 102 | } 103 | if
(this.gateway.pid) { 104 | const pid = this.gateway.pid; 105 | /** 106 | * ChildProcess.kill() does not reliably kill 107 | * the whole subprocess tree, so use tree-kill 108 | */ 109 | await new Promise<void>((resolve, reject) => 110 | treeKill(pid, (err) => { 111 | if (err) { 112 | reject(err); 113 | } else { 114 | resolve(); 115 | } 116 | }), 117 | ); 118 | } 119 | return this.gateway.kill(); 120 | } 121 | 122 | public async request(json: any): Promise<unknown> { 123 | const response = await fetch(`http://localhost:${this.port}`, { 124 | method: "POST", 125 | body: JSON.stringify(json), 126 | headers: { 127 | "Content-Type": "application/json", 128 | Accept: "application/json", 129 | }, 130 | }); 131 | 132 | return await response.json(); 133 | } 134 | 135 | public getStdout(): string { 136 | return this.stdout; 137 | } 138 | public getStderr(): string { 139 | return this.stderr; 140 | } 141 | } 142 | 143 | export async function startGateway(mode?: string): Promise<Gateway> { 144 | const GATEWAY_DIR = "./gateway"; 145 | const configPath = relative(GATEWAY_DIR, CONFIG_PATH); 146 | 147 | const config = await (mode ? updateConfig({ mode }) : readConfig()); 148 | const gateway = execFile("npm", [ 149 | "-C", 150 | GATEWAY_DIR, 151 | "run", 152 | "run", 153 | configPath, 154 | ]); 155 | 156 | const wrapper = new Gateway(gateway, config.port); 157 | 158 | await new Promise<void>((resolve, reject) => { 159 | const timeout = setTimeout(() => { 160 | gateway.stdout?.removeListener("data", onData); 161 | gateway.stderr?.removeListener("data", onData); 162 | wrapper.stop(); 163 | reject(new Error(`Gateway failed to start in 10 seconds: ${output}`)); 164 | }, 10000); 165 | 166 | let output = ""; 167 | const onData = (data: string) => { 168 | output += data; 169 | if (output.includes("Server was started")) { 170 | gateway.stdout?.removeListener("data", onData); 171 | gateway.stderr?.removeListener("data", onData); 172 | clearTimeout(timeout); 173 | resolve(); 174 | } 175 | }; 176 | 177 | gateway.stdout?.on("data", onData); 178 | gateway.stderr?.on("data", onData); 179 | }); 180 | 181 | return wrapper; 182 | } 183 | 184 | export function randomElement<T>(arr: T[]): T | undefined { 185 | return arr[Math.floor(Math.random() * arr.length)]; 186 | } 187 | -------------------------------------------------------------------------------- /tsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | "extends": "@tsconfig/node18-strictest-esm/tsconfig.json", 3 | "compilerOptions": { 4 | "declaration": true, 5 | "outDir": "dist", 6 | "importsNotUsedAsValues": "remove", 7 | "verbatimModuleSyntax": true, 8 | "lib": ["es2023"] 9 | }, 10 | "exclude": ["gateway/**/*"], 11 | "include": ["test/**/*"], 12 | "ts-node": { 13 | "esm": true 14 | }, 15 | "types": ["node", "jest"] 16 | } 17 | -------------------------------------------------------------------------------- /wasm-modules/curl-adapter/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "curl_adapter" 3 | version = "0.1.0" 4 | authors = ["Fluence Labs"] 5 | edition = "2018" 6 | publish = false 7 | 8 | [[bin]] 9 | path = "src/main.rs" 10 | name = "curl_adapter" 11 | 12 | [dependencies] 13 | marine-rs-sdk = "=0.10.1" 14 | log = "=0.4.20" 15 | -------------------------------------------------------------------------------- /wasm-modules/curl-adapter/module.yaml: -------------------------------------------------------------------------------- 1 | # yaml-language-server: $schema=../../.fluence/schemas/module.json
2 | 3 | # Defines a [Marine Module](https://fluence.dev/docs/build/concepts/#modules). For Fluence CLI, a **module** is a directory that contains this config and either a precompiled .wasm Marine module or the module's Rust source code, which can be compiled into a .wasm Marine module. You can use the `fluence module new` command to generate a template for a new module 4 | 5 | # Documentation: https://github.com/fluencelabs/fluence-cli/tree/main/docs/configs/module.md 6 | 7 | version: 0 8 | type: rust 9 | name: curl_adapter 10 | mountedBinaries: 11 | curl: /usr/bin/curl 12 | -------------------------------------------------------------------------------- /wasm-modules/curl-adapter/src/main.rs: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2021 Fluence Labs Limited 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | 17 | #![allow(improper_ctypes)] 18 | 19 | use marine_rs_sdk::{marine, MountedBinaryResult}; 20 | 21 | pub fn main() {} 22 | 23 | #[marine] 24 | pub fn curl_request(cmd: Vec<String>) -> MountedBinaryResult { 25 | curl(cmd) 26 | } 27 | 28 | #[marine] 29 | #[link(wasm_import_module = "host")] 30 | extern "C" { 31 | fn curl(cmd: Vec<String>) -> MountedBinaryResult; 32 | } 33 | -------------------------------------------------------------------------------- /wasm-modules/eth-rpc/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "eth_rpc" 3 | version = "0.1.0" 4 | edition = "2021" 5 | authors = ["Fluence Labs"] 6 | publish = false 7 | 8 | [[bin]] 9 | name = "eth_rpc" 10 | path = "src/main.rs" 11 | 12 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html 13 | 14 | [dependencies] 15 | web3 = { version = "=0.19.0", features = [], default-features = false } 16 | #async-std = "1.12.0" # async-std does not support wasm32-wasi 17 | serde_json = "=1.0.108" 18 | serde = "=1.0.193" 19 | jsonrpc-core = "=18.0.0" 20 | tokio = { version = "=1.34.0", default-features = false, features = ["rt"] } 21 | eyre = "=0.6.9" 22 | 23 | marine-rs-sdk = "=0.10.1" 24 | 25 | [dev-dependencies] 26 | marine-rs-sdk-test = "=0.11.1" 27 | -------------------------------------------------------------------------------- /wasm-modules/eth-rpc/module.yaml: -------------------------------------------------------------------------------- 1 | # yaml-language-server: $schema=../../.fluence/schemas/module.json 2 | 3 | # Defines a [Marine Module](https://fluence.dev/docs/build/concepts/#modules). For Fluence CLI, a **module** is a directory that contains this config and either a precompiled .wasm Marine module or the module's Rust source code, which can be compiled into a .wasm Marine module.
You can use the `fluence module new` command to generate a template for a new module 4 | 5 | # Documentation: https://github.com/fluencelabs/fluence-cli/tree/main/docs/configs/module.md 6 | 7 | version: 0 8 | type: rust 9 | name: eth_rpc 10 | -------------------------------------------------------------------------------- /wasm-modules/eth-rpc/src/curl_transport.rs: -------------------------------------------------------------------------------- 1 | use std::sync::atomic::{AtomicUsize, Ordering}; 2 | use std::sync::Arc; 3 | 4 | use jsonrpc_core::types::request::Call; 5 | use jsonrpc_core::Output; 6 | use serde_json::json; 7 | use serde_json::Value; 8 | use web3::futures::future::BoxFuture; 9 | use web3::{RequestId, Transport}; 10 | 11 | use crate::curl_request; 12 | 13 | pub type FutResult = BoxFuture<'static, web3::error::Result<Value>>; 14 | 15 | #[derive(Debug, Clone)] 16 | pub struct CurlTransport { 17 | pub uri: String, 18 | id: Arc<AtomicUsize>, 19 | } 20 | impl CurlTransport { 21 | pub fn new(uri: String) -> Self { 22 | Self { 23 | uri, 24 | id: Arc::new(AtomicUsize::new(0)), 25 | } 26 | } 27 | 28 | pub fn next_id(&self) -> RequestId { 29 | self.id.fetch_add(1, Ordering::AcqRel) 30 | } 31 | } 32 | 33 | impl Transport for CurlTransport { 34 | type Out = FutResult; 35 | 36 | fn prepare(&self, method: &str, params: Vec<Value>) -> (RequestId, Call) { 37 | let id = self.next_id(); 38 | let request = web3::helpers::build_request(id, method, params.clone()); 39 | (id, request) 40 | } 41 | 42 | fn send(&self, _: RequestId, call: Call) -> Self::Out { 43 | if let Call::MethodCall(call) = call { 44 | /* 45 | curl --request POST \ 46 | --url $uri \ 47 | --header 'accept: application/json' \ 48 | --header 'content-type: application/json' \ 49 | --data ' 50 | { 51 | "id": 1, 52 | "jsonrpc": "2.0", 53 | "method": "eth_accounts" 54 | } 55 | ' 56 | */ 57 | let uri = self.uri.clone(); 58 | Box::pin(async move { 59 | let json = json!(call).to_string(); 60 | let args = vec![ 61 | "--request", 62 | "POST", 63 | "--url", 64 | &uri, 65 | "--header", 66 | "accept: application/json", 67 | "--header", 68 | "content-type: application/json", 69 | "--data", 70 | json.as_str(), 71 | ]; 72 | let args = args.into_iter().map(|s| s.to_string()).collect(); 73 | let response = curl_request(args); 74 | 75 | /* 76 | println!( 77 | "response is: \nstdout: {:?}\nstderr: {:?}", 78 | String::from_utf8(response.stdout.clone()), 79 | String::from_utf8(response.stderr.clone()) 80 | ); 81 | 82 | println!("slice: {:?}", serde_json::from_value::<Output>(serde_json::from_slice(response.stdout.as_slice())?)); 83 | */ 84 | 85 | // FIX: if there's a bad uri, the panic kicks in here. 86 | 87 | if response.ret_code != 0 { 88 | let stdout = String::from_utf8_lossy(&response.stdout); 89 | let error = if response.error.is_empty() { 90 | stdout.to_string() 91 | } else { 92 | format!("error: {}\nstdout: {stdout}", response.error) 93 | }; 94 | return Err(web3::error::Error::Transport( 95 | web3::error::TransportError::Message(format!("error: {}", error)), 96 | )); 97 | } 98 | 99 | let response = serde_json::from_slice(response.stdout.as_slice())?; 100 | let response: Output = serde_json::from_value(response)?; 101 | let result = match response { 102 | Output::Success(jsonrpc_core::types::Success { result, .. }) => result, 103 | 104 | // not sure if that's enough vs. the complete JSON-RPC error msg 105 | Output::Failure(jsonrpc_core::types::Failure { error, ..
}) => { 106 | serde_json::to_value(error.message).unwrap() 107 | } /* 108 | Output::Failure(failure) => panic!( 109 | "JSON RPC response was a failure {}", 110 | json!(failure).to_string() 111 | ), 112 | */ 113 | /* 114 | Output::Failure(failure) => { 115 | let err = jsonrpc_core::types::error::Error.parse_error() 116 | } 117 | 118 | format!("JSON RPC response was a failure {}", 119 | json!(failure).to_string()), 120 | */ 121 | }; 122 | 123 | // println!("parsed result is {}", result.to_string()); 124 | Ok(result) 125 | }) 126 | } else { 127 | todo!() 128 | } 129 | // Box::pin(async { Ok(json!(["0x407d73d8a49eeb85d32cf465507dd71d507100c1"])) }) 130 | } 131 | } 132 | -------------------------------------------------------------------------------- /wasm-modules/eth-rpc/src/eth_call.rs: -------------------------------------------------------------------------------- 1 | use eyre::eyre; 2 | use marine_rs_sdk::marine; 3 | use serde::{Deserialize, Serialize}; 4 | use serde_json::Value; 5 | use tokio::runtime::Builder; 6 | use web3::Transport; 7 | 8 | use crate::curl_transport::CurlTransport; 9 | use crate::values::JsonString; 10 | 11 | #[marine] 12 | pub fn eth_call(uri: String, method: &str, json_args: Vec<String>) -> JsonString { 13 | let result: eyre::Result<Value> = try { 14 | let rt = Builder::new_current_thread().build()?; 15 | 16 | let args: Result<Vec<Value>, _> = 17 | json_args.iter().map(|a| serde_json::from_str(a)).collect(); 18 | let args = args.map_err(|err| { 19 | eyre!("Invalid arguments. Expected JSON serialized to string, got {json_args:?}: {err}") 20 | })?; 21 | let transport = CurlTransport::new(uri); 22 | let result = rt.block_on(transport.execute(method, args))?; 23 | 24 | result 25 | }; 26 | 27 | result.into() 28 | } 29 | 30 | #[marine] 31 | #[derive(Debug, Clone, Deserialize, Serialize)] 32 | pub struct RPCResult { 33 | provider_name: String, 34 | stdout: String, 35 | stderr: String, 36 | } 37 | 38 | pub fn eth_call_2(uri: String, method: &str, json_args: Vec<String>) -> JsonString { 39 | let result: eyre::Result<Value> = try { 40 | let rt = Builder::new_current_thread().build()?; 41 | 42 | let args: Result<Vec<Value>, _> = json_args 43 | .into_iter() 44 | .map(|a| serde_json::from_str(&a)) 45 | .collect(); 46 | let transport = CurlTransport::new(uri); 47 | let result = rt.block_on(transport.execute(method, args?))?; 48 | 49 | result 50 | }; 51 | 52 | result.into() 53 | } 54 | 55 | #[cfg(test)] 56 | mod tests { 57 | use marine_rs_sdk_test::marine_test; 58 | 59 | #[marine_test( 60 | config_path = "../tests_artifacts/Config.toml", 61 | modules_dir = "../tests_artifacts" 62 | )] 63 | fn get_accounts_bad_uri(rpc: marine_test_env::eth_rpc::ModuleInterface) { 64 | let uri: String = "http://bad_uri.to".into(); 65 | let method: String = "eth_accounts".into(); 66 | let json_args: Vec<String> = vec![]; 67 | 68 | let accounts = rpc.eth_call(uri, method, json_args); 69 | println!("bad uri call: {:?}", accounts); 70 | // println!("accounts: {:?}", accounts); 71 | // assert_eq!(accounts.len(), 0); 72 | } 73 | 74 | #[marine_test( 75 | config_path = "../tests_artifacts/Config.toml", 76 | modules_dir = "../tests_artifacts" 77 | )] 78 | fn get_accounts_bad_method(rpc: marine_test_env::eth_rpc::ModuleInterface) { 79 | let uri: String = std::fs::read_to_string("./infura_uri.txt").unwrap(); 80 | let method: String = "eth_getAccounts".into(); 81 | let json_args: Vec<String> = vec![]; 82 | 83 | let accounts = rpc.eth_call(uri, method, json_args); 84 | println!("bad method: {:?}", accounts); 85 | } 86 | 87 | #[marine_test( 88 | config_path =
"../tests_artifacts/Config.toml", 89 | modules_dir = "../tests_artifacts" 90 | )] 91 | fn get_accounts_good_uri(rpc: marine_test_env::eth_rpc::ModuleInterface) { 92 | let uri: String = std::fs::read_to_string("./infura_uri.txt").unwrap(); 93 | let method: String = "eth_accounts".into(); 94 | let json_args: Vec = vec![]; 95 | 96 | let accounts = rpc.eth_call(uri, method, json_args); 97 | println!("all good: {:?}", accounts); 98 | 99 | // println!("accounts: {:?}", accounts); 100 | // assert_eq!(accounts.len(), 0); 101 | } 102 | 103 | #[marine_test( 104 | config_path = "../tests_artifacts/Config.toml", 105 | modules_dir = "../tests_artifacts" 106 | )] 107 | fn get_transaction(rpc: marine_test_env::eth_rpc::ModuleInterface) { 108 | use serde_json::json; 109 | 110 | let uri: String = todo!("put Goerli ETH RPC URL here"); 111 | let method: String = "eth_getTransactionByHash".into(); 112 | let json_args: Vec = 113 | vec![ 114 | json!("0x3ffaa16b93ef90b9385b6f6a140d8297c43b6551bf8e8b804d9eecff7bc1509f") 115 | .to_string(), 116 | ]; 117 | 118 | let result = rpc.eth_call(uri.clone(), method.clone(), json_args); 119 | assert!(result.success, "{}", result.error); 120 | assert_eq!(result.value, "null", "{}", result.value); 121 | 122 | let json_args: Vec = 123 | vec!["0x3ffaa16b93ef90b9385b6f6a140d8297c43b6551bf8e8b804d9eecff7bc1509f".into()]; 124 | 125 | let result = rpc.eth_call(uri, method, json_args); 126 | assert!(!result.success); 127 | assert!( 128 | result 129 | .error 130 | .starts_with("Invalid arguments. Expected JSON serialized to string"), 131 | "{}", 132 | result.error 133 | ); 134 | } 135 | } 136 | -------------------------------------------------------------------------------- /wasm-modules/eth-rpc/src/main.rs: -------------------------------------------------------------------------------- 1 | #![feature(try_blocks)] 2 | 3 | use marine_rs_sdk::module_manifest; 4 | use marine_rs_sdk::{marine, MountedBinaryResult}; 5 | use tokio::runtime::Builder; 6 | use web3::api::Eth; 7 | use web3::helpers::CallFuture; 8 | use web3::types::Address; 9 | use web3::Web3; 10 | 11 | use crate::curl_transport::{CurlTransport, FutResult}; 12 | 13 | pub mod curl_transport; 14 | pub mod eth_call; 15 | pub mod typed; 16 | pub mod values; 17 | 18 | module_manifest!(); 19 | 20 | pub fn main() {} 21 | 22 | // #[tokio::main(flavor = "current_thread")] 23 | // flavor idea comes from https://github.com/rjzak/tokio-echo-test/blob/main/src/main.rs#L42 24 | // but seems to require additional tokio futures 25 | pub fn get_accounts(uri: String) -> web3::error::Result>> { 26 | let rt = Builder::new_current_thread().build()?; 27 | 28 | let web3 = Web3::new(CurlTransport::new(uri)); 29 | 30 | let eth = web3.eth(); 31 | println!("Calling accounts."); 32 | let accounts: CallFuture, FutResult> = eth.accounts(); 33 | let accounts: web3::Result> = rt.block_on(accounts); 34 | println!("Accounts: {:?}", accounts); 35 | 36 | Ok(accounts? 
37 | .into_iter() 38 | .map(|a: Address| a.as_bytes().to_vec()) 39 | .collect()) 40 | } 41 | 42 | pub fn web3_call< 43 | Out: serde::de::DeserializeOwned, 44 | F: FnOnce(Eth<CurlTransport>) -> CallFuture<Out, FutResult>, 45 | >( 46 | uri: String, 47 | call: F, 48 | ) -> web3::error::Result<Out> { 49 | let rt = Builder::new_current_thread() 50 | .build() 51 | .expect("error starting tokio runtime"); 52 | 53 | let web3 = Web3::new(CurlTransport::new(uri)); 54 | 55 | let result: CallFuture<Out, FutResult> = call(web3.eth()); 56 | let result: web3::error::Result<Out> = rt.block_on(result); 57 | 58 | result 59 | } 60 | 61 | #[marine] 62 | pub fn call_get_accounts(uri: String) -> Vec<Vec<u8>> { 63 | get_accounts(uri).expect("error calling get_accounts") 64 | } 65 | 66 | #[marine] 67 | #[link(wasm_import_module = "curl_adapter")] 68 | extern "C" { 69 | pub fn curl_request(cmd: Vec<String>) -> MountedBinaryResult; 70 | } 71 | 72 | #[cfg(test)] 73 | mod tests { 74 | use marine_rs_sdk_test::marine_test; 75 | 76 | #[marine_test( 77 | config_path = "../tests_artifacts/Config.toml", 78 | modules_dir = "../tests_artifacts" 79 | )] 80 | fn get_accounts_generic(rpc: marine_test_env::eth_rpc::ModuleInterface) { 81 | let uri: String = std::fs::read_to_string("./infura_uri.txt").unwrap(); 82 | let method: String = "eth_accounts".into(); 83 | let json_args: Vec<String> = vec![]; 84 | 85 | let accounts = rpc.eth_call(uri, method, json_args); 86 | println!("accounts: {:?}", accounts); 87 | } 88 | } 89 | -------------------------------------------------------------------------------- /wasm-modules/eth-rpc/src/typed.rs: -------------------------------------------------------------------------------- 1 | #![allow(unused)] 2 | 3 | use marine_rs_sdk::marine; 4 | use web3::types::{BlockId, BlockNumber, Bytes, CallRequest}; 5 | 6 | use crate::values::{BytesValue, JsonString, U64Value}; 7 | use crate::web3_call; 8 | 9 | /// Get list of available accounts. 10 | #[marine] 11 | pub fn accounts(uri: String) -> Vec<JsonString> { 12 | web3_call(uri, |w| w.accounts()) 13 | .into_iter() 14 | .map(|a| { 15 | let json = serde_json::to_value(&a).map_err(eyre::Report::from); 16 | JsonString::from(json) 17 | }) 18 | .collect() 19 | } 20 | 21 | /// Get current block number 22 | #[marine] 23 | pub fn block_number(uri: String) -> U64Value { 24 | web3_call(uri, |w| w.block_number()).into() 25 | } 26 | 27 | /// Call a constant method of contract without changing the state of the blockchain. 28 | #[marine] 29 | pub fn call(uri: String, req: String, block: u64) -> BytesValue { 30 | let result: eyre::Result<Bytes> = try { 31 | let req: CallRequest = serde_json::from_str(&req)?; 32 | web3_call(uri, move |w| { 33 | w.call( 34 | req, 35 | Some(BlockId::Number(BlockNumber::Number(block.into()))), 36 | ) 37 | })? 38 | }; 39 | 40 | result.into() 41 | } 42 | 43 | /// Get coinbase address 44 | // #[marine] 45 | pub fn coinbase(uri: String) -> String { 46 | todo!() 47 | } 48 | 49 | /// Compile LLL 50 | // #[marine] 51 | pub fn compile_lll(uri: String, code: String) -> Vec<u8> { 52 | todo!() 53 | } 54 | 55 | /// Compile Solidity 56 | // #[marine] 57 | pub fn compile_solidity(uri: String, code: String) -> Vec<u8> { 58 | todo!() 59 | } 60 | 61 | /// Compile Serpent 62 | // #[marine] 63 | pub fn compile_serpent(uri: String, code: String) -> Vec<u8> { 64 | todo!() 65 | } 66 | 67 | /// Call a contract without changing the state of the blockchain to estimate gas usage.
68 | // #[marine] 69 | pub fn estimate_gas(uri: String, req: String, block: String) -> String { 70 | todo!() 71 | } 72 | 73 | /// Get current recommended gas price 74 | // #[marine] 75 | pub fn gas_price(uri: String) -> String { 76 | todo!() 77 | } 78 | 79 | /// Returns a collection of historical gas information. This can be used for evaluating max_fee_per_gas 80 | /// and max_priority_fee_per_gas for future transactions. 81 | // #[marine] 82 | pub fn fee_history( 83 | uri: String, 84 | block_count: String, 85 | newest_block: String, 86 | reward_percentiles: Vec<f64>, 87 | ) -> String { 88 | todo!() 89 | } 90 | 91 | /// Get balance of given address 92 | // #[marine] 93 | pub fn balance(uri: String, address: String, block: String) -> String { 94 | todo!() 95 | } 96 | 97 | /// Get all logs matching a given filter object 98 | // #[marine] 99 | pub fn logs(uri: String, filter: String) -> Vec<String> { 100 | todo!() 101 | } 102 | 103 | /// Get block details with transaction hashes. 104 | // #[marine] 105 | pub fn block(uri: String, block: String) -> String { 106 | todo!() 107 | } 108 | 109 | /// Get block details with full transaction objects. 110 | // #[marine] 111 | pub fn block_with_txs(uri: String, block: String) -> String { 112 | todo!() 113 | } 114 | 115 | /// Get number of transactions in block 116 | // #[marine] 117 | pub fn block_transaction_count(uri: String, block: String) -> String { 118 | todo!() 119 | } 120 | 121 | /// Get code under given address 122 | // #[marine] 123 | pub fn code(uri: String, address: String, block: String) -> Vec<u8> { 124 | todo!() 125 | } 126 | 127 | /// Get supported compilers 128 | // #[marine] 129 | pub fn compilers(uri: String) -> Vec<String> { 130 | todo!() 131 | } 132 | 133 | /// Get chain id 134 | // #[marine] 135 | pub fn chain_id(uri: String) -> String { 136 | todo!() 137 | } 138 | 139 | /// Get available user accounts. This method is only available in the browser. With MetaMask, 140 | /// this will cause a popup that prompts the user to allow or deny your app 141 | /// access to their accounts. 142 | // #[marine] 143 | pub fn request_accounts(uri: String) -> Vec<String> { 144 | todo!() 145 | } 146 | 147 | /// Get storage entry 148 | // #[marine] 149 | pub fn storage(uri: String, address: String, idx: String, block: String) -> String { 150 | todo!() 151 | } 152 | 153 | /// Get nonce 154 | // #[marine] 155 | pub fn transaction_count(uri: String, address: String, block: String) -> String { 156 | todo!() 157 | } 158 | 159 | /// Get transaction 160 | // #[marine] 161 | pub fn transaction(uri: String, id: String) -> String { 162 | todo!() 163 | } 164 | 165 | /// Get transaction receipt 166 | // #[marine] 167 | pub fn transaction_receipt(uri: String, hash: String) -> String { 168 | todo!() 169 | } 170 | 171 | /// Get uncle header by block ID and uncle index. 172 | /// 173 | /// This method is meant for TurboGeth compatibility, 174 | /// which is missing transaction hashes in the response. 175 | // #[marine] 176 | pub fn uncle_header(uri: String, block: String, index: String) -> String { 177 | todo!() 178 | } 179 | 180 | /// Get uncle by block ID and uncle index -- transactions only have hashes.
181 | // #[marine] 182 | pub fn uncle(uri: String, block: String, index: String) -> String { 183 | todo!() 184 | } 185 | 186 | /// Get uncle count in block 187 | // #[marine] 188 | pub fn uncle_count(uri: String, block: String) -> String { 189 | todo!() 190 | } 191 | 192 | /// Get work package 193 | // #[marine] 194 | pub fn work(uri: String) -> String { 195 | todo!() 196 | } 197 | 198 | /// Get hash rate 199 | // #[marine] 200 | pub fn hashrate(uri: String) -> String { 201 | todo!() 202 | } 203 | 204 | /// Get mining status 205 | // #[marine] 206 | pub fn mining(uri: String) -> bool { 207 | todo!() 208 | } 209 | 210 | /// Start new block filter 211 | // #[marine] 212 | pub fn new_block_filter(uri: String) -> String { 213 | todo!() 214 | } 215 | 216 | /// Start new pending transaction filter 217 | // #[marine] 218 | pub fn new_pending_transaction_filter(uri: String) -> String { 219 | todo!() 220 | } 221 | 222 | /// Get protocol version 223 | // #[marine] 224 | pub fn protocol_version(uri: String) -> String { 225 | todo!() 226 | } 227 | 228 | /// Sends an RLP-encoded signed transaction 229 | // #[marine] 230 | pub fn send_raw_transaction(uri: String, rlp: String) -> String { 231 | todo!() 232 | } 233 | 234 | /// Sends a transaction 235 | // #[marine] 236 | pub fn send_transaction(uri: String, tx: String) -> String { 237 | todo!() 238 | } 239 | 240 | /// Signs a hash of the given data 241 | // #[marine] 242 | pub fn sign(uri: String, address: String, data: String) -> String { 243 | todo!() 244 | } 245 | 246 | /// Submit hashrate of external miner 247 | // #[marine] 248 | pub fn submit_hashrate(uri: String, rate: String, id: String) -> bool { 249 | todo!() 250 | } 251 | 252 | /// Submit work of external miner 253 | // #[marine] 254 | pub fn submit_work(uri: String, nonce: String, pow_hash: String, mix_hash: String) -> bool { 255 | todo!() 256 | } 257 | 258 | /// Get syncing status 259 | // #[marine] 260 | pub fn syncing(uri: String) -> String { 261 | todo!() 262 | } 263 | 264 | /// Returns the account- and storage-values of the specified account including the Merkle-proof.
265 | // #[marine] 266 | pub fn proof(uri: String, address: String, keys: String, block: String) -> String { 267 | todo!() 268 | } 269 | -------------------------------------------------------------------------------- /wasm-modules/eth-rpc/src/values.rs: -------------------------------------------------------------------------------- 1 | use marine_rs_sdk::marine; 2 | use serde_json::Value; 3 | use web3::types::Bytes; 4 | use web3::types::U64; 5 | 6 | #[marine] 7 | pub struct JsonString { 8 | pub value: String, 9 | pub success: bool, 10 | pub error: String, 11 | } 12 | 13 | impl From<eyre::Result<Value>> for JsonString { 14 | fn from(value: eyre::Result<Value>) -> Self { 15 | match value { 16 | Ok(value) => JsonString { 17 | value: value.to_string(), 18 | success: true, 19 | error: String::new(), 20 | }, 21 | Err(err) => JsonString { 22 | value: String::new(), 23 | success: false, 24 | error: format!("{}\n{:?}", err, err), 25 | }, 26 | } 27 | } 28 | } 29 | 30 | #[marine] 31 | pub struct U64Value { 32 | pub value: u64, 33 | pub success: bool, 34 | pub error: String, 35 | } 36 | 37 | impl From<web3::error::Result<U64>> for U64Value { 38 | fn from(value: web3::error::Result<U64>) -> Self { 39 | match value { 40 | Ok(value) => U64Value { 41 | value: value.as_u64(), 42 | success: true, 43 | error: String::new(), 44 | }, 45 | Err(err) => U64Value { 46 | value: u64::default(), 47 | success: false, 48 | error: format!("{}\n{:?}", err, err), 49 | }, 50 | } 51 | } 52 | } 53 | 54 | #[marine] 55 | pub struct BytesValue { 56 | pub value: Vec<u8>, 57 | pub success: bool, 58 | pub error: String, 59 | } 60 | 61 | impl From<eyre::Result<Bytes>> for BytesValue { 62 | fn from(value: eyre::Result<Bytes>) -> Self { 63 | match value { 64 | Ok(value) => BytesValue { 65 | value: value.0, 66 | success: true, 67 | error: String::new(), 68 | }, 69 | Err(err) => BytesValue { 70 | value: vec![], 71 | success: false, 72 | error: format!("{}\n{:?}", err, err), 73 | }, 74 | } 75 | } 76 | } 77 | -------------------------------------------------------------------------------- /wasm-modules/service.yaml: -------------------------------------------------------------------------------- 1 | # yaml-language-server: $schema=../.fluence/schemas/service.json 2 | 3 | # Defines a [Marine service](https://fluence.dev/docs/build/concepts/#services), most importantly the modules that the service consists of. For Fluence CLI, a **service** is a directory that contains this config. You can use the `fluence service new` command to generate a template for a new service 4 | 5 | # Documentation: https://github.com/fluencelabs/fluence-cli/tree/main/docs/configs/service.md 6 | 7 | version: 0 8 | name: eth_rpc 9 | modules: 10 | facade: 11 | get: eth-rpc 12 | curl-adapter: 13 | get: curl-adapter 14 | --------------------------------------------------------------------------------
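Note on the result wrappers in values.rs: JsonString, U64Value, and BytesValue deliberately report failures as data (a success flag plus an error string) rather than panicking inside the Wasm module, so callers can branch on success instead of handling a trap. The unit-test sketch below is hypothetical and not part of the repository; it assumes it would be appended as a test module at the bottom of values.rs (so it can use the types defined there) and only exercises the From conversions shown above.

#[cfg(test)]
mod conversion_tests {
    // Hypothetical test module, assumed to live at the bottom of values.rs.
    use serde_json::{json, Value};

    use super::JsonString;

    #[test]
    fn ok_result_sets_success_and_serialized_value() {
        // An Ok(Value) becomes a JsonString with success = true and the JSON serialized into `value`.
        let ok: eyre::Result<Value> = Ok(json!({ "block": "0x10" }));
        let converted = JsonString::from(ok);
        assert!(converted.success);
        assert_eq!(converted.value, r#"{"block":"0x10"}"#);
        assert!(converted.error.is_empty());
    }

    #[test]
    fn err_result_sets_error_and_clears_value() {
        // An Err becomes a JsonString with success = false, an empty value, and the error text preserved.
        let err: eyre::Result<Value> = Err(eyre::eyre!("transport failed"));
        let converted = JsonString::from(err);
        assert!(!converted.success);
        assert!(converted.value.is_empty());
        assert!(converted.error.contains("transport failed"));
    }
}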