├── .github └── workflows │ └── release.yml ├── .gitignore ├── Dockerfile ├── LICENSE ├── LICENSE.confura ├── Makefile ├── README.md ├── cmd ├── data_context.go ├── nm.go ├── root.go ├── rpc.go ├── sync.go ├── test │ ├── cfx.go │ ├── eth.go │ ├── root.go │ └── ws.go └── util │ └── shutdown.go ├── config ├── config.go ├── config.yml └── version.go ├── docker-compose.yml ├── go.mod ├── go.sum ├── main.go ├── node ├── cfxclient.go ├── client.go ├── config.go ├── ethclient.go ├── factory.go ├── manager.go ├── manager_monitor.go ├── node.go ├── node_status.go ├── repartition.go ├── repartition_redis.go ├── router.go └── server.go ├── rpc ├── apis.go ├── cache │ ├── cache_cfx.go │ ├── cache_eth.go │ ├── expiry_cache.go │ ├── expiry_cache_test.go │ └── status.go ├── cfx_api.go ├── cfx_api_pubsub.go ├── cfxbridge │ ├── cfx_api.go │ ├── constants.go │ ├── convert.go │ ├── convert_trace.go │ ├── trace_api.go │ ├── trace_builder.go │ ├── txpool_api.go │ └── types.go ├── error.go ├── eth_api.go ├── eth_api_pubsub.go ├── eth_pubsub.go ├── eth_trace_api.go ├── ethbridge │ ├── convert.go │ └── convert_test.go ├── gastation_api.go ├── handler │ ├── cfx_logs.go │ ├── cfx_logs_pruned.go │ ├── cfx_store.go │ ├── eth_logs.go │ ├── eth_store.go │ └── gasstation.go ├── metrics.go ├── net_api.go ├── parity_api.go ├── pos_api.go ├── pubsub.go ├── pubsub_test.go ├── server.go ├── server_middleware.go ├── throttle │ └── ref_count.go ├── trace_api.go ├── txpool_api.go └── web3_api.go ├── store ├── epoch_data.go ├── eth_data.go ├── ext_data.go ├── glue_data.go ├── log.go ├── log_filter.go ├── mysql │ ├── common_partition.go │ ├── config.go │ ├── store.go │ ├── store_block.go │ ├── store_common.go │ ├── store_common_partition.go │ ├── store_common_partition_bn.go │ ├── store_conf.go │ ├── store_contract.go │ ├── store_log.go │ ├── store_log_addr.go │ ├── store_log_big_contract.go │ ├── store_log_filter.go │ ├── store_map_epoch_block.go │ ├── store_pruner.go │ ├── store_ratelimit.go │ ├── 
store_tx.go │ └── store_user.go ├── redis │ ├── models.go │ └── store.go ├── store.go ├── types.go └── variadic_value.go ├── sync ├── catchup │ ├── benchmark.go │ ├── config.go │ ├── syncer.go │ └── worker.go ├── epoch_pivot.go ├── epoch_pivot_test.go ├── epoch_sub.go ├── epoch_window.go ├── epoch_window_test.go ├── prune.go ├── sync_check.go ├── sync_db.go ├── sync_db_test.go ├── sync_eth.go └── sync_kv.go ├── test ├── epoch_validate.go ├── eth_validate.go ├── pubsub_validate.go └── rpc_test.go ├── types ├── gastation.go └── range.go └── util ├── alert ├── alert_test.go ├── dingtalk.go ├── logrus_hook.go └── logrus_hook_test.go ├── blacklist └── contract_addr.go ├── blockchain.go ├── blockchain_test.go ├── encoding.go ├── gasstation ├── estimater.go └── types.go ├── lru.go ├── map.go ├── math.go ├── metrics ├── block.go ├── epoch.go ├── metrics.go ├── percentage.go ├── percentage_time_window.go ├── report.go ├── service │ ├── metrics.go │ ├── registry.go │ ├── server.go │ └── updater.go ├── timer_updater.go └── util.go ├── rate ├── limit.go ├── registry.go ├── registry_reload.go └── strategy.go ├── relay └── relay.go ├── rpc ├── client_cfx.go ├── client_eth.go ├── client_middlewares.go ├── config.go ├── handlers │ ├── handler.go │ ├── ip.go │ └── rate_limit.go ├── middlewares │ ├── billing.go │ ├── log.go │ ├── metrics.go │ ├── rate_limit.go │ └── recover.go └── server.go ├── types.go └── whitelist └── debugger_whitelist.go /.github/workflows/release.yml: -------------------------------------------------------------------------------- 1 | name: Build and publish docker image 2 | 3 | on: 4 | push: 5 | tags: 6 | - 'v*.*.*' 7 | 8 | env: 9 | AWS_REGION: us-west-2 10 | 11 | jobs: 12 | build: 13 | name: Clone, Build, Publish 14 | runs-on: ubuntu-latest 15 | steps: 16 | 17 | - name: Check out repository 18 | uses: actions/checkout@v2 19 | 20 | - name: Set up QEMU 21 | uses: docker/setup-qemu-action@v2 22 | 23 | - name: Set up Docker Buildx 24 | uses: 
docker/setup-buildx-action@v2 25 | 26 | - name: Login to Dockerhub 27 | uses: docker/login-action@v2 28 | with: 29 | username: ${{ secrets.DOCKERHUB_USERNAME }} 30 | password: ${{ secrets.DOCKERHUB_TOKEN }} 31 | 32 | # build and push to aws ecr 33 | - name: Configure AWS credentials 34 | uses: aws-actions/configure-aws-credentials@v4 35 | with: 36 | aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }} 37 | aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }} 38 | aws-region: ${{ env.AWS_REGION }} 39 | 40 | - name: Login to Amazon ECR 41 | id: login-ecr 42 | uses: aws-actions/amazon-ecr-login@v2 43 | 44 | - name: Build image 45 | id: build 46 | env: 47 | ECR_REGISTRY: ${{ steps.login-ecr.outputs.registry }} 48 | ECR_REPOSITORY: rpc-gateway 49 | IMAGE_TAG: ${{ github.ref_name }} 50 | uses: docker/build-push-action@v3 51 | with: 52 | platforms: linux/amd64,linux/arm64 53 | push: true 54 | tags: | 55 | ${{ secrets.DOCKERHUB_USERNAME }}/${{ env.ECR_REPOSITORY }}:${{ env.IMAGE_TAG }} 56 | ${{ secrets.DOCKERHUB_USERNAME }}/${{ env.ECR_REPOSITORY }}:latest 57 | ${{ env.ECR_REGISTRY }}/${{ env.ECR_REPOSITORY }}:${{ env.IMAGE_TAG }} 58 | ${{ env.ECR_REGISTRY }}/${{ env.ECR_REPOSITORY }}:latest 59 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Binaries for programs and plugins 2 | *.exe 3 | *.exe~ 4 | *.dll 5 | *.so 6 | *.dylib 7 | 8 | # Test binary, built with `go test -c` 9 | *.test 10 | 11 | # Output of the go coverage tool, specifically when used with LiteIDE 12 | *.out 13 | 14 | # Dependency directories (remove the comment below to include it) 15 | # vendor/ 16 | 17 | # Build binary 18 | ./bin 19 | confura -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | # builder image 2 | FROM golang:1.16-alpine AS 
builder 3 | RUN apk --no-cache add build-base 4 | RUN mkdir /build 5 | WORKDIR /build 6 | COPY go.mod go.sum ./ 7 | # you may use `GOPROXY` to speedup in Mainland China. 8 | # RUN GOPROXY=https://goproxy.cn,direct go mod download 9 | RUN go mod download 10 | COPY . . 11 | RUN go build -o confura . 12 | 13 | # final target image for multi-stage builds 14 | FROM alpine:3.16 15 | RUN apk --no-cache add ca-certificates 16 | COPY --from=builder /build/confura . 17 | COPY ./config/config.yml ./config.yml 18 | ENTRYPOINT [ "./confura" ] 19 | CMD [ "--help" ] -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2022 Scroll Foundation 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
-------------------------------------------------------------------------------- /LICENSE.confura: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2022 Conflux Network Foundation 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | # The output binary name 2 | BINARY=bin/confura 3 | 4 | # Values to pass for VERSION and BUILD etc., 5 | # eg., git tag 1.0.1 then git commit -am "One more change after the tags" 6 | ifndef VERSION 7 | VERSION=`git describe --tags` # Shall we need --exact-match? 
8 | endif 9 | 10 | BUILD_DATE=`date +%FT%T%z` 11 | GIT_COMMIT=`git rev-parse HEAD` 12 | 13 | # Setup the -ldflags option for go build here, interpolate the variable values 14 | PKG=github.com/scroll-tech/rpc-gateway/config 15 | LDFLAGS=-ldflags "-w -s -X ${PKG}.Version=${VERSION} -X ${PKG}.BuildDate=${BUILD_DATE} -X ${PKG}.GitCommit=${GIT_COMMIT}" 16 | 17 | # Build the project 18 | build: 19 | go build ${LDFLAGS} -o ${BINARY} 20 | 21 | # Install project: copy binaries 22 | install: 23 | go install ${LDFLAGS} 24 | 25 | # Clean project: delete binaries 26 | clean: 27 | @if [ -f ${BINARY} ] ; then rm ${BINARY} ; fi 28 | 29 | .PHONY: build clean install -------------------------------------------------------------------------------- /cmd/data_context.go: -------------------------------------------------------------------------------- 1 | package cmd 2 | 3 | import ( 4 | sdk "github.com/Conflux-Chain/go-conflux-sdk" 5 | "github.com/openweb3/web3go" 6 | "github.com/scroll-tech/rpc-gateway/store" 7 | "github.com/scroll-tech/rpc-gateway/store/mysql" 8 | "github.com/scroll-tech/rpc-gateway/store/redis" 9 | "github.com/scroll-tech/rpc-gateway/util/rpc" 10 | ) 11 | 12 | // storeContext context to hold store instances 13 | type storeContext struct { 14 | cfxDB *mysql.MysqlStore 15 | ethDB *mysql.MysqlStore 16 | cfxCache *redis.RedisStore 17 | } 18 | 19 | func mustInitStoreContext() storeContext { 20 | var ctx storeContext 21 | 22 | // prepare core space db store 23 | if config := mysql.MustNewConfigFromViper(); config.Enabled { 24 | ctx.cfxDB = config.MustOpenOrCreate(mysql.StoreOption{ 25 | Disabler: store.StoreConfig(), 26 | }) 27 | } 28 | 29 | // prepare evm space db store 30 | if ethConfig := mysql.MustNewEthStoreConfigFromViper(); ethConfig.Enabled { 31 | ctx.ethDB = ethConfig.MustOpenOrCreate(mysql.StoreOption{ 32 | Disabler: store.EthStoreConfig(), 33 | }) 34 | } 35 | 36 | // prepare redis store 37 | if redis, ok := redis.MustNewRedisStoreFromViper(store.StoreConfig()); 
ok { 38 | ctx.cfxCache = redis 39 | } 40 | 41 | return ctx 42 | } 43 | 44 | func (ctx *storeContext) Close() { 45 | if ctx.cfxDB != nil { 46 | ctx.cfxDB.Close() 47 | } 48 | 49 | if ctx.ethDB != nil { 50 | ctx.ethDB.Close() 51 | } 52 | 53 | if ctx.cfxCache != nil { 54 | ctx.cfxCache.Close() 55 | } 56 | } 57 | 58 | // syncContext context to hold sdk clients for blockchain interoperation. 59 | type syncContext struct { 60 | storeContext 61 | 62 | syncCfx *sdk.Client 63 | subCfx *sdk.Client 64 | syncEth *web3go.Client 65 | } 66 | 67 | func mustInitSyncContext(storeCtx storeContext) syncContext { 68 | sc := syncContext{storeContext: storeCtx} 69 | 70 | if storeCtx.cfxDB != nil || storeCtx.cfxCache != nil { 71 | sc.syncCfx = rpc.MustNewCfxClientFromViper(rpc.WithClientHookMetrics(true)) 72 | sc.subCfx = rpc.MustNewCfxWsClientFromViper() 73 | } 74 | 75 | if storeCtx.ethDB != nil { 76 | sc.syncEth = rpc.MustNewEthClientFromViper(rpc.WithClientHookMetrics(true)) 77 | } 78 | 79 | return sc 80 | } 81 | 82 | func (ctx *syncContext) Close() { 83 | // Usually, storeContext will be defer closed by itself 84 | // ctx.storeContext.Close() 85 | if ctx.syncCfx != nil { 86 | ctx.syncCfx.Close() 87 | } 88 | 89 | if ctx.subCfx != nil { 90 | ctx.subCfx.Close() 91 | } 92 | 93 | // not provided yet! 
94 | // ctx.syncEth.Close() 95 | } 96 | -------------------------------------------------------------------------------- /cmd/nm.go: -------------------------------------------------------------------------------- 1 | package cmd 2 | 3 | import ( 4 | "context" 5 | "sync" 6 | 7 | "github.com/scroll-tech/rpc-gateway/cmd/util" 8 | "github.com/scroll-tech/rpc-gateway/node" 9 | "github.com/scroll-tech/rpc-gateway/util/rpc" 10 | "github.com/sirupsen/logrus" 11 | "github.com/spf13/cobra" 12 | ) 13 | 14 | var ( 15 | // node management boot options 16 | nmOpt struct { 17 | cfxEnabled bool 18 | ethEnabled bool 19 | } 20 | 21 | nmCmd = &cobra.Command{ 22 | Use: "nm", 23 | Short: "Start node management service, including core space and evm space node managers", 24 | Run: startNodeManagerService, 25 | } 26 | ) 27 | 28 | func init() { 29 | // boot flag for core space 30 | nmCmd.Flags().BoolVar( 31 | &nmOpt.cfxEnabled, "cfx", false, "start core space node manager server", 32 | ) 33 | 34 | // boot flag for evm space 35 | nmCmd.Flags().BoolVar( 36 | &nmOpt.ethEnabled, "eth", false, "start evm space node manager server", 37 | ) 38 | 39 | rootCmd.AddCommand(nmCmd) 40 | } 41 | 42 | func startNodeManagerService(*cobra.Command, []string) { 43 | if !nmOpt.cfxEnabled && !nmOpt.ethEnabled { 44 | logrus.Fatal("No node mananger server specified") 45 | } 46 | 47 | ctx, cancel := context.WithCancel(context.Background()) 48 | var wg sync.WaitGroup 49 | 50 | if nmOpt.cfxEnabled { 51 | startNativeSpaceNodeServer(ctx, &wg) 52 | } 53 | 54 | if nmOpt.ethEnabled { 55 | startEvmSpaceNodeServer(ctx, &wg) 56 | } 57 | 58 | util.GracefulShutdown(&wg, cancel) 59 | } 60 | 61 | func startNativeSpaceNodeServer(ctx context.Context, wg *sync.WaitGroup) { 62 | server, endpoint := node.Factory().CreatRpcServer() 63 | go server.MustServeGraceful(ctx, wg, endpoint, rpc.ProtocolHttp) 64 | } 65 | 66 | func startEvmSpaceNodeServer(ctx context.Context, wg *sync.WaitGroup) { 67 | server, endpoint := 
node.EthFactory().CreatRpcServer() 68 | go server.MustServeGraceful(ctx, wg, endpoint, rpc.ProtocolHttp) 69 | } 70 | -------------------------------------------------------------------------------- /cmd/root.go: -------------------------------------------------------------------------------- 1 | package cmd 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "os" 7 | "sync" 8 | 9 | "github.com/scroll-tech/rpc-gateway/cmd/test" 10 | "github.com/scroll-tech/rpc-gateway/cmd/util" 11 | "github.com/scroll-tech/rpc-gateway/config" 12 | "github.com/sirupsen/logrus" 13 | "github.com/spf13/cobra" 14 | ) 15 | 16 | var ( 17 | flagVersion bool 18 | nodeServerEnabled bool 19 | rpcServerEnabled bool 20 | syncServerEnabled bool 21 | 22 | rootCmd = &cobra.Command{ 23 | Use: "confura", 24 | Short: "Ethereum Infura like Public RPC Service on Conflux Network.", 25 | Run: start, 26 | } 27 | ) 28 | 29 | func init() { 30 | // print version and exit 31 | rootCmd.Flags().BoolVarP( 32 | &flagVersion, "version", "v", false, "If true, print version and exit", 33 | ) 34 | 35 | // boot flag for node management service 36 | rootCmd.Flags().BoolVar( 37 | &nodeServerEnabled, "nm", false, "whether to start node management service", 38 | ) 39 | 40 | // boot flag for public RPC service 41 | rootCmd.Flags().BoolVar( 42 | &rpcServerEnabled, "rpc", false, "whether to start Confura public RPC service", 43 | ) 44 | 45 | // boot flag for sync service (accompanied with prune) 46 | rootCmd.Flags().BoolVar( 47 | &syncServerEnabled, "sync", false, "whether to start data sync/prune service", 48 | ) 49 | 50 | rootCmd.AddCommand(test.Cmd) 51 | } 52 | 53 | func start(cmd *cobra.Command, args []string) { 54 | // dump version 55 | if flagVersion { 56 | config.DumpVersionInfo() 57 | return 58 | } 59 | 60 | if !nodeServerEnabled && !rpcServerEnabled && !syncServerEnabled { 61 | logrus.Fatal("No services started") 62 | } 63 | 64 | ctx, cancel := context.WithCancel(context.Background()) 65 | wg := &sync.WaitGroup{} 66 | 67 | 
storeCtx := mustInitStoreContext() 68 | defer storeCtx.Close() 69 | 70 | if syncServerEnabled { // start sync 71 | syncCtx := mustInitSyncContext(storeCtx) 72 | defer syncCtx.Close() 73 | 74 | startSyncServiceAdaptively(ctx, wg, syncCtx) 75 | } 76 | 77 | if rpcServerEnabled { // start RPC 78 | startNativeSpaceRpcServer(ctx, wg, storeCtx) 79 | startEvmSpaceRpcServer(ctx, wg, storeCtx) 80 | startNativeSpaceBridgeRpcServer(ctx, wg) 81 | startDebugSpaceRpcServer(ctx, wg) 82 | } 83 | 84 | if nodeServerEnabled { // start node management 85 | startNativeSpaceNodeServer(ctx, wg) 86 | startEvmSpaceNodeServer(ctx, wg) 87 | } 88 | 89 | util.GracefulShutdown(wg, cancel) 90 | } 91 | 92 | // Execute is the command line entrypoint. 93 | func Execute() { 94 | if err := rootCmd.Execute(); err != nil { 95 | fmt.Println(err) 96 | os.Exit(1) 97 | } 98 | } 99 | -------------------------------------------------------------------------------- /cmd/test/cfx.go: -------------------------------------------------------------------------------- 1 | package test 2 | 3 | import ( 4 | "math" 5 | "time" 6 | 7 | "github.com/scroll-tech/rpc-gateway/cmd/util" 8 | "github.com/scroll-tech/rpc-gateway/test" 9 | "github.com/sirupsen/logrus" 10 | "github.com/spf13/cobra" 11 | ) 12 | 13 | var ( 14 | // core space epoch data validation configuration 15 | validConf test.EVConfig 16 | 17 | testCmd = &cobra.Command{ 18 | Use: "cfx", 19 | Short: "validate if epoch data from core space JSON-RPC proxy complies with fullnode", 20 | Run: startTest, 21 | } 22 | ) 23 | 24 | func init() { 25 | // fullnode endpoint URL 26 | testCmd.Flags().StringVarP( 27 | &validConf.FullnodeRpcEndpoint, 28 | "fn-endpoint", "f", "", "fullnode rpc endpoint used as benchmark", 29 | ) 30 | testCmd.MarkFlagRequired("fn-endpoint") 31 | 32 | // confura RPC endpoint 33 | testCmd.Flags().StringVarP( 34 | &validConf.InfuraRpcEndpoint, 35 | "infura-endpoint", "u", "", "infura rpc endpoint to be validated against", 36 | ) 37 | 
testCmd.MarkFlagRequired("infura-endpoint") 38 | 39 | // start point of epoch 40 | testCmd.Flags().Uint64VarP( 41 | &validConf.EpochScanFrom, 42 | "start-epoch", "e", math.MaxUint64, "the epoch from where scan validation will start", 43 | ) 44 | 45 | // scan interval 46 | testCmd.Flags().DurationVarP( 47 | &validConf.ScanInterval, 48 | "scan-interval", "c", 1*time.Second, "the interval for each scan validation", 49 | ) 50 | 51 | // sampling interval 52 | testCmd.Flags().DurationVarP( 53 | &validConf.SamplingInterval, 54 | "sampling-interval", "a", 10*time.Second, "the interval for each sampling validation", 55 | ) 56 | 57 | // sampling epoch type 58 | testCmd.Flags().StringVarP( 59 | &validConf.SamplingEpochType, 60 | "sampling-epoch-type", "t", "lf", `sampling epoch type: 61 | lm(latest_mined)/ls(latest_state)/lc(latest_confirmed)/lf(latest_finalized)/lcp(latest_checkpoint) 62 | `, 63 | ) 64 | 65 | Cmd.AddCommand(testCmd) 66 | } 67 | 68 | func startTest(cmd *cobra.Command, args []string) { 69 | if len(validConf.FullnodeRpcEndpoint) == 0 || len(validConf.InfuraRpcEndpoint) == 0 { 70 | logrus.Fatal("Fullnode && infura rpc endpoint must be configured for epoch data test/validation") 71 | } 72 | 73 | logrus.Info("Starting epoch data validator...") 74 | 75 | validator := test.MustNewEpochValidator(&validConf) 76 | defer validator.Destroy() 77 | 78 | util.StartAndGracefulShutdown(validator.Run) 79 | } 80 | -------------------------------------------------------------------------------- /cmd/test/eth.go: -------------------------------------------------------------------------------- 1 | package test 2 | 3 | import ( 4 | "math" 5 | "time" 6 | 7 | "github.com/scroll-tech/rpc-gateway/cmd/util" 8 | "github.com/scroll-tech/rpc-gateway/test" 9 | "github.com/sirupsen/logrus" 10 | "github.com/spf13/cobra" 11 | ) 12 | 13 | var ( 14 | // evm space block data validation configuration 15 | ethValidConf test.EthValidConfig 16 | 17 | ethTestCmd = &cobra.Command{ 18 | Use: "eth", 19 | 
Short: "validate if epoch data from evm space JSON-RPC proxy complies with fullnode", 20 | Run: startEthTest, 21 | } 22 | ) 23 | 24 | func init() { 25 | // fullnode endpoint 26 | ethTestCmd.Flags().StringVarP( 27 | ðValidConf.FullnodeRpcEndpoint, 28 | "fn-endpoint", "f", "", "fullnode rpc endpoint used as benchmark", 29 | ) 30 | ethTestCmd.MarkFlagRequired("fn-endpoint") 31 | 32 | // confura RPC endpoint 33 | ethTestCmd.Flags().StringVarP( 34 | ðValidConf.InfuraRpcEndpoint, 35 | "infura-endpoint", "u", "", "infura rpc endpoint to be validated against", 36 | ) 37 | ethTestCmd.MarkFlagRequired("infura-endpoint") 38 | 39 | // start point of block 40 | ethTestCmd.Flags().Uint64VarP( 41 | ðValidConf.ScanFromBlock, 42 | "start-block", "b", math.MaxUint64, "the block from where scan validation will start", 43 | ) 44 | 45 | // scan interval 46 | ethTestCmd.Flags().DurationVarP( 47 | ðValidConf.ScanInterval, 48 | "scan-interval", "c", 1*time.Second, "the interval for each scan validation", 49 | ) 50 | 51 | // sampling interval 52 | ethTestCmd.Flags().DurationVarP( 53 | ðValidConf.SamplingInterval, 54 | "sampling-interval", "a", 15*time.Second, "the interval for each sampling validation", 55 | ) 56 | 57 | Cmd.AddCommand(ethTestCmd) 58 | } 59 | 60 | func startEthTest(cmd *cobra.Command, args []string) { 61 | if len(ethValidConf.FullnodeRpcEndpoint) == 0 || len(ethValidConf.InfuraRpcEndpoint) == 0 { 62 | logrus.Fatal("Fullnode && infura rpc endpoint must be configured for ETH data test/validation") 63 | } 64 | 65 | logrus.Info("Starting ETH data validator...") 66 | 67 | validator := test.MustNewEthValidator(ðValidConf) 68 | defer validator.Destroy() 69 | 70 | util.StartAndGracefulShutdown(validator.Run) 71 | } 72 | -------------------------------------------------------------------------------- /cmd/test/root.go: -------------------------------------------------------------------------------- 1 | package test 2 | 3 | import ( 4 | "github.com/spf13/cobra" 5 | ) 6 | 7 | var Cmd 
= &cobra.Command{ 8 | Use: "test", 9 | Short: "Start data validity test for JSON-RPC and Pub/Sub proxy including core space and evm space", 10 | Run: func(cmd *cobra.Command, args []string) { 11 | cmd.Help() 12 | }, 13 | } 14 | -------------------------------------------------------------------------------- /cmd/test/ws.go: -------------------------------------------------------------------------------- 1 | package test 2 | 3 | import ( 4 | "github.com/scroll-tech/rpc-gateway/cmd/util" 5 | "github.com/scroll-tech/rpc-gateway/test" 6 | "github.com/sirupsen/logrus" 7 | "github.com/spf13/cobra" 8 | ) 9 | 10 | var ( 11 | // core space Pub/Sub validation configuration 12 | psValidConf test.PSVConfig 13 | 14 | wsTestCmd = &cobra.Command{ 15 | Use: "ws", 16 | Short: "validate if epoch data from core space websocket Pub/Sub proxy complies with fullnode", 17 | Run: startWSTest, 18 | } 19 | ) 20 | 21 | func init() { 22 | // fullnode endpoint 23 | wsTestCmd.Flags().StringVarP( 24 | &psValidConf.FullnodeRpcEndpoint, 25 | "fn-endpoint", "f", "", "fullnode rpc endpoint used as benchmark", 26 | ) 27 | wsTestCmd.MarkFlagRequired("fn-endpoint") 28 | 29 | // confura RPC endpoint 30 | wsTestCmd.Flags().StringVarP( 31 | &psValidConf.InfuraRpcEndpoint, 32 | "infura-endpoint", "u", "", "infura rpc endpoint to be validated against", 33 | ) 34 | wsTestCmd.MarkFlagRequired("infura-endpoint") 35 | 36 | Cmd.AddCommand(wsTestCmd) 37 | } 38 | 39 | func startWSTest(cmd *cobra.Command, args []string) { 40 | if len(psValidConf.FullnodeRpcEndpoint) == 0 || len(psValidConf.InfuraRpcEndpoint) == 0 { 41 | logrus.Fatal("Fullnode && infura websocket rpc endpoint must be configured for pubsub test/validation") 42 | } 43 | 44 | logrus.Info("Starting websocket pubsub validator...") 45 | 46 | validator := test.MustNewPubSubValidator(&psValidConf) 47 | defer validator.Destroy() 48 | 49 | util.StartAndGracefulShutdown(validator.Run) 50 | } 51 | 
-------------------------------------------------------------------------------- /cmd/util/shutdown.go: -------------------------------------------------------------------------------- 1 | package util 2 | 3 | import ( 4 | "context" 5 | "os" 6 | "os/signal" 7 | "sync" 8 | "syscall" 9 | 10 | "github.com/sirupsen/logrus" 11 | ) 12 | 13 | // GracefulShutdown supports to clean up goroutines after termination signal captured. 14 | func GracefulShutdown(wg *sync.WaitGroup, cancel context.CancelFunc) { 15 | // Handle sigterm and await termChan signal 16 | termChan := make(chan os.Signal, 1) 17 | signal.Notify(termChan, syscall.SIGTERM, syscall.SIGINT) 18 | 19 | // Wait for SIGTERM to be captured 20 | <-termChan 21 | logrus.Info("SIGTERM/SIGINT received, shutdown process initiated") 22 | 23 | // Cancel to notify active goroutines to clean up. 24 | cancel() 25 | 26 | logrus.Info("Waiting for shutdown...") 27 | wg.Wait() 28 | 29 | logrus.Info("Shutdown gracefully") 30 | } 31 | 32 | // StartAndGracefulShutdown starts to run the specified task in a goroutine and wait for termination 33 | // signal to shutdown gracefully. 34 | // 35 | // Note, this method is not suitable for any non-blocking task that release resources in defer way. 
36 | func StartAndGracefulShutdown(run func(ctx context.Context, wg *sync.WaitGroup)) { 37 | ctx, cancel := context.WithCancel(context.Background()) 38 | var wg sync.WaitGroup 39 | 40 | if run != nil { 41 | go run(ctx, &wg) 42 | } 43 | 44 | GracefulShutdown(&wg, cancel) 45 | } 46 | -------------------------------------------------------------------------------- /config/config.go: -------------------------------------------------------------------------------- 1 | package config 2 | 3 | import ( 4 | "strings" 5 | 6 | "github.com/Conflux-Chain/go-conflux-util/viper" 7 | "github.com/ethereum/go-ethereum/log" 8 | "github.com/pkg/errors" 9 | "github.com/scroll-tech/rpc-gateway/util/alert" 10 | "github.com/scroll-tech/rpc-gateway/util/metrics" 11 | "github.com/sirupsen/logrus" 12 | 13 | // For go-ethereum v1.0.15, node pkg imports internal/debug pkg which will inits log root 14 | // with `log.GlogHandler`. If we import node pkg from somewhere else, it will override our 15 | // custom handler defined within function `adaptGethLogger`. 16 | _ "github.com/ethereum/go-ethereum/node" 17 | ) 18 | 19 | // Read system enviroment variables prefixed with "INFURA". 20 | // eg., `INFURA_LOG_LEVEL` will override "log.level" config item from the config file. 
21 | const viperEnvPrefix = "infura" 22 | 23 | func init() { 24 | // init viper 25 | viper.MustInit(viperEnvPrefix) 26 | // init logger 27 | initLogger() 28 | // init metrics 29 | metrics.Init() 30 | // init alert 31 | alert.InitDingRobot() 32 | } 33 | 34 | func initLogger() { 35 | var config struct { 36 | Level string `default:"info"` 37 | ForceColor bool 38 | } 39 | viper.MustUnmarshalKey("log", &config) 40 | 41 | // set log level 42 | level, err := logrus.ParseLevel(config.Level) 43 | if err != nil { 44 | logrus.WithError(err).Fatalf("invalid log level configured: %v", config.Level) 45 | } 46 | logrus.SetLevel(level) 47 | 48 | if config.ForceColor { 49 | logrus.SetFormatter(&logrus.TextFormatter{ 50 | ForceColors: true, 51 | FullTimestamp: true, 52 | }) 53 | } 54 | 55 | // add alert hook for logrus fatal/warn/error level 56 | hookLevels := []logrus.Level{logrus.FatalLevel, logrus.WarnLevel, logrus.ErrorLevel} 57 | logrus.AddHook(alert.NewLogrusAlertHook(hookLevels)) 58 | 59 | // customize logger here... 60 | adaptGethLogger() 61 | } 62 | 63 | // adaptGethLogger adapt geth logger (which is used by go sdk) to be attached to logrus. 
64 | func adaptGethLogger() { 65 | formatter := log.TerminalFormat(false) 66 | 67 | // geth level => logrus levl 68 | logrusLevelsMap := map[log.Lvl]logrus.Level{ 69 | log.LvlCrit: logrus.FatalLevel, 70 | log.LvlError: logrus.ErrorLevel, 71 | log.LvlWarn: logrus.DebugLevel, 72 | log.LvlInfo: logrus.DebugLevel, 73 | log.LvlDebug: logrus.DebugLevel, 74 | log.LvlTrace: logrus.TraceLevel, 75 | } 76 | 77 | log.Root().SetHandler(log.FuncHandler(func(r *log.Record) error { 78 | logLvl, ok := logrusLevelsMap[r.Lvl] 79 | if !ok { 80 | return errors.New("unsupported log level") 81 | } 82 | 83 | if logLvl <= logrus.GetLevel() { 84 | logStr := string(formatter.Format(r)) 85 | abbrStr := logStr 86 | 87 | firstLineEnd := strings.IndexRune(logStr, '\n') 88 | if firstLineEnd > 0 { // extract first line as abstract 89 | abbrStr = logStr[:firstLineEnd] 90 | } 91 | 92 | logrus.WithField("gethWrappedLogs", logStr).Log(logLvl, abbrStr) 93 | } 94 | 95 | return nil 96 | })) 97 | } 98 | -------------------------------------------------------------------------------- /config/version.go: -------------------------------------------------------------------------------- 1 | package config 2 | 3 | import ( 4 | "runtime" 5 | 6 | "github.com/sirupsen/logrus" 7 | ) 8 | 9 | var ( 10 | Version string 11 | GitCommit string 12 | 13 | BuildDate string 14 | BuildOS string 15 | BuildArch string 16 | ) 17 | 18 | func init() { 19 | BuildOS = runtime.GOOS 20 | BuildArch = runtime.GOARCH 21 | } 22 | 23 | func DumpVersionInfo() { 24 | strFormat := "%-12v%v\n" 25 | 26 | logrus.Infof(strFormat, "Version:", Version) 27 | logrus.Infof(strFormat, "Git Commit:", GitCommit) 28 | logrus.Infof(strFormat, "Build OS:", BuildOS) 29 | logrus.Infof(strFormat, "Build Arch:", BuildArch) 30 | logrus.Infof(strFormat, "Build Date:", BuildDate) 31 | } 32 | -------------------------------------------------------------------------------- /docker-compose.yml: 
-------------------------------------------------------------------------------- 1 | version: '3.7' 2 | 3 | services: 4 | # node management 5 | node-management: 6 | build: . 7 | image: conflux/confura:latest 8 | command: nm --cfx 9 | ports: 10 | - 22530:22530 11 | restart: unless-stopped 12 | environment: 13 | - INFURA_LOG_LEVEL=debug 14 | container_name: confura-node-management 15 | 16 | ethnode-management: 17 | image: conflux/confura:latest 18 | command: nm --eth 19 | ports: 20 | - 28530:28530 21 | restart: unless-stopped 22 | environment: 23 | - INFURA_LOG_LEVEL=debug 24 | container_name: confura-ethnode-management 25 | 26 | # blockchain sync 27 | chain-sync: 28 | image: conflux/confura:latest 29 | command: sync --db --kv 30 | restart: unless-stopped 31 | depends_on: 32 | - db 33 | - redis 34 | environment: 35 | - INFURA_LOG_LEVEL=debug 36 | - INFURA_STORE_MYSQL_ENABLED=true 37 | - INFURA_STORE_MYSQL_DSN=root:root@tcp(db:3306)/confura_cfx?parseTime=true 38 | - INFURA_STORE_REDIS_ENABLED=true 39 | - INFURA_STORE_REDIS_URL=redis://redis:6379/0 40 | container_name: confura-sync 41 | 42 | ethchain-sync: 43 | image: conflux/confura:latest 44 | command: sync --eth 45 | restart: unless-stopped 46 | depends_on: 47 | - db 48 | environment: 49 | - INFURA_LOG_LEVEL=debug 50 | - INFURA_ETHSTORE_MYSQL_ENABLED=true 51 | - INFURA_ETHSTORE_MYSQL_DSN=root:root@tcp(db:3306)/confura_eth?parseTime=true 52 | - INFURA_SYNC_ETH_FROMBLOCK=36935000 53 | container_name: confura-ethsync 54 | 55 | # rpc proxy 56 | rpc-proxy: 57 | image: conflux/confura:latest 58 | command: rpc --cfx 59 | ports: 60 | - "22537:22537" 61 | restart: unless-stopped 62 | depends_on: 63 | - db 64 | - redis 65 | - node-management 66 | environment: 67 | - INFURA_LOG_LEVEL=debug 68 | - INFURA_STORE_MYSQL_ENABLED=true 69 | - INFURA_STORE_MYSQL_DSN=root:root@tcp(db:3306)/confura_cfx?parseTime=true 70 | - INFURA_STORE_REDIS_ENABLED=true 71 | - INFURA_STORE_REDIS_URL=redis://redis:6379/0 72 | - 
INFURA_NODE_ROUTER_NODERPCURL=http://node-management:22530 73 | container_name: confura-rpc 74 | 75 | ethrpc-proxy: 76 | image: conflux/confura:latest 77 | command: rpc --eth 78 | ports: 79 | - "28545:28545" 80 | restart: unless-stopped 81 | depends_on: 82 | - db 83 | - ethnode-management 84 | environment: 85 | - INFURA_LOG_LEVEL=debug 86 | - INFURA_STORE_MYSQL_ENABLED=true 87 | - INFURA_STORE_MYSQL_DSN=root:root@tcp(db:3306)/confura_eth?parseTime=true 88 | - INFURA_NODE_ROUTER_ETHNODERPCURL=http://ethnode-management:28530 89 | container_name: confura-ethrpc 90 | 91 | # data validator 92 | data-validator: 93 | image: conflux/confura:latest 94 | command: test cfx -f http://test.confluxrpc.com -u http://rpc-proxy:22537 95 | restart: on-failure:3 96 | volumes: 97 | - validator-data:/root 98 | depends_on: 99 | - rpc-proxy 100 | - chain-sync 101 | environment: 102 | - INFURA_LOG_LEVEL=debug 103 | container_name: confura-data-validator 104 | 105 | ethdata-validator: 106 | image: conflux/confura:latest 107 | command: test eth -f http://evmtestnet.confluxrpc.com -u http://ethrpc-proxy:28545 -b 36935000 108 | restart: on-failure:3 109 | volumes: 110 | - validator-data:/root 111 | depends_on: 112 | - ethrpc-proxy 113 | - ethchain-sync 114 | environment: 115 | - INFURA_LOG_LEVEL=debug 116 | container_name: confura-ethdata-validator 117 | 118 | # middlewares 119 | db: 120 | image: mysql:5.7 121 | ports: 122 | - "3306" 123 | restart: unless-stopped 124 | environment: 125 | - MYSQL_ROOT_PASSWORD=root 126 | volumes: 127 | - db-data:/var/lib/mysql 128 | container_name: confura-database 129 | 130 | redis: 131 | image: redis:6.2 132 | ports: 133 | - "6379" 134 | restart: unless-stopped 135 | volumes: 136 | - redis-data:/data 137 | container_name: confura-redis 138 | 139 | # others 140 | networks: 141 | default: 142 | name: confura-internal 143 | 144 | volumes: 145 | db-data: {} 146 | redis-data: {} 147 | validator-data: {} 
-------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module github.com/scroll-tech/rpc-gateway 2 | 3 | go 1.15 4 | 5 | require ( 6 | github.com/Conflux-Chain/go-conflux-sdk v1.4.2 7 | github.com/Conflux-Chain/go-conflux-util v0.0.0-20220907035343-2d1233bccd70 8 | github.com/Conflux-Chain/web3pay-service v0.0.0-20220915034912-b5c10ef3163a 9 | github.com/buraksezer/consistent v0.9.0 10 | github.com/cespare/xxhash v1.1.0 11 | github.com/ethereum/go-ethereum v1.10.15 12 | github.com/go-redis/redis/v8 v8.8.2 13 | github.com/go-sql-driver/mysql v1.6.0 14 | github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d 15 | github.com/montanaflynn/stats v0.6.6 16 | github.com/openweb3/go-rpc-provider v0.2.9 17 | github.com/openweb3/web3go v0.2.0 18 | github.com/patrickmn/go-cache v2.1.0+incompatible 19 | github.com/pkg/errors v0.9.1 20 | github.com/royeo/dingrobot v1.0.1-0.20191230075228-c90a788ca8fd 21 | github.com/sirupsen/logrus v1.8.1 22 | github.com/spf13/cobra v1.5.0 23 | github.com/spf13/viper v1.10.0 24 | github.com/stretchr/testify v1.7.0 25 | github.com/zealws/golang-ring v0.0.0-20210116075443-7c86fdb43134 26 | go.uber.org/multierr v1.6.0 27 | golang.org/x/time v0.0.0-20220411224347-583f2d630306 28 | gorm.io/driver/mysql v1.3.6 29 | gorm.io/gorm v1.23.8 30 | ) 31 | 32 | // for debugging development 33 | // replace github.com/Conflux-Chain/go-conflux-sdk => ../go-conflux-sdk 34 | // replace github.com/Conflux-Chain/web3pay-service => ../web3pay-service 35 | 36 | replace github.com/openweb3/web3go => github.com/scroll-tech/web3go v0.0.0-20240705130914-989891731ea2 37 | 38 | replace github.com/openweb3/go-rpc-provider => github.com/scroll-tech/go-rpc-provider v0.0.0-20230619123848-99cc70301fd1 39 | -------------------------------------------------------------------------------- /main.go: 
-------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | // ensure viper based configuration initialized at the very beginning 5 | _ "github.com/scroll-tech/rpc-gateway/config" 6 | 7 | "github.com/scroll-tech/rpc-gateway/cmd" 8 | ) 9 | 10 | func main() { 11 | cmd.Execute() 12 | } 13 | -------------------------------------------------------------------------------- /node/cfxclient.go: -------------------------------------------------------------------------------- 1 | package node 2 | 3 | import ( 4 | "context" 5 | 6 | sdk "github.com/Conflux-Chain/go-conflux-sdk" 7 | "github.com/scroll-tech/rpc-gateway/util/rpc" 8 | ) 9 | 10 | // CfxClientProvider provides core space client by router. 11 | type CfxClientProvider struct { 12 | *clientProvider 13 | } 14 | 15 | func NewCfxClientProvider(router Router) *CfxClientProvider { 16 | cp := &CfxClientProvider{ 17 | clientProvider: newClientProvider(router, func(url string) (interface{}, error) { 18 | return rpc.NewCfxClient(url, rpc.WithClientHookMetrics(true)) 19 | }), 20 | } 21 | 22 | for grp := range urlCfg { 23 | cp.registerGroup(grp) 24 | } 25 | 26 | return cp 27 | } 28 | 29 | // GetClientByIP gets client of normal HTTP group by remote IP address. 30 | func (p *CfxClientProvider) GetClientByIP(ctx context.Context) (sdk.ClientOperator, error) { 31 | return p.GetClientByIPGroup(ctx, GroupCfxHttp) 32 | } 33 | 34 | // GetClientByIPGroup gets client of specific group by remote IP address. 
35 | func (p *CfxClientProvider) GetClientByIPGroup(ctx context.Context, group Group) (sdk.ClientOperator, error) { 36 | remoteAddr := remoteAddrFromContext(ctx) 37 | 38 | client, err := p.getClient(remoteAddr, group) 39 | if err != nil { 40 | return nil, err 41 | } 42 | 43 | return client.(sdk.ClientOperator), nil 44 | } 45 | -------------------------------------------------------------------------------- /node/client.go: -------------------------------------------------------------------------------- 1 | package node 2 | 3 | import ( 4 | "context" 5 | "sync" 6 | 7 | "github.com/pkg/errors" 8 | "github.com/scroll-tech/rpc-gateway/util" 9 | "github.com/scroll-tech/rpc-gateway/util/rpc" 10 | "github.com/scroll-tech/rpc-gateway/util/rpc/handlers" 11 | "github.com/sirupsen/logrus" 12 | ) 13 | 14 | var ( 15 | ErrClientUnavailable = errors.New("no full node available") 16 | ) 17 | 18 | // clientFactory factory method to create RPC client for fullnode proxy. 19 | type clientFactory func(url string) (interface{}, error) 20 | 21 | // clientProvider provides different RPC client based on request IP to achieve load balance 22 | // or with node group for resource isolation. Generally, it is used by RPC server to delegate 23 | // RPC requests to full node cluster. 
24 | type clientProvider struct { 25 | router Router 26 | factory clientFactory 27 | mutex sync.Mutex 28 | 29 | // group => node name => RPC client 30 | clients map[Group]*util.ConcurrentMap 31 | } 32 | 33 | func newClientProvider(router Router, factory clientFactory) *clientProvider { 34 | return &clientProvider{ 35 | router: router, 36 | factory: factory, 37 | clients: make(map[Group]*util.ConcurrentMap), 38 | } 39 | } 40 | 41 | // registerGroup registers node group 42 | func (p *clientProvider) registerGroup(group Group) *util.ConcurrentMap { 43 | if _, ok := p.clients[group]; !ok { 44 | p.mutex.Lock() 45 | defer p.mutex.Unlock() 46 | 47 | if _, ok := p.clients[group]; !ok { // double check 48 | p.clients[group] = &util.ConcurrentMap{} 49 | } 50 | } 51 | 52 | return p.clients[group] 53 | } 54 | 55 | // getClient gets client based on keyword and node group type. 56 | func (p *clientProvider) getClient(key string, group Group) (interface{}, error) { 57 | clients, ok := p.clients[group] 58 | if !ok { 59 | return nil, errors.Errorf("Unknown node group %v", group) 60 | } 61 | 62 | url := p.router.Route(group, []byte(key)) 63 | 64 | logger := logrus.WithFields(logrus.Fields{ 65 | "key": key, 66 | "group": group, 67 | }) 68 | 69 | if len(url) == 0 { 70 | logger.WithError(ErrClientUnavailable).Error("Failed to get full node client from provider") 71 | return nil, ErrClientUnavailable 72 | } 73 | 74 | nodeName := rpc.Url2NodeName(url) 75 | 76 | logger = logger.WithFields(logrus.Fields{ 77 | "node": nodeName, 78 | "url": url, 79 | }) 80 | logger.Trace("Route RPC requests") 81 | 82 | client, loaded, err := clients.LoadOrStoreFnErr(nodeName, func(interface{}) (interface{}, error) { 83 | // TODO improvements required 84 | // 1. Necessary retry? (but longer timeout). Better to let user side to decide. 85 | // 2. Different metrics for different full nodes. 
86 | return p.factory(url) 87 | }) 88 | 89 | if err != nil { 90 | err := errors.WithMessage(err, "bad full node connection") 91 | logger.WithError(err).Error("Failed to get full node client from provider") 92 | 93 | return nil, err 94 | } 95 | 96 | if !loaded { 97 | logger.Info("Succeeded to connect to full node") 98 | } 99 | 100 | return client, nil 101 | } 102 | 103 | func remoteAddrFromContext(ctx context.Context) string { 104 | if ip, ok := handlers.GetIPAddressFromContext(ctx); ok { 105 | return ip 106 | } 107 | 108 | return "unknown_ip" 109 | } 110 | -------------------------------------------------------------------------------- /node/config.go: -------------------------------------------------------------------------------- 1 | package node 2 | 3 | import ( 4 | "time" 5 | 6 | "github.com/Conflux-Chain/go-conflux-util/viper" 7 | "github.com/buraksezer/consistent" 8 | "github.com/sirupsen/logrus" 9 | ) 10 | 11 | // Node manager component always uses configuration from viper. 12 | 13 | var cfg config 14 | var urlCfg map[Group]UrlConfig 15 | var ethUrlCfg map[Group]UrlConfig 16 | 17 | func init() { 18 | viper.MustUnmarshalKey("node", &cfg) 19 | logrus.WithField("config", cfg).Debug("Node manager configurations loaded.") 20 | 21 | urlCfg = map[Group]UrlConfig{ 22 | GroupCfxHttp: { 23 | Nodes: cfg.URLs, 24 | Failover: cfg.Router.ChainedFailover.URL, 25 | }, 26 | GroupCfxWs: { 27 | Nodes: cfg.WSURLs, 28 | Failover: cfg.Router.ChainedFailover.WSURL, 29 | }, 30 | GroupCfxArchives: { 31 | Nodes: cfg.ArchiveNodes, 32 | }, 33 | GroupCfxLogs: { 34 | Nodes: cfg.LogNodes, 35 | }, 36 | } 37 | 38 | ethUrlCfg = map[Group]UrlConfig{ 39 | GroupEthHttp: { 40 | Nodes: cfg.EthURLs, 41 | Failover: cfg.Router.ChainedFailover.EthURL, 42 | }, 43 | GroupEthWs: { 44 | Nodes: cfg.EthWSURLs, 45 | Failover: cfg.Router.ChainedFailover.EthWSURL, 46 | }, 47 | GroupEthLogs: { 48 | Nodes: cfg.EthLogNodes, 49 | }, 50 | GroupDebugHttp: { 51 | Nodes: cfg.DebugURLs, 52 | }, 53 | } 54 | 55 | } 56 | 
// config holds node manager configurations, unmarshalled from the viper
// key "node" (see init in this package).
type config struct {
	// RPC server endpoints of the node management service
	// (core space / evm space respectively).
	Endpoint    string `default:":22530"`
	EthEndpoint string `default:":28530"`

	// Full node URL lists; each list backs one routing group
	// (see urlCfg / ethUrlCfg built in init):
	URLs         []string // core space HTTP nodes (GroupCfxHttp)
	EthURLs      []string // evm space HTTP nodes (GroupEthHttp)
	DebugURLs    []string // evm space debug nodes (GroupDebugHttp)
	WSURLs       []string // core space websocket nodes (GroupCfxWs)
	EthWSURLs    []string // evm space websocket nodes (GroupEthWs)
	LogNodes     []string // core space log-dedicated nodes (GroupCfxLogs)
	EthLogNodes  []string // evm space log-dedicated nodes (GroupEthLogs)
	ArchiveNodes []string // core space archive nodes (GroupCfxArchives)

	// Consistent hash ring tuning, consumed by HashRingRaw.
	HashRing struct {
		PartitionCount    int     `default:"15739"`
		ReplicationFactor int     `default:"51"`
		Load              float64 `default:"1.25"`
	}

	// Health monitoring settings.
	Monitor struct {
		// Polling interval of the health monitor.
		Interval time.Duration `default:"1s"`

		// Thresholds for declaring a node unhealthy.
		Unhealth struct {
			Failures          uint64        `default:"3"`
			EpochsFallBehind  uint64        `default:"30"`
			LatencyPercentile float64       `default:"0.9"`
			MaxLatency        time.Duration `default:"3s"`
		}

		// Thresholds for declaring a node recovered.
		Recover struct {
			RemindInterval time.Duration `default:"5m"`
			SuccessCounter uint64        `default:"60"`
		}
	}

	// Routing settings.
	Router struct {
		// Redis URL for the repartition resolver; empty disables it.
		RedisURL string
		// Node management RPC endpoints used for delegated routing.
		NodeRPCURL    string
		EthNodeRPCURL string

		// Failover URLs chained behind the primary groups.
		ChainedFailover struct {
			URL      string
			WSURL    string
			EthURL   string
			EthWSURL string
		}
	}
}
19 | type EthClientProvider struct { 20 | *clientProvider 21 | } 22 | 23 | func NewEthClientProvider(router Router) *EthClientProvider { 24 | cp := &EthClientProvider{ 25 | clientProvider: newClientProvider(router, func(url string) (interface{}, error) { 26 | client, err := rpc.NewEthClient(url, rpc.WithClientHookMetrics(true)) 27 | if err != nil { 28 | return nil, err 29 | } 30 | 31 | return &Web3goClient{client, url}, nil 32 | }), 33 | } 34 | 35 | for grp := range ethUrlCfg { 36 | cp.registerGroup(grp) 37 | } 38 | 39 | return cp 40 | } 41 | 42 | // GetClientByIP gets client of normal HTTP group by remote IP address. 43 | func (p *EthClientProvider) GetClientByIP(ctx context.Context) (*Web3goClient, error) { 44 | return p.GetClientByIPGroup(ctx, GroupEthHttp) 45 | } 46 | 47 | // GetClientByIPGroup gets client of specific group by remote IP address. 48 | func (p *EthClientProvider) GetClientByIPGroup(ctx context.Context, group Group) (*Web3goClient, error) { 49 | remoteAddr := remoteAddrFromContext(ctx) 50 | client, err := p.getClient(remoteAddr, group) 51 | 52 | return client.(*Web3goClient), err 53 | } 54 | 55 | func (p *EthClientProvider) GetClientRandom() (*Web3goClient, error) { 56 | return p.GetClientRandomByGroup(GroupEthHttp); 57 | } 58 | 59 | func (p *EthClientProvider) GetClientRandomByGroup(group Group) (*Web3goClient, error) { 60 | key := fmt.Sprintf("random_key_%v", rand.Int()) 61 | client, err := p.getClient(key, group) 62 | 63 | return client.(*Web3goClient), err 64 | } 65 | -------------------------------------------------------------------------------- /node/factory.go: -------------------------------------------------------------------------------- 1 | package node 2 | 3 | import ( 4 | "sync" 5 | 6 | "github.com/scroll-tech/rpc-gateway/util/rpc" 7 | ) 8 | 9 | var ( 10 | cfxFactory *factory 11 | cfxOnce sync.Once 12 | 13 | ethFactory *factory 14 | ethOnce sync.Once 15 | ) 16 | 17 | // Factory returns core space instance factory 18 | func Factory() 
// CreatRpcServer creates the node manager RPC server and returns it
// together with the endpoint it should listen on.
// NOTE(review): the name is misspelled ("Creat"); it is kept as-is because
// renaming would break existing callers. Consider adding a correctly
// spelled wrapper and deprecating this one.
func (f *factory) CreatRpcServer() (*rpc.Server, string) {
	return NewServer(f.nodeFactory, f.groupConf), f.rpcSrvEndpoint
}
11 | 12 | // HealthyEpoch returns the middle epoch height collected from managed cluster nodes, 13 | // which is also regarded as the overall health epoch height. 14 | func (m *Manager) HealthyEpoch() uint64 { 15 | return atomic.LoadUint64(&m.midEpoch) 16 | } 17 | 18 | // ReportEpoch reports latest epoch height of managed node to manager. 19 | func (m *Manager) ReportEpoch(nodeName string, epoch uint64) { 20 | m.mu.Lock() 21 | defer m.mu.Unlock() 22 | 23 | m.nodeName2Epochs[nodeName] = epoch 24 | if len(m.nodeName2Epochs) == 1 { 25 | atomic.StoreUint64(&m.midEpoch, epoch) 26 | return 27 | } 28 | 29 | var epochs []int 30 | for _, epoch := range m.nodeName2Epochs { 31 | epochs = append(epochs, int(epoch)) 32 | } 33 | 34 | sort.Ints(epochs) 35 | 36 | atomic.StoreUint64(&m.midEpoch, uint64(epochs[len(epochs)/2])) 37 | } 38 | 39 | // ReportUnhealthy reports unhealthy status of managed node to manager. 40 | func (m *Manager) ReportUnhealthy(nodeName string, remind bool, reason error) { 41 | logger := logrus.WithError(reason).WithField("node", nodeName) 42 | 43 | // alert 44 | if remind { 45 | logger.Error("Node not recovered") 46 | } else { 47 | logger.Error("Node became unhealthy") 48 | } 49 | 50 | // remove unhealthy node from hash ring 51 | m.hashRing.Remove(nodeName) 52 | 53 | // FIXME update repartition cache if configured 54 | } 55 | 56 | // ReportHealthy reports healthy status of managed node to manager. 
// noopRepartitionResolver is a RepartitionResolver that remembers nothing:
// every lookup misses and every write is discarded.
type noopRepartitionResolver struct{}

// Get always reports a cache miss.
func (r *noopRepartitionResolver) Get(key uint64) (string, bool) {
	return "", false
}

// Put discards the given mapping.
func (r *noopRepartitionResolver) Put(key uint64, value string) {
	// intentionally empty
}
r.items.MoveToBack(item) 65 | 66 | return info.node, true 67 | } 68 | 69 | func (r *SimpleRepartitionResolver) Put(key uint64, value string) { 70 | r.mu.Lock() 71 | defer r.mu.Unlock() 72 | 73 | r.gc() 74 | 75 | info := partitionInfo{ 76 | key: key, 77 | node: value, 78 | deadline: time.Now().Add(r.ttl), 79 | } 80 | 81 | if value, ok := r.key2Items.Load(key); ok { 82 | // update item value 83 | item := value.(*list.Element) 84 | item.Value = info 85 | r.items.MoveToBack(item) 86 | } else { 87 | // add new item 88 | item := r.items.PushBack(info) 89 | r.key2Items.Store(key, item) 90 | } 91 | } 92 | 93 | // gc removes the expired items. 94 | func (r *SimpleRepartitionResolver) gc() { 95 | now := time.Now() 96 | 97 | for { 98 | front := r.items.Front() 99 | if front == nil { 100 | break 101 | } 102 | 103 | info := front.Value.(partitionInfo) 104 | if info.deadline.After(now) { 105 | break 106 | } 107 | 108 | r.items.Remove(front) 109 | r.key2Items.Delete(info.key) 110 | } 111 | } 112 | -------------------------------------------------------------------------------- /node/repartition_redis.go: -------------------------------------------------------------------------------- 1 | package node 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "strings" 7 | "time" 8 | 9 | "github.com/go-redis/redis/v8" 10 | "github.com/sirupsen/logrus" 11 | ) 12 | 13 | // RedisRepartitionResolver implements RepartitionResolver 14 | type RedisRepartitionResolver struct { 15 | client *redis.Client 16 | ttl time.Duration 17 | ctx context.Context 18 | keyPrefix string 19 | logger *logrus.Entry 20 | } 21 | 22 | func NewRedisRepartitionResolver(client *redis.Client, ttl time.Duration, keyPrefix string) *RedisRepartitionResolver { 23 | return &RedisRepartitionResolver{ 24 | client: client, 25 | ttl: ttl, 26 | ctx: context.Background(), 27 | keyPrefix: keyPrefix, 28 | logger: logrus.WithField("module", "RedisRepartitionResolver"), 29 | } 30 | } 31 | 32 | func (r *RedisRepartitionResolver) Get(key 
// redisRepartitionKey builds the redis key for a repartition entry, of the
// form "node:repartition:<prefix...>:key:<key>".
func redisRepartitionKey(key uint64, prefixs ...string) string {
	parts := make([]string, 0, len(prefixs)+1)
	parts = append(parts, prefixs...)
	parts = append(parts, "key")

	return fmt.Sprintf("node:repartition:%v:%v", strings.Join(parts, ":"), key)
}
22 | type api struct { 23 | managers map[Group]*Manager 24 | } 25 | 26 | func (api *api) Add(group Group, url string) { 27 | if m, ok := api.managers[group]; ok { 28 | m.Add(url) 29 | } 30 | } 31 | 32 | func (api *api) Remove(group Group, url string) { 33 | if m, ok := api.managers[group]; ok { 34 | m.Remove(url) 35 | } 36 | } 37 | 38 | // List returns the URL list of all nodes. 39 | func (api *api) List(group Group) []string { 40 | m, ok := api.managers[group] 41 | if !ok { 42 | return nil 43 | } 44 | 45 | var nodes []string 46 | 47 | for _, n := range m.List() { 48 | nodes = append(nodes, n.Url()) 49 | } 50 | 51 | return nodes 52 | } 53 | 54 | func (api *api) Status(group Group, url *string) (res []Status) { 55 | mgr := api.managers[group] 56 | if mgr == nil { // no group found 57 | return 58 | } 59 | 60 | if url != nil { // get specific node status 61 | if n := mgr.Get(*url); !util.IsInterfaceValNil(n) { 62 | res = append(res, n.Status()) 63 | } 64 | 65 | return 66 | } 67 | 68 | // get all group node status 69 | for _, n := range mgr.List() { 70 | res = append(res, n.Status()) 71 | } 72 | 73 | return 74 | } 75 | 76 | // List returns the URL list of all nodes. 77 | func (api *api) ListAll() map[Group][]string { 78 | result := make(map[Group][]string) 79 | 80 | for group := range api.managers { 81 | result[group] = api.List(group) 82 | } 83 | 84 | return result 85 | } 86 | 87 | // Route implements the Router interface. It routes the specified key to any node 88 | // and return the node URL. 
89 | func (api *api) Route(group Group, key hexutil.Bytes) string { 90 | if m, ok := api.managers[group]; ok { 91 | return m.Route(key) 92 | } 93 | 94 | return "" 95 | } 96 | -------------------------------------------------------------------------------- /rpc/cache/cache_cfx.go: -------------------------------------------------------------------------------- 1 | package cache 2 | 3 | import ( 4 | "time" 5 | 6 | sdk "github.com/Conflux-Chain/go-conflux-sdk" 7 | "github.com/ethereum/go-ethereum/common/hexutil" 8 | ) 9 | 10 | var CfxDefault = NewCfx() 11 | 12 | // CfxCache memory cache for some core space RPC methods 13 | type CfxCache struct { 14 | *StatusCache 15 | 16 | priceCache *expiryCache 17 | versionCache *expiryCache 18 | } 19 | 20 | func NewCfx() *CfxCache { 21 | return &CfxCache{ 22 | StatusCache: NewStatusCache(), 23 | 24 | priceCache: newExpiryCache(3 * time.Second), 25 | versionCache: newExpiryCache(time.Minute), 26 | } 27 | } 28 | 29 | func (cache *CfxCache) GetGasPrice(cfx sdk.ClientOperator) (*hexutil.Big, error) { 30 | val, err := cache.priceCache.getOrUpdate(func() (interface{}, error) { 31 | return cfx.GetGasPrice() 32 | }) 33 | 34 | if err != nil { 35 | return nil, err 36 | } 37 | 38 | return val.(*hexutil.Big), nil 39 | } 40 | 41 | func (cache *CfxCache) GetClientVersion(cfx sdk.ClientOperator) (string, error) { 42 | val, err := cache.versionCache.getOrUpdate(func() (interface{}, error) { 43 | return cfx.GetClientVersion() 44 | }) 45 | 46 | if err != nil { 47 | return "", err 48 | } 49 | 50 | return val.(string), nil 51 | } 52 | -------------------------------------------------------------------------------- /rpc/cache/cache_eth.go: -------------------------------------------------------------------------------- 1 | package cache 2 | 3 | import ( 4 | "math/big" 5 | "time" 6 | 7 | "github.com/ethereum/go-ethereum/common/hexutil" 8 | "github.com/openweb3/web3go" 9 | "github.com/scroll-tech/rpc-gateway/node" 10 | 
// NewEth creates an EthCache with a per-method expiry policy.
func NewEth() *EthCache {
	return &EthCache{
		netVersionCache:    newExpiryCache(time.Minute),
		clientVersionCache: newExpiryCache(time.Minute),
		// chain id is immutable for a given network, so cache it
		// effectively forever (100 years).
		chainIdCache: newExpiryCache(time.Hour * 24 * 365 * 100),
		// gas price changes quickly; keep it only briefly
		priceCache: newExpiryCache(3 * time.Second),
		// block number advances every block and differs per node, so it is
		// cached per node with a very short TTL
		blockNumberCache: newNodeExpiryCaches(time.Second),
	}
}
func (cache *EthCache) GetBlockNumber(client *node.Web3goClient) (*hexutil.Big, error) { 83 | nodeName := rpc.Url2NodeName(client.URL) 84 | 85 | val, err := cache.blockNumberCache.getOrUpdate(nodeName, func() (interface{}, error) { 86 | return client.Eth.BlockNumber() 87 | }) 88 | 89 | if err != nil { 90 | return nil, err 91 | } 92 | 93 | return (*hexutil.Big)(val.(*big.Int)), nil 94 | } 95 | -------------------------------------------------------------------------------- /rpc/cache/expiry_cache.go: -------------------------------------------------------------------------------- 1 | package cache 2 | 3 | import ( 4 | "sync" 5 | "sync/atomic" 6 | "time" 7 | 8 | "github.com/scroll-tech/rpc-gateway/util" 9 | ) 10 | 11 | // for atomic load/store in cache. 12 | type cacheValue struct { 13 | value interface{} 14 | expireAt time.Time 15 | } 16 | 17 | // expiryCache is used to cache value with specified expiration time. 18 | type expiryCache struct { 19 | value atomic.Value 20 | timeout time.Duration 21 | mu sync.Mutex 22 | } 23 | 24 | func newExpiryCache(timeout time.Duration) *expiryCache { 25 | return &expiryCache{ 26 | timeout: timeout, 27 | } 28 | } 29 | 30 | func (cache *expiryCache) get() (interface{}, bool) { 31 | return cache.getAt(time.Now()) 32 | } 33 | 34 | func (cache *expiryCache) getAt(time time.Time) (interface{}, bool) { 35 | value := cache.value.Load() 36 | if value == nil { 37 | return nil, false 38 | } 39 | 40 | val := value.(cacheValue) 41 | if val.expireAt.Before(time) { 42 | return nil, false 43 | } 44 | 45 | return val.value, true 46 | } 47 | 48 | func (cache *expiryCache) getOrUpdate(updateFunc func() (interface{}, error)) (interface{}, error) { 49 | return cache.getOrUpdateAt(time.Now(), updateFunc) 50 | } 51 | 52 | func (cache *expiryCache) getOrUpdateAt(time time.Time, updateFunc func() (interface{}, error)) (interface{}, error) { 53 | // cache value not expired 54 | if val, ok := cache.getAt(time); ok { 55 | return val, nil 56 | } 57 | 58 | // 
otherwise, query from fullnode and cache 59 | cache.mu.Lock() 60 | defer cache.mu.Unlock() 61 | 62 | // double check for concurrency 63 | if val, ok := cache.getAt(time); ok { 64 | return val, nil 65 | } 66 | 67 | val, err := updateFunc() 68 | if err != nil { 69 | return nil, err 70 | } 71 | 72 | // update cache 73 | cache.value.Store(cacheValue{ 74 | value: val, 75 | expireAt: time.Add(cache.timeout), 76 | }) 77 | 78 | return val, nil 79 | } 80 | 81 | // nodeExpiryCaches is used for multiple nodes to cache data respectively. 82 | type nodeExpiryCaches struct { 83 | node2Caches util.ConcurrentMap // node name => expiryCache 84 | timeout time.Duration 85 | } 86 | 87 | func newNodeExpiryCaches(timeout time.Duration) *nodeExpiryCaches { 88 | return &nodeExpiryCaches{ 89 | timeout: timeout, 90 | } 91 | } 92 | 93 | func (caches *nodeExpiryCaches) getOrUpdate(node string, updateFunc func() (interface{}, error)) (interface{}, error) { 94 | val, _ := caches.node2Caches.LoadOrStoreFn(node, func(interface{}) interface{} { 95 | return newExpiryCache(caches.timeout) 96 | }) 97 | 98 | return val.(*expiryCache).getOrUpdate(updateFunc) 99 | } 100 | -------------------------------------------------------------------------------- /rpc/cache/expiry_cache_test.go: -------------------------------------------------------------------------------- 1 | package cache 2 | 3 | import ( 4 | "errors" 5 | "testing" 6 | "time" 7 | 8 | "github.com/stretchr/testify/assert" 9 | ) 10 | 11 | func TestExpiryCacheGet(t *testing.T) { 12 | cache := newExpiryCache(time.Minute) 13 | 14 | // no data by default 15 | val, ok := cache.get() 16 | assert.Nil(t, val) 17 | assert.False(t, ok) 18 | 19 | // add data into cache 20 | cache.getOrUpdate(func() (interface{}, error) { 21 | return "data", nil 22 | }) 23 | 24 | // could get data 25 | val, ok = cache.get() 26 | assert.Equal(t, "data", val.(string)) 27 | assert.True(t, ok) 28 | 29 | // timeout 30 | val, ok = cache.getAt(time.Now().Add(time.Minute + 
time.Nanosecond)) 31 | assert.Nil(t, val) 32 | assert.False(t, ok) 33 | } 34 | 35 | func TestExpiryCacheGetOrUpdateWithError(t *testing.T) { 36 | cache := newExpiryCache(time.Minute) 37 | 38 | fooErr := errors.New("foo error") 39 | 40 | val, err := cache.getOrUpdate(func() (interface{}, error) { 41 | return "data", fooErr 42 | }) 43 | 44 | assert.Nil(t, val) 45 | assert.Equal(t, fooErr, err) 46 | } 47 | 48 | func TestExpiryCacheGetOrUpdate(t *testing.T) { 49 | cache := newExpiryCache(time.Minute) 50 | 51 | // cache data 52 | val, err := cache.getOrUpdate(func() (interface{}, error) { 53 | return "data", nil 54 | }) 55 | assert.Equal(t, "data", val.(string)) 56 | assert.Nil(t, err) 57 | 58 | // get cached data 59 | val, err = cache.getOrUpdate(func() (interface{}, error) { 60 | return "data - 2", nil 61 | }) 62 | assert.Equal(t, "data", val.(string)) 63 | assert.Nil(t, err) 64 | 65 | // timeout and get new cached data 66 | val, err = cache.getOrUpdateAt(time.Now().Add(time.Minute+time.Nanosecond), func() (interface{}, error) { 67 | return "data - 2", nil 68 | }) 69 | assert.Equal(t, "data - 2", val.(string)) 70 | assert.Nil(t, err) 71 | } 72 | -------------------------------------------------------------------------------- /rpc/cache/status.go: -------------------------------------------------------------------------------- 1 | package cache 2 | 3 | import ( 4 | "time" 5 | 6 | sdk "github.com/Conflux-Chain/go-conflux-sdk" 7 | "github.com/Conflux-Chain/go-conflux-sdk/types" 8 | "github.com/ethereum/go-ethereum/common/hexutil" 9 | "github.com/scroll-tech/rpc-gateway/util/rpc" 10 | ) 11 | 12 | // StatusCache memory cache for core space status related RPC method suites. 
// StatusCache memory cache for core space status related RPC method suites.
// Status is cached per fullnode, since different nodes may report different
// chain views.
type StatusCache struct {
	inner *nodeExpiryCaches // per-node expiry caches keyed by node name
}

// NewStatusCache creates a StatusCache with a one-second TTL per node.
func NewStatusCache() *StatusCache {
	return &StatusCache{
		// epoch increase every 1 second and different nodes have different epoch number
		inner: newNodeExpiryCaches(time.Second),
	}
}

// GetStatus returns the node status, served from the per-node cache while
// fresh and refetched from the fullnode otherwise.
func (c *StatusCache) GetStatus(cfx sdk.ClientOperator) (types.Status, error) {
	// key by node name so nodes do not overwrite each other's status
	nodeName := rpc.Url2NodeName(cfx.GetNodeURL())

	val, err := c.inner.getOrUpdate(nodeName, func() (interface{}, error) {
		return cfx.GetStatus()
	})

	if err != nil {
		return types.Status{}, err
	}

	return val.(types.Status), nil
}

// GetEpochNumber resolves an epoch argument (nil, a number, or a tag) to a
// concrete epoch number, answering from the cached status where possible and
// falling back to the fullnode for cases the status cannot answer.
func (c *StatusCache) GetEpochNumber(cfx sdk.ClientOperator, epoch *types.Epoch) (*hexutil.Big, error) {
	// "earliest" is always epoch 0, no lookup needed
	if types.EpochEarliest.Equals(epoch) {
		return types.NewBigInt(0), nil
	}

	status, err := c.GetStatus(cfx)
	if err != nil {
		return nil, err
	}

	// latest mined by default
	if epoch == nil {
		return types.NewBigInt(uint64(status.EpochNumber)), nil
	}

	// epoch number
	if num, ok := epoch.ToInt(); ok {
		if num.Uint64() <= uint64(status.EpochNumber) {
			return types.NewBigIntByRaw(num), nil
		}

		// requested epoch is ahead of the cached view; let the fullnode decide
		return cfx.GetEpochNumber(epoch)
	}

	// default epoch tags
	switch {
	case types.EpochLatestCheckpoint.Equals(epoch):
		return types.NewBigInt(uint64(status.LatestCheckpoint)), nil
	case types.EpochLatestFinalized.Equals(epoch):
		return types.NewBigInt(uint64(status.LatestFinalized)), nil
	case types.EpochLatestConfirmed.Equals(epoch):
		return types.NewBigInt(uint64(status.LatestConfirmed)), nil
	case types.EpochLatestState.Equals(epoch):
		return types.NewBigInt(uint64(status.LatestState)), nil
	}

	// unrecognized tag: forward to the fullnode
	return cfx.GetEpochNumber(epoch)
}
var (
	// ETH space only accept latest_state as block number. Other epoch tags, e.g. latest_checkpoint,
	// latest_confirmed and latest_mined are not supported.
	ErrEpochUnsupported = errors.New("epoch not supported")

	// ETH space only have pivot blocks as a list, and do not require to query non-pivot blocks.
	// To compatible with CFX space RPC, bridge service will check the assumption against pivot block.
	ErrInvalidBlockAssumption = errors.New("invalid block assumption")
)

var (
	// HexBig0 is a shared zero big-integer value used as a placeholder.
	HexBig0 = types.NewBigInt(0)
)

var (
	// Shared non-nil empty collections returned in place of nil — presumably so
	// JSON-encoded RPC responses render as [] rather than null; verify against callers.
	emptyDepositList       = []types.DepositInfo{}
	emptyVoteList          = []types.VoteStakeInfo{}
	emptyStorageChangeList = []types.StorageChange{}
	emptyTraces            = []types.LocalizedTrace{}
	emptyTxTraces          = []types.LocalizedTransactionTrace{}
)
types.Hash) (*types.LocalizedBlockTrace, error) { 24 | ethBlockHash := *blockHash.ToCommonHash() 25 | ethBlock, err := api.ethClient.Eth.BlockByHash(ethBlockHash, false) 26 | if err != nil { 27 | return nil, err 28 | } 29 | 30 | if ethBlock == nil { 31 | return nil, nil 32 | } 33 | 34 | bnh := ethTypes.BlockNumberOrHashWithHash(ethBlockHash, true) 35 | traces, err := api.ethClient.Trace.Blocks(bnh) 36 | if err != nil { 37 | return nil, err 38 | } 39 | 40 | if traces == nil { 41 | return nil, nil 42 | } 43 | 44 | var builder BlockTraceBuilder 45 | for i := range traces { 46 | cfxTrace, cfxTraceResult := ConvertTrace(&traces[i], api.ethNetworkId) 47 | builder.Append(cfxTrace, cfxTraceResult, traces[i].Subtraces) 48 | } 49 | 50 | return &types.LocalizedBlockTrace{ 51 | TransactionTraces: builder.Build(), 52 | EpochHash: blockHash, 53 | EpochNumber: *types.NewBigIntByRaw(ethBlock.Number), 54 | BlockHash: blockHash, 55 | }, nil 56 | } 57 | 58 | func (api *TraceAPI) Filter(ctx context.Context, filter types.TraceFilter) ([]types.LocalizedTrace, error) { 59 | // not supported yet 60 | return emptyTraces, nil 61 | } 62 | 63 | func (api *TraceAPI) Transaction(ctx context.Context, txHash types.Hash) ([]types.LocalizedTrace, error) { 64 | traces, err := api.ethClient.Trace.Transactions(*txHash.ToCommonHash()) 65 | if err != nil { 66 | return nil, err 67 | } 68 | 69 | if traces == nil { 70 | return nil, nil 71 | } 72 | 73 | var builder TraceBuilder 74 | for i := range traces { 75 | cfxTrace, cfxTraceResult := ConvertTrace(&traces[i], api.ethNetworkId) 76 | builder.Append(cfxTrace, cfxTraceResult, traces[i].Subtraces) 77 | } 78 | 79 | return builder.Build(), nil 80 | } 81 | -------------------------------------------------------------------------------- /rpc/cfxbridge/trace_builder.go: -------------------------------------------------------------------------------- 1 | package cfxbridge 2 | 3 | import ( 4 | "container/list" 5 | 6 | "github.com/Conflux-Chain/go-conflux-sdk/types" 
7 | "github.com/sirupsen/logrus" 8 | ) 9 | 10 | type stackedTraceResult struct { 11 | traceResult *types.LocalizedTrace 12 | subTraces uint 13 | } 14 | 15 | // TraceBuilder builds traces in stack way and thread unsafe. 16 | type TraceBuilder struct { 17 | traces []types.LocalizedTrace 18 | stackedResults *list.List 19 | } 20 | 21 | func (tb *TraceBuilder) Build() []types.LocalizedTrace { 22 | if tb.traces == nil { 23 | return emptyTraces 24 | } 25 | 26 | return tb.traces 27 | } 28 | 29 | func (tb *TraceBuilder) Append(trace, traceResult *types.LocalizedTrace, subTraces uint) { 30 | // E.g. reward & suicide trace not supported in Conflux. 31 | if trace == nil { 32 | return 33 | } 34 | 35 | tb.traces = append(tb.traces, *trace) 36 | 37 | // E.g. internal_transfer_action trace has no result trace. 38 | if traceResult == nil { 39 | return 40 | } 41 | 42 | if subTraces == 0 { 43 | tb.traces = append(tb.traces, *traceResult) 44 | tb.pop() 45 | } else { 46 | tb.push(traceResult, subTraces) 47 | } 48 | } 49 | 50 | func (tb *TraceBuilder) push(traceResult *types.LocalizedTrace, subTraces uint) { 51 | // Lazy initialize the stack, but thread unsafe. 
52 | if tb.stackedResults == nil { 53 | tb.stackedResults = list.New() 54 | } 55 | 56 | tb.stackedResults.PushBack(&stackedTraceResult{ 57 | traceResult: traceResult, 58 | subTraces: subTraces, 59 | }) 60 | } 61 | 62 | func (tb *TraceBuilder) pop() { 63 | // No item pushed into stack before 64 | if tb.stackedResults == nil { 65 | return 66 | } 67 | 68 | // No pending trace result to handle 69 | topEle := tb.stackedResults.Back() 70 | if topEle == nil { 71 | return 72 | } 73 | 74 | item := topEle.Value.(*stackedTraceResult) 75 | 76 | // Should never happen, but make code robust 77 | if item.subTraces == 0 { 78 | logrus.WithField("tx", item.traceResult.TransactionHash.String()).Error("Failed to pop due to invalid subtraces") 79 | return 80 | } 81 | 82 | item.subTraces-- 83 | 84 | // There are remaining sub traces that unhandled 85 | if item.subTraces > 0 { 86 | return 87 | } 88 | 89 | // All sub traces handled and pop the trace result 90 | tb.traces = append(tb.traces, *item.traceResult) 91 | tb.stackedResults.Remove(topEle) 92 | } 93 | 94 | type TransactionTraceBuilder struct { 95 | txTrace types.LocalizedTransactionTrace 96 | builder TraceBuilder 97 | } 98 | 99 | func (ttb *TransactionTraceBuilder) Build() (*types.LocalizedTransactionTrace, bool) { 100 | if len(ttb.txTrace.TransactionHash) == 0 { 101 | return nil, false 102 | } 103 | 104 | ttb.txTrace.Traces = ttb.builder.Build() 105 | return &ttb.txTrace, true 106 | } 107 | 108 | func (ttb *TransactionTraceBuilder) Append(trace, traceResult *types.LocalizedTrace, subTraces uint) bool { 109 | if trace == nil { 110 | // ignore nil trace and continue to append other traces 111 | return true 112 | } 113 | 114 | if len(ttb.txTrace.TransactionHash) == 0 { 115 | // initialize transaction hash and position with the first trace. 
116 | ttb.txTrace.TransactionHash = *trace.TransactionHash 117 | if trace.TransactionPosition != nil { 118 | ttb.txTrace.TransactionPosition = *trace.TransactionPosition 119 | } 120 | } else if ttb.txTrace.TransactionHash != *trace.TransactionHash { 121 | return false 122 | } 123 | 124 | ttb.builder.Append(trace, traceResult, subTraces) 125 | 126 | return true 127 | } 128 | 129 | type BlockTraceBuilder struct { 130 | txTraces []types.LocalizedTransactionTrace 131 | builer TransactionTraceBuilder 132 | } 133 | 134 | func (btb *BlockTraceBuilder) Build() []types.LocalizedTransactionTrace { 135 | btb.seal() 136 | 137 | if btb.txTraces == nil { 138 | return emptyTxTraces 139 | } 140 | 141 | return btb.txTraces 142 | } 143 | 144 | func (btb *BlockTraceBuilder) Append(trace, traceResult *types.LocalizedTrace, subTraces uint) { 145 | if trace == nil { 146 | return 147 | } 148 | 149 | if btb.builer.Append(trace, traceResult, subTraces) { 150 | return 151 | } 152 | 153 | btb.seal() 154 | btb.builer.Append(trace, traceResult, subTraces) 155 | } 156 | 157 | func (btb *BlockTraceBuilder) seal() { 158 | if txTrace, ok := btb.builer.Build(); ok { 159 | btb.txTraces = append(btb.txTraces, *txTrace) 160 | btb.builer = TransactionTraceBuilder{} 161 | } 162 | } 163 | -------------------------------------------------------------------------------- /rpc/cfxbridge/txpool_api.go: -------------------------------------------------------------------------------- 1 | package cfxbridge 2 | 3 | import ( 4 | "context" 5 | 6 | "github.com/ethereum/go-ethereum/common/hexutil" 7 | "github.com/ethereum/go-ethereum/rpc" 8 | "github.com/openweb3/web3go" 9 | "github.com/openweb3/web3go/types" 10 | ) 11 | 12 | type TxpoolAPI struct { 13 | ethClient *web3go.Client 14 | } 15 | 16 | func NewTxpoolAPI(ethClient *web3go.Client) *TxpoolAPI { 17 | return &TxpoolAPI{ethClient} 18 | } 19 | 20 | func (api *TxpoolAPI) NextNonce(ctx context.Context, address EthAddress) (val *hexutil.Big, err error) { 21 | 
pendingBlock := types.BlockNumberOrHashWithNumber(rpc.PendingBlockNumber) 22 | 23 | nonce, err := api.ethClient.Eth.TransactionCount(address.value, &pendingBlock) 24 | if err != nil { 25 | return nil, err 26 | } 27 | 28 | return (*hexutil.Big)(nonce), nil 29 | } 30 | -------------------------------------------------------------------------------- /rpc/error.go: -------------------------------------------------------------------------------- 1 | package rpc 2 | 3 | import ( 4 | "github.com/pkg/errors" 5 | "github.com/scroll-tech/rpc-gateway/store" 6 | ) 7 | 8 | // rpc errors conform to fullnode 9 | 10 | var ( 11 | errInvalidLogFilter = errors.Errorf( 12 | "Filter must provide one of the following: %v, %v, %v", 13 | "(1) an epoch range through `fromEpoch` and `toEpoch`", 14 | "(2) a block number range through `fromBlock` and `toBlock`", 15 | "(3) a set of block hashes through `blockHashes`", 16 | ) 17 | 18 | errInvalidLogFilterBlockRange = errors.New( 19 | "invalid block range (from block larger than to block)", 20 | ) 21 | 22 | errInvalidLogFilterEpochRange = errors.New( 23 | "invalid epoch range (from epoch larger than to epoch)", 24 | ) 25 | 26 | errInvalidEthLogFilter = errors.Errorf( 27 | "Filter must provide one of the following: %v, %v", 28 | "(1) a block number range through `fromBlock` and `toBlock`", 29 | "(2) a set of block hashes through `blockHash`", 30 | ) 31 | ) 32 | 33 | func errExceedLogFilterBlockHashLimit(size int) error { 34 | return errors.Errorf( 35 | "filter.block_hashes can contain up to %v hashes; %v were provided.", 36 | store.MaxLogBlockHashesSize, size, 37 | ) 38 | } 39 | -------------------------------------------------------------------------------- /rpc/eth_trace_api.go: -------------------------------------------------------------------------------- 1 | package rpc 2 | 3 | import ( 4 | "context" 5 | 6 | "github.com/ethereum/go-ethereum/common" 7 | "github.com/openweb3/web3go/types" 8 | ) 9 | 10 | // ethTraceAPI provides evm space trace 
RPC proxy API. 11 | type ethTraceAPI struct{} 12 | 13 | func (api *ethTraceAPI) Block(ctx context.Context, blockNumOrHash types.BlockNumberOrHash) ([]types.LocalizedTrace, error) { 14 | return GetEthClientFromContext(ctx).Trace.Blocks(blockNumOrHash) 15 | } 16 | 17 | func (api *ethTraceAPI) Filter(ctx context.Context, filter types.TraceFilter) ([]types.LocalizedTrace, error) { 18 | return GetEthClientFromContext(ctx).Trace.Filter(filter) 19 | } 20 | 21 | func (api *ethTraceAPI) Transaction(ctx context.Context, txHash common.Hash) ([]types.LocalizedTrace, error) { 22 | return GetEthClientFromContext(ctx).Trace.Transactions(txHash) 23 | } 24 | -------------------------------------------------------------------------------- /rpc/ethbridge/convert_test.go: -------------------------------------------------------------------------------- 1 | package ethbridge 2 | 3 | import ( 4 | "os" 5 | "testing" 6 | 7 | sdk "github.com/Conflux-Chain/go-conflux-sdk" 8 | "github.com/ethereum/go-ethereum/common" 9 | "github.com/ethereum/go-ethereum/rpc" 10 | "github.com/openweb3/web3go" 11 | "github.com/pkg/errors" 12 | "github.com/scroll-tech/rpc-gateway/rpc/cfxbridge" 13 | "github.com/stretchr/testify/assert" 14 | ) 15 | 16 | var ( 17 | ethHttpNode = "http://evmtestnet.confluxrpc.com" 18 | cfxHttpNode = "http://test.confluxrpc.com" 19 | 20 | ethClient *web3go.Client 21 | cfxClient sdk.ClientOperator 22 | 23 | ethNetworkId *uint64 24 | ) 25 | 26 | func setup() error { 27 | var err error 28 | 29 | if ethClient, err = web3go.NewClient(ethHttpNode); err != nil { 30 | return errors.WithMessage(err, "failed to new web3go client") 31 | } 32 | 33 | if ethNetworkId, err = ethClient.Eth.ChainId(); err != nil { 34 | return errors.WithMessage(err, "failed to get eth chainid") 35 | } 36 | 37 | if cfxClient, err = sdk.NewClient(cfxHttpNode); err != nil { 38 | return errors.WithMessage(err, "failed to new cfx client") 39 | } 40 | 41 | return nil 42 | } 43 | 44 | func teardown() (err error) { 45 | if 
ethClient != nil { 46 | ethClient.Provider().Close() 47 | } 48 | 49 | if cfxClient != nil { 50 | cfxClient.Close() 51 | } 52 | 53 | return nil 54 | } 55 | 56 | func TestMain(m *testing.M) { 57 | if err := setup(); err != nil { 58 | panic(errors.WithMessage(err, "failed to setup")) 59 | } 60 | 61 | code := m.Run() 62 | 63 | if err := teardown(); err != nil { 64 | panic(errors.WithMessage(err, "failed to tear down")) 65 | } 66 | 67 | os.Exit(code) 68 | } 69 | 70 | func TestConvertBlockHeader(t *testing.T) { 71 | blockNum := rpc.BlockNumber(64630500) 72 | 73 | ethBlock, err := ethClient.Eth.BlockByNumber(blockNum, false) 74 | assert.NoError(t, err) 75 | 76 | t.Log("logsBloom for original eth block: ", ethBlock.LogsBloom) 77 | 78 | convertedCfxBlock := cfxbridge.ConvertBlockSummary(ethBlock, uint32(*ethNetworkId)) 79 | t.Log("logsBloom for converted cfx block: ", convertedCfxBlock.DeferredLogsBloomHash) 80 | 81 | convertedEthBlock := ConvertBlockHeader(&convertedCfxBlock.BlockHeader, nil) 82 | t.Log("logsBloom for converted eth block: ", convertedEthBlock.LogsBloom) 83 | 84 | assert.Equal(t, ethBlock.LogsBloom, convertedEthBlock.LogsBloom) 85 | } 86 | 87 | func TestConvertReceipt(t *testing.T) { 88 | txHash := "0xff2438365f72360a0eb60faf217b4d2ea2cc3599d59f5141113b68a58802452c" 89 | ethTxHash := common.HexToHash(txHash) 90 | 91 | ethReceipt, err := ethClient.Eth.TransactionReceipt(ethTxHash) 92 | assert.NoError(t, err) 93 | 94 | convertedCfxReceipt := cfxbridge.ConvertReceipt(ethReceipt, uint32(*ethNetworkId)) 95 | t.Log("logsBloom for converted cfx block: ", convertedCfxReceipt.LogsBloom) 96 | 97 | convertedEthReceipt := ConvertReceipt(convertedCfxReceipt, nil) 98 | t.Log("logsBloom for converted eth block: ", convertedEthReceipt.LogsBloom) 99 | 100 | assert.Equal(t, ethReceipt.LogsBloom, convertedEthReceipt.LogsBloom) 101 | } 102 | -------------------------------------------------------------------------------- /rpc/gastation_api.go: 
-------------------------------------------------------------------------------- 1 | package rpc 2 | 3 | import ( 4 | "context" 5 | 6 | "github.com/scroll-tech/rpc-gateway/rpc/handler" 7 | "github.com/scroll-tech/rpc-gateway/types" 8 | ) 9 | 10 | // gasStationAPI provides core space gasstation API. 11 | type gasStationAPI struct { 12 | handler *handler.GasStationHandler 13 | } 14 | 15 | func newGasStationAPI(handler *handler.GasStationHandler) *gasStationAPI { 16 | return &gasStationAPI{handler: handler} 17 | } 18 | 19 | func (api *gasStationAPI) Price(ctx context.Context) (*types.GasStationPrice, error) { 20 | return api.handler.GetPrice() 21 | } 22 | -------------------------------------------------------------------------------- /rpc/handler/cfx_logs_pruned.go: -------------------------------------------------------------------------------- 1 | package handler 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "net/http" 7 | "strings" 8 | 9 | sdk "github.com/Conflux-Chain/go-conflux-sdk" 10 | "github.com/Conflux-Chain/go-conflux-sdk/types" 11 | "github.com/go-redis/redis/v8" 12 | "github.com/pkg/errors" 13 | "github.com/scroll-tech/rpc-gateway/node" 14 | "github.com/scroll-tech/rpc-gateway/rpc/throttle" 15 | "github.com/scroll-tech/rpc-gateway/store/mysql" 16 | "github.com/scroll-tech/rpc-gateway/util/rpc" 17 | "github.com/sirupsen/logrus" 18 | ) 19 | 20 | const thresholdGetLogs = 1 21 | 22 | var ( 23 | errQuotaNotEnough = errors.New("quota not enough") 24 | ) 25 | 26 | // CfxPrunedLogsHandler RPC handler to get pruned event logs from some archive fullnodes 27 | // in rate limited way or vip mode with dedicated fullnode. 
// CfxPrunedLogsHandler RPC handler to get pruned event logs from some archive fullnodes
// in rate limited way or vip mode with dedicated fullnode.
type CfxPrunedLogsHandler struct {
	pool       *node.CfxClientProvider // shared archive fullnode client pool
	store      *mysql.UserStore        // user (API key) lookup for dedicated-node mode
	throttling *throttle.RefCounter    // redis-backed per-node quota counter
}

// NewCfxPrunedLogsHandler creates a handler with a per-node concurrency quota
// of thresholdGetLogs tracked in redis.
func NewCfxPrunedLogsHandler(
	pool *node.CfxClientProvider, store *mysql.UserStore, client *redis.Client) *CfxPrunedLogsHandler {
	return &CfxPrunedLogsHandler{
		pool:       pool,
		store:      store,
		throttling: throttle.NewRefCounter(client, thresholdGetLogs),
	}
}

// GetLogs fetches pruned event logs: first via the caller's dedicated fullnode
// (if the request carries a recognized user key), otherwise via a shared
// archive node chosen by client IP group, subject to quota.
func (h *CfxPrunedLogsHandler) GetLogs(ctx context.Context, filter types.LogFilter) ([]types.Log, error) {
	logs, ok, err := h.getLogsByUser(ctx, filter)
	if err != nil {
		return nil, err
	}

	// handled by the user's dedicated node
	if ok {
		return logs, nil
	}

	// fall back to a shared archive node
	client, err := h.pool.GetClientByIPGroup(ctx, node.GroupCfxArchives)
	if err == node.ErrClientUnavailable {
		// no archive node available is surfaced as quota exhaustion
		return nil, errQuotaNotEnough
	}

	if err != nil {
		return nil, err
	}

	return h.getLogsThrottled(client, filter)
}
// getLogsByUser serves the request from the caller's dedicated fullnode when
// the request URL carries a recognized user API key. The boolean result
// reports whether the request was handled this way.
func (h *CfxPrunedLogsHandler) getLogsByUser(ctx context.Context, filter types.LogFilter) ([]types.Log, bool, error) {
	// NOTE(review): string context key — presumably set by HTTP server
	// middleware; staticcheck (SA1029) prefers a typed key, but changing it
	// must be coordinated with the producer.
	request, ok := ctx.Value("request").(*http.Request)
	if !ok {
		// WebSocket have no http.Request object.
		return nil, false, nil
	}

	if request.URL == nil {
		return nil, false, nil
	}

	// The API key is the first path segment, e.g. "/<key>/...".
	key := strings.TrimLeft(request.URL.Path, "/")
	if idx := strings.Index(key, "/"); idx > 0 {
		key = key[:idx]
	}

	user, ok, err := h.store.GetUserByKey(key)
	if err != nil {
		logrus.WithError(err).WithField("key", key).Warn("Failed to get user by key")
		return nil, false, err
	}

	// unknown key: not an error, just not a dedicated-node request
	if !ok {
		return nil, false, nil
	}

	// TODO cache client for user
	client, err := sdk.NewClient(user.NodeUrl)
	if err != nil {
		logrus.WithError(err).WithFields(logrus.Fields{
			"user": user.Name,
			"node": user.NodeUrl,
		}).Warn("Failed to connect to full node for user")
		return nil, false, err
	}
	defer client.Close()

	logs, err := h.getLogsThrottled(client, filter)
	if err != nil {
		return nil, false, err
	}

	return logs, true, nil
}

// getLogsThrottled forwards cfx_getLogs to the given node, guarded by a
// per-node reference-count quota so concurrent heavy queries stay bounded.
func (h *CfxPrunedLogsHandler) getLogsThrottled(cfx sdk.ClientOperator, filter types.LogFilter) ([]types.Log, error) {
	nodeName := rpc.Url2NodeName(cfx.GetNodeURL())
	key := fmt.Sprintf("rpc:throttle:cfx_getLogs:%v", nodeName)
	if !h.throttling.Ref(key) {
		return nil, errQuotaNotEnough
	}
	// release asynchronously once the query finishes
	defer h.throttling.UnrefAsync(key)

	return cfx.GetLogs(filter)
}
!strings.HasPrefix(k, metricPrefixInfura) { 19 | names = append(names, k) 20 | } 21 | } 22 | 23 | for _, v := range names { 24 | metrics.DefaultRegistry.Unregister(v) 25 | } 26 | } 27 | -------------------------------------------------------------------------------- /rpc/net_api.go: -------------------------------------------------------------------------------- 1 | package rpc 2 | 3 | import ( 4 | "context" 5 | 6 | "github.com/scroll-tech/rpc-gateway/rpc/cache" 7 | ) 8 | 9 | // netAPI provides evm space net RPC proxy API. 10 | type netAPI struct{} 11 | 12 | // Version returns the current network id. 13 | func (api *netAPI) Version(ctx context.Context) (string, error) { 14 | w3c := GetEthClientFromContext(ctx) 15 | return cache.EthDefault.GetNetVersion(w3c.Client) 16 | } 17 | -------------------------------------------------------------------------------- /rpc/parity_api.go: -------------------------------------------------------------------------------- 1 | package rpc 2 | 3 | import ( 4 | "context" 5 | 6 | "github.com/openweb3/web3go/types" 7 | ) 8 | 9 | // parityAPI provides evm space parity RPC proxy API. 10 | type parityAPI struct{} 11 | 12 | func (api *parityAPI) GetBlockReceipts(ctx context.Context, blockNumOrHash *types.BlockNumberOrHash) ([]types.Receipt, error) { 13 | return GetEthClientFromContext(ctx).Parity.BlockReceipts(blockNumOrHash) 14 | } 15 | -------------------------------------------------------------------------------- /rpc/pos_api.go: -------------------------------------------------------------------------------- 1 | package rpc 2 | 3 | import ( 4 | "context" 5 | 6 | "github.com/Conflux-Chain/go-conflux-sdk/types" 7 | postypes "github.com/Conflux-Chain/go-conflux-sdk/types/pos" 8 | "github.com/ethereum/go-ethereum/common/hexutil" 9 | ) 10 | 11 | // posAPI provides core space POS RPC proxy API. 
12 | type posAPI struct{} 13 | 14 | func (api *posAPI) GetStatus(ctx context.Context) (postypes.Status, error) { 15 | return GetCfxClientFromContext(ctx).Pos().GetStatus() 16 | } 17 | 18 | func (api *posAPI) GetAccount(ctx context.Context, address postypes.Address, view ...hexutil.Uint64) (postypes.Account, error) { 19 | return GetCfxClientFromContext(ctx).Pos().GetAccount(address, view...) 20 | } 21 | 22 | func (api *posAPI) GetCommittee(ctx context.Context, view ...hexutil.Uint64) (postypes.CommitteeState, error) { 23 | return GetCfxClientFromContext(ctx).Pos().GetCommittee(view...) 24 | } 25 | 26 | func (api *posAPI) GetBlockByHash(ctx context.Context, blockHash types.Hash) (*postypes.Block, error) { 27 | return GetCfxClientFromContext(ctx).Pos().GetBlockByHash(blockHash) 28 | } 29 | 30 | func (api *posAPI) GetBlockByNumber(ctx context.Context, blockNumber postypes.BlockNumber) (*postypes.Block, error) { 31 | return GetCfxClientFromContext(ctx).Pos().GetBlockByNumber(blockNumber) 32 | } 33 | 34 | func (api *posAPI) GetTransactionByNumber(ctx context.Context, txNumber hexutil.Uint64) (*postypes.Transaction, error) { 35 | return GetCfxClientFromContext(ctx).Pos().GetTransactionByNumber(txNumber) 36 | } 37 | 38 | func (api *posAPI) GetRewardsByEpoch(ctx context.Context, epochNumber hexutil.Uint64) (postypes.EpochReward, error) { 39 | return GetCfxClientFromContext(ctx).Pos().GetRewardsByEpoch(epochNumber) 40 | } 41 | -------------------------------------------------------------------------------- /rpc/server.go: -------------------------------------------------------------------------------- 1 | package rpc 2 | 3 | import ( 4 | infuraNode "github.com/scroll-tech/rpc-gateway/node" 5 | "github.com/scroll-tech/rpc-gateway/rpc/handler" 6 | "github.com/scroll-tech/rpc-gateway/util/rate" 7 | "github.com/scroll-tech/rpc-gateway/util/rpc" 8 | "github.com/sirupsen/logrus" 9 | ) 10 | 11 | const ( 12 | nativeSpaceRpcServerName = "core_space_rpc" 13 | evmSpaceRpcServerName = 
"evm_space_rpc" 14 | 15 | nativeSpaceBridgeRpcServerName = "core_space_bridge_rpc" 16 | ) 17 | 18 | // MustNewNativeSpaceServer new core space RPC server by specifying router, handler 19 | // and exposed modules. Argument exposedModules is a list of API modules to expose 20 | // via the RPC interface. If the module list is empty, all RPC API endpoints designated 21 | // public will be exposed. 22 | func MustNewNativeSpaceServer( 23 | router infuraNode.Router, gashandler *handler.GasStationHandler, 24 | exposedModules []string, option ...CfxAPIOption, 25 | ) *rpc.Server { 26 | // retrieve all available core space rpc apis 27 | clientProvider := infuraNode.NewCfxClientProvider(router) 28 | allApis := nativeSpaceApis(clientProvider, gashandler, option...) 29 | 30 | exposedApis, err := filterExposedApis(allApis, exposedModules) 31 | if err != nil { 32 | logrus.WithError(err).Fatal( 33 | "Failed to new native space RPC server with bad exposed modules", 34 | ) 35 | } 36 | 37 | middleware := httpMiddleware(rate.DefaultRegistryCfx, clientProvider) 38 | 39 | return rpc.MustNewServer(nativeSpaceRpcServerName, exposedApis, middleware) 40 | } 41 | 42 | // MustNewEvmSpaceServer new evm space RPC server by specifying router, and exposed modules. 43 | // `exposedModules` is a list of API modules to expose via the RPC interface. If the module 44 | // list is empty, all RPC API endpoints designated public will be exposed. 45 | func MustNewEvmSpaceServer( 46 | router infuraNode.Router, exposedModules []string, option ...EthAPIOption, 47 | ) *rpc.Server { 48 | // retrieve all available evm space rpc apis 49 | clientProvider := infuraNode.NewEthClientProvider(router) 50 | allApis, err := evmSpaceApis(clientProvider, option...) 
51 | if err != nil { 52 | logrus.WithError(err).Fatal("Failed to new EVM space RPC server") 53 | } 54 | 55 | exposedApis, err := filterExposedApis(allApis, exposedModules) 56 | if err != nil { 57 | logrus.WithError(err).Fatal( 58 | "Failed to new EVM space RPC server with bad exposed modules", 59 | ) 60 | } 61 | 62 | middleware := httpMiddleware(rate.DefaultRegistryEth, clientProvider) 63 | 64 | return rpc.MustNewServer(evmSpaceRpcServerName, exposedApis, middleware) 65 | } 66 | 67 | type CfxBridgeServerConfig struct { 68 | EthNode string 69 | CfxNode string 70 | ExposedModules []string 71 | Endpoint string `default:":32537"` 72 | } 73 | 74 | func MustNewNativeSpaceBridgeServer(config *CfxBridgeServerConfig) *rpc.Server { 75 | allApis, err := nativeSpaceBridgeApis(config.EthNode, config.CfxNode) 76 | if err != nil { 77 | logrus.WithError(err).Fatal("Failed to new CFX bridge RPC server") 78 | } 79 | 80 | exposedApis, err := filterExposedApis(allApis, config.ExposedModules) 81 | if err != nil { 82 | logrus.WithError(err).Fatal("Failed to new CFX bridge RPC server with bad exposed modules") 83 | } 84 | 85 | return rpc.MustNewServer(nativeSpaceBridgeRpcServerName, exposedApis) 86 | } 87 | -------------------------------------------------------------------------------- /rpc/server_middleware.go: -------------------------------------------------------------------------------- 1 | package rpc 2 | 3 | import ( 4 | "context" 5 | "net/http" 6 | 7 | sdk "github.com/Conflux-Chain/go-conflux-sdk" 8 | "github.com/openweb3/go-rpc-provider" 9 | "github.com/scroll-tech/rpc-gateway/node" 10 | "github.com/scroll-tech/rpc-gateway/util/rate" 11 | "github.com/scroll-tech/rpc-gateway/util/rpc/handlers" 12 | "github.com/scroll-tech/rpc-gateway/util/rpc/middlewares" 13 | "github.com/sirupsen/logrus" 14 | "github.com/spf13/viper" 15 | ) 16 | 17 | const ( 18 | ctxKeyClientProvider = handlers.CtxKey("Infura-RPC-Client-Provider") 19 | ctxKeyClient = handlers.CtxKey("Infura-RPC-Client") 20 | ) 
// go-rpc-provider only supports static middlewares for the RPC server, so
// all call-level middlewares are registered once at package load time.
//
// Hooks registered via HookHandleCallMsg/HookHandleBatch execute in
// registration order, so the sequence below is significant: recover first,
// then billing, rate limiting, metrics, logging, client selection, and
// finally request validation.
func init() {
	viper.SetDefault("rpc.loadBalancerMode", "consistentHashing")

	// panic recovery — outermost so it wraps every later middleware
	rpc.HookHandleCallMsg(middlewares.Recover)

	// web3pay billing, enabled only when a web3pay client can be constructed
	if web3payClient, ok := middlewares.MustNewWeb3PayClient(); ok {
		logrus.Info("Web3Pay billing RPC middleware enabled")
		rpc.HookHandleCallMsg(middlewares.Billing(web3payClient))
	}

	// rate limit
	rpc.HookHandleBatch(middlewares.RateLimitBatch)
	rpc.HookHandleCallMsg(middlewares.RateLimit)

	// metrics
	rpc.HookHandleBatch(middlewares.MetricsBatch)
	rpc.HookHandleCallMsg(middlewares.Metrics)

	// log
	rpc.HookHandleBatch(middlewares.LogBatch)
	rpc.HookHandleCallMsg(middlewares.Log)

	// bind a cfx/eth fullnode client to the request context
	rpc.HookHandleCallMsg(clientMiddleware)

	// reject invalid JSON-RPC requests that carry no `ID`
	rpc.HookHandleCallMsg(rpc.PreventMessagesWithouID)
}
rate limit 56 | func httpMiddleware(registry *rate.Registry, clientProvider interface{}) handlers.Middleware { 57 | return func(next http.Handler) http.Handler { 58 | return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { 59 | ctx := r.Context() 60 | 61 | if token := handlers.GetAccessToken(r); len(token) > 0 { // optional 62 | ctx = context.WithValue(ctx, handlers.CtxAccessToken, token) 63 | } 64 | 65 | ctx = context.WithValue(ctx, handlers.CtxKeyRealIP, handlers.GetIPAddress(r)) 66 | ctx = context.WithValue(ctx, handlers.CtxKeyRateRegistry, registry) 67 | ctx = context.WithValue(ctx, ctxKeyClientProvider, clientProvider) 68 | 69 | next.ServeHTTP(w, r.WithContext(ctx)) 70 | }) 71 | } 72 | } 73 | 74 | func clientMiddleware(next rpc.HandleCallMsgFunc) rpc.HandleCallMsgFunc { 75 | loadBalancerMode := viper.GetString("rpc.loadBalancerMode") 76 | return func(ctx context.Context, msg *rpc.JsonRpcMessage) *rpc.JsonRpcMessage { 77 | var client interface{} 78 | var err error 79 | 80 | if cfxProvider, ok := ctx.Value(ctxKeyClientProvider).(*node.CfxClientProvider); ok { 81 | switch msg.Method { 82 | case "cfx_getLogs": 83 | client, err = cfxProvider.GetClientByIPGroup(ctx, node.GroupCfxLogs) 84 | default: 85 | client, err = cfxProvider.GetClientByIP(ctx) 86 | } 87 | } else if ethProvider, ok := ctx.Value(ctxKeyClientProvider).(*node.EthClientProvider); ok { 88 | if (loadBalancerMode == "consistentHashing") { 89 | switch msg.Method { 90 | case "eth_getLogs": 91 | client, err = ethProvider.GetClientByIPGroup(ctx, node.GroupEthLogs) 92 | default: 93 | client, err = ethProvider.GetClientByIP(ctx) 94 | } 95 | } else { 96 | switch msg.Method{ 97 | case "eth_getLogs": 98 | client, err = ethProvider.GetClientRandomByGroup(node.GroupEthLogs) 99 | default: 100 | client, err = ethProvider.GetClientRandom() 101 | } 102 | } 103 | } else { 104 | return next(ctx, msg) 105 | } 106 | 107 | // no fullnode available to request RPC 108 | if err != nil { 109 | return 
msg.ErrorResponse(err) 110 | } 111 | 112 | ctx = context.WithValue(ctx, ctxKeyClient, client) 113 | 114 | return next(ctx, msg) 115 | } 116 | } 117 | 118 | func GetCfxClientFromContext(ctx context.Context) sdk.ClientOperator { 119 | return ctx.Value(ctxKeyClient).(sdk.ClientOperator) 120 | } 121 | 122 | func GetEthClientFromContext(ctx context.Context) *node.Web3goClient { 123 | return ctx.Value(ctxKeyClient).(*node.Web3goClient) 124 | } 125 | -------------------------------------------------------------------------------- /rpc/throttle/ref_count.go: -------------------------------------------------------------------------------- 1 | package throttle 2 | 3 | import ( 4 | "context" 5 | "time" 6 | 7 | "github.com/go-redis/redis/v8" 8 | "github.com/sirupsen/logrus" 9 | ) 10 | 11 | var DefaultExpiration = time.Minute 12 | 13 | // RefCounter allows to throttle based on reference counter. 14 | type RefCounter struct { 15 | client *redis.Client 16 | quota int64 17 | ctx context.Context 18 | } 19 | 20 | func NewRefCounter(client *redis.Client, quota int64) *RefCounter { 21 | if quota <= 0 { 22 | logrus.WithField("quota", quota).Fatal("Invalid quota to create RefCounter instance") 23 | } 24 | 25 | return &RefCounter{ 26 | client: client, 27 | quota: quota, 28 | ctx: context.Background(), 29 | } 30 | } 31 | 32 | func (rc *RefCounter) Ref(key string, expiration ...time.Duration) bool { 33 | if rc.client == nil { 34 | return false 35 | } 36 | 37 | timeout := DefaultExpiration 38 | if len(expiration) > 0 && expiration[0] > 0 { 39 | timeout = expiration[0] 40 | } 41 | 42 | batchOps := rc.client.TxPipeline() 43 | incrCmd := batchOps.Incr(rc.ctx, key) 44 | batchOps.Expire(rc.ctx, key, timeout) 45 | if _, err := batchOps.Exec(rc.ctx); err != nil { 46 | // treat as no quota if any error occurred 47 | logrus.WithError(err).WithField("key", key).Warn("Failed to exec batch cmd to add ref") 48 | return false 49 | } 50 | 51 | count, err := incrCmd.Result() 52 | if err != nil { 53 | // 
// SafeUnref decrements the reference counter for key, retrying every 100ms
// until the redis DECR succeeds. No-op when no redis client is configured.
//
// NOTE(review): the retry loop has no upper bound and no cancellation — it
// blocks the caller indefinitely if redis stays unreachable; confirm this
// is intended.
func (rc *RefCounter) SafeUnref(key string) {
	if rc.client == nil {
		return
	}

	for {
		_, err := rc.client.Decr(rc.ctx, key).Result()
		if err == nil {
			break
		}

		logrus.WithError(err).WithField("key", key).Warn("Failed to decr to unref, try again")
		time.Sleep(100 * time.Millisecond)
	}
}

// UnrefAsync decrements the reference counter for key on a background
// goroutine so the caller is never blocked by redis retries.
func (rc *RefCounter) UnrefAsync(key string) {
	go func() {
		rc.SafeUnref(key)
	}()
}
10 | type traceAPI struct{} 11 | 12 | func (api *traceAPI) Block(ctx context.Context, blockHash types.Hash) (*types.LocalizedBlockTrace, error) { 13 | return GetCfxClientFromContext(ctx).GetBlockTraces(blockHash) 14 | } 15 | 16 | func (api *traceAPI) Filter(ctx context.Context, filter types.TraceFilter) ([]types.LocalizedTrace, error) { 17 | return GetCfxClientFromContext(ctx).FilterTraces(filter) 18 | } 19 | 20 | func (api *traceAPI) Transaction(ctx context.Context, txHash types.Hash) ([]types.LocalizedTrace, error) { 21 | return GetCfxClientFromContext(ctx).GetTransactionTraces(txHash) 22 | } 23 | -------------------------------------------------------------------------------- /rpc/txpool_api.go: -------------------------------------------------------------------------------- 1 | package rpc 2 | 3 | import ( 4 | "context" 5 | 6 | "github.com/Conflux-Chain/go-conflux-sdk/types" 7 | "github.com/ethereum/go-ethereum/common/hexutil" 8 | ) 9 | 10 | // txPoolAPI provides core space txPool RPC proxy API. 11 | type txPoolAPI struct{} 12 | 13 | func (api *txPoolAPI) NextNonce(ctx context.Context, address types.Address) (*hexutil.Big, error) { 14 | return GetCfxClientFromContext(ctx).TxPool().NextNonce(address) 15 | } 16 | -------------------------------------------------------------------------------- /rpc/web3_api.go: -------------------------------------------------------------------------------- 1 | package rpc 2 | 3 | import ( 4 | "context" 5 | 6 | "github.com/scroll-tech/rpc-gateway/rpc/cache" 7 | ) 8 | 9 | // web3API provides evm space web3 RPC proxy API. 10 | type web3API struct{} 11 | 12 | // ClientVersion returns the current client version. 
// ClientVersion returns the current client version, served from the shared
// eth cache to avoid hitting the fullnode on every call.
func (api *web3API) ClientVersion(ctx context.Context) (string, error) {
	w3c := GetEthClientFromContext(ctx)
	return cache.EthDefault.GetClientVersion(w3c.Client)
}

// BlockExtra carries custom block fields for extension: ETH-specific block
// attributes that the core space block model does not hold natively.
type BlockExtra struct {
	// extended fields for ETH block
	BaseFeePerGas   *hexutil.Big `json:"baseFeePerGas,omitempty"`
	MixHash         *common.Hash `json:"mixHash,omitempty"`
	TotalDifficulty *hexutil.Big `json:"totalDifficulty,omitempty"`
	Sha3Uncles      *common.Hash `json:"sha3Uncles,omitempty"`

	// per-transaction extras; persisted separately, not marshalled here
	TxnExts []*TransactionExtra `json:"-"`
}

// TransactionExtra carries custom transaction fields for extension:
// ETH-specific transaction attributes.
type TransactionExtra struct {
	// extended fields for ETH transaction
	Accesses             gethTypes.AccessList `json:"accessList,omitempty"`
	BlockNumber          *hexutil.Big         `json:"blockNumber,omitempty"`
	MaxFeePerGas         *hexutil.Big         `json:"maxFeePerGas,omitempty"`
	MaxPriorityFeePerGas *hexutil.Big         `json:"maxPriorityFeePerGas,omitempty"`
	Type                 *uint64              `json:"type,omitempty"`
	StandardV            *hexutil.Big         `json:"standardV,omitempty"`
}

// ReceiptExtra carries custom receipt fields for extension: ETH-specific
// receipt attributes.
type ReceiptExtra struct {
	// extended fields for ETH receipt
	CumulativeGasUsed *uint64 `json:"cumulativeGasUsed,omitempty"`
	EffectiveGasPrice *uint64 `json:"effectiveGasPrice,omitempty"`
	Type              *uint   `json:"type,omitempty"`

	// per-log extras; persisted separately, not marshalled here
	LogExts []*LogExtra `json:"-"`
}
log 45 | LogType *string `json:"logType,omitempty"` 46 | Removed *bool `json:"removed,,omitempty"` 47 | } 48 | 49 | func ExtractEthBlockExt(ethBlock *web3Types.Block) *BlockExtra { 50 | ethTxns := ethBlock.Transactions.Transactions() 51 | txnExts := make([]*TransactionExtra, len(ethTxns)) 52 | 53 | for i := 0; i < len(ethTxns); i++ { 54 | txnExts[i] = ExtractEthTransactionExt(ðTxns[i]) 55 | } 56 | 57 | return &BlockExtra{ 58 | BaseFeePerGas: (*hexutil.Big)(ethBlock.BaseFeePerGas), 59 | MixHash: ethBlock.MixHash, 60 | TotalDifficulty: (*hexutil.Big)(ethBlock.TotalDifficulty), 61 | Sha3Uncles: ðBlock.Sha3Uncles, 62 | TxnExts: txnExts, 63 | } 64 | } 65 | 66 | func ExtractEthTransactionExt(ethTxn *web3Types.TransactionDetail) *TransactionExtra { 67 | return &TransactionExtra{ 68 | Accesses: ethTxn.Accesses, 69 | BlockNumber: (*hexutil.Big)(ethTxn.BlockNumber), 70 | MaxFeePerGas: (*hexutil.Big)(ethTxn.MaxFeePerGas), 71 | MaxPriorityFeePerGas: (*hexutil.Big)(ethTxn.MaxPriorityFeePerGas), 72 | Type: ethTxn.Type, 73 | StandardV: (*hexutil.Big)(ethTxn.StandardV), 74 | } 75 | } 76 | 77 | func ExtractEthLogExt(ethLog *web3Types.Log) *LogExtra { 78 | return &LogExtra{ 79 | LogType: ethLog.LogType, Removed: ðLog.Removed, 80 | } 81 | } 82 | 83 | func ExtractEthReceiptExt(ethRcpt *web3Types.Receipt) *ReceiptExtra { 84 | ethLogs := ethRcpt.Logs 85 | logExts := make([]*LogExtra, len(ethLogs)) 86 | 87 | for i := 0; i < len(ethLogs); i++ { 88 | logExts[i] = ExtractEthLogExt(ethLogs[i]) 89 | } 90 | 91 | return &ReceiptExtra{ 92 | CumulativeGasUsed: ðRcpt.CumulativeGasUsed, 93 | EffectiveGasPrice: ðRcpt.EffectiveGasPrice, 94 | Type: ethRcpt.Type, 95 | LogExts: logExts, 96 | } 97 | } 98 | -------------------------------------------------------------------------------- /store/glue_data.go: -------------------------------------------------------------------------------- 1 | package store 2 | 3 | import ( 4 | "github.com/Conflux-Chain/go-conflux-sdk/types" 5 | ) 6 | 7 | type Block struct { 
// Block glues a core space block to its ETH-specific extra fields.
type Block struct {
	CfxBlock *types.Block
	Extra    *BlockExtra
}

// BlockSummary glues a core space block summary to its extra fields.
type BlockSummary struct {
	CfxBlockSummary *types.BlockSummary
	Extra           *BlockExtra
}

// Transaction glues a core space transaction to its extra fields.
type Transaction struct {
	CfxTransaction *types.Transaction
	Extra          *TransactionExtra
}

// TransactionReceipt glues a core space receipt to its extra fields.
type TransactionReceipt struct {
	CfxReceipt *types.TransactionReceipt
	Extra      *ReceiptExtra
}

// Log is the flattened store representation of an event log: indexed fields
// are columns, everything else is JSON-packed into Extra.
type Log struct {
	ID          uint64
	ContractID  uint64
	BlockNumber uint64
	Epoch       uint64
	Topic0      string
	Topic1      string
	Topic2      string
	Topic3      string
	LogIndex    uint64
	Extra       []byte
}

// cmp orders logs by block number first, then by log index within a block:
// -1 if log sorts before other, 1 if after, 0 if equal.
func (log *Log) cmp(other *Log) int {
	if log.BlockNumber < other.BlockNumber {
		return -1
	}

	if log.BlockNumber > other.BlockNumber {
		return 1
	}

	if log.LogIndex < other.LogIndex {
		return -1
	}

	if log.LogIndex > other.LogIndex {
		return 1
	}

	return 0
}

// LogSlice implements sort.Interface over []*Log using Log.cmp ordering.
type LogSlice []*Log

func (s LogSlice) Len() int           { return len(s) }
func (s LogSlice) Less(i, j int) bool { return s[i].cmp(s[j]) < 0 }
func (s LogSlice) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
`json:"tli,omitempty"` 58 | Data hexutil.Bytes `json:"data,omitempty"` 59 | EthExtra *LogExtra `json:"eth,omitempty"` 60 | } 61 | 62 | func ParseCfxLog(log *types.Log, cid, bn uint64, logExt *LogExtra) *Log { 63 | convertLogTopicFunc := func(log *types.Log, index int) string { 64 | if index < 0 || index >= len(log.Topics) { 65 | return "" 66 | } 67 | 68 | return log.Topics[index].String() 69 | } 70 | 71 | return &Log{ 72 | ContractID: cid, 73 | BlockNumber: bn, 74 | Epoch: log.EpochNumber.ToInt().Uint64(), 75 | Topic0: convertLogTopicFunc(log, 0), 76 | Topic1: convertLogTopicFunc(log, 1), 77 | Topic2: convertLogTopicFunc(log, 2), 78 | Topic3: convertLogTopicFunc(log, 3), 79 | LogIndex: log.LogIndex.ToInt().Uint64(), 80 | Extra: util.MustMarshalJson(logExtraData{ 81 | Address: log.Address, 82 | BlockHash: log.BlockHash, 83 | TransactionHash: log.TransactionHash, 84 | TransactionIndex: log.TransactionIndex, 85 | TransactionLogIndex: log.TransactionLogIndex, 86 | Data: log.Data, 87 | EthExtra: logExt, 88 | }), 89 | } 90 | } 91 | 92 | func (log *Log) ToCfxLog() (*types.Log, *LogExtra) { 93 | var extra logExtraData 94 | if err := json.Unmarshal(log.Extra, &extra); err != nil { 95 | logrus.WithError(err).Error("Failed to unmarshal cfx log from Extra field") 96 | } 97 | 98 | var topics []types.Hash 99 | for _, v := range []string{log.Topic0, log.Topic1, log.Topic2, log.Topic3} { 100 | if len(v) > 0 { 101 | topics = append(topics, types.Hash(v)) 102 | } 103 | } 104 | 105 | return &types.Log{ 106 | Address: extra.Address, 107 | Topics: topics, 108 | Data: extra.Data, 109 | BlockHash: extra.BlockHash, 110 | EpochNumber: types.NewBigInt(log.Epoch), 111 | TransactionHash: extra.TransactionHash, 112 | TransactionIndex: extra.TransactionIndex, 113 | LogIndex: types.NewBigInt(log.LogIndex), 114 | TransactionLogIndex: extra.TransactionLogIndex, 115 | }, extra.EthExtra 116 | } 117 | -------------------------------------------------------------------------------- 
/store/mysql/common_partition.go: -------------------------------------------------------------------------------- 1 | package mysql 2 | 3 | import ( 4 | "fmt" 5 | "strconv" 6 | "strings" 7 | 8 | "gorm.io/gorm" 9 | ) 10 | 11 | // mysqlPartition represents a MySQL table partition. 12 | type mysqlPartition struct { 13 | Name string `gorm:"column:PARTITION_NAME"` 14 | Description string `gorm:"column:PARTITION_DESCRIPTION"` 15 | OrdinalPosition int `gorm:"column:PARTITION_ORDINAL_POSITION"` 16 | } 17 | 18 | // mysqlPartitioner helper struct for MySQL partitioned tables. 19 | type mysqlPartitioner struct { 20 | dbName string // database name 21 | tableName string // table name 22 | } 23 | 24 | func newMysqlPartitioner(dbName, tableName string) *mysqlPartitioner { 25 | return &mysqlPartitioner{ 26 | dbName: dbName, tableName: tableName, 27 | } 28 | } 29 | 30 | // latestPartition returns latest created partition of the table. 31 | func (msp *mysqlPartitioner) latestPartition(db *gorm.DB) (*mysqlPartition, error) { 32 | partitions, err := msp.loadPartitions(db, false, 1) 33 | if err == nil && len(partitions) > 0 { 34 | return partitions[0], nil 35 | } 36 | 37 | return nil, err 38 | } 39 | 40 | // loadPartitions loads partitions of the table with specified ordinal position order. 41 | func (msp *mysqlPartitioner) loadPartitions(db *gorm.DB, orderAsc bool, limit int) ([]*mysqlPartition, error) { 42 | db = db.Table("information_schema.partitions"). 43 | Where("TABLE_SCHEMA = ?", msp.dbName). 44 | Where("TABLE_NAME = ?", msp.tableName). 
45 | Where("PARTITION_NAME IS NOT NULL") 46 | 47 | if orderAsc { 48 | db = db.Order("PARTITION_ORDINAL_POSITION ASC") 49 | } else { 50 | db = db.Order("PARTITION_ORDINAL_POSITION DESC") 51 | } 52 | 53 | if limit > 0 { 54 | db = db.Limit(limit) 55 | } 56 | 57 | var partitions []*mysqlPartition 58 | if err := db.Find(&partitions).Error; err != nil { 59 | return nil, err 60 | } 61 | 62 | return partitions, nil 63 | } 64 | 65 | // mysqlRangePartitioner helper struct for MySQL range partitioned tables. 66 | type mysqlRangePartitioner struct { 67 | *mysqlPartitioner 68 | 69 | rangeBy string 70 | } 71 | 72 | func newMysqlRangePartitioner(dbName, tableName, rangeBy string) *mysqlRangePartitioner { 73 | return &mysqlRangePartitioner{ 74 | mysqlPartitioner: newMysqlPartitioner(dbName, tableName), 75 | rangeBy: rangeBy, 76 | } 77 | } 78 | 79 | // convert converts non-partitioned table to range partitioned table. 80 | func (mrp *mysqlRangePartitioner) convert(db *gorm.DB, initIndex int, threshold uint64) error { 81 | sql := fmt.Sprintf( 82 | "ALTER TABLE %v PARTITION BY RANGE (%v) (PARTITION %v VALUES LESS THAN (%v));", 83 | mrp.tableName, 84 | mrp.rangeBy, 85 | mrp.partitionName(initIndex), 86 | threshold, 87 | ) 88 | return db.Exec(sql).Error 89 | } 90 | 91 | func (mrp *mysqlRangePartitioner) addPartition(db *gorm.DB, index int, threshold uint64) error { 92 | sql := fmt.Sprintf( 93 | "ALTER TABLE %v ADD PARTITION (PARTITION %v VALUES LESS THAN (%v));", 94 | mrp.tableName, 95 | mrp.partitionName(index), 96 | threshold, 97 | ) 98 | return db.Exec(sql).Error 99 | } 100 | 101 | func (mrp *mysqlRangePartitioner) removePartition(db *gorm.DB, partiton *mysqlPartition) error { 102 | sql := fmt.Sprintf( 103 | "ALTER TABLE %v DROP PARTITION %v;", 104 | mrp.tableName, 105 | partiton.Name, 106 | ) 107 | return db.Exec(sql).Error 108 | } 109 | 110 | func (*mysqlRangePartitioner) indexOfPartition(partition *mysqlPartition) int { 111 | v, err := 
strconv.Atoi(strings.TrimPrefix(partition.Name, "p")) 112 | if err == nil { 113 | return v 114 | } 115 | 116 | return -1 117 | } 118 | 119 | func (*mysqlRangePartitioner) partitionName(index int) string { 120 | return fmt.Sprintf("p%d", index) 121 | } 122 | -------------------------------------------------------------------------------- /store/mysql/store_common.go: -------------------------------------------------------------------------------- 1 | package mysql 2 | 3 | import ( 4 | "errors" 5 | 6 | "github.com/scroll-tech/rpc-gateway/store" 7 | "gorm.io/gorm" 8 | ) 9 | 10 | // baseStore provides basic store common operatition. 11 | type baseStore struct { 12 | db *gorm.DB 13 | } 14 | 15 | func newBaseStore(db *gorm.DB) *baseStore { 16 | return &baseStore{db} 17 | } 18 | 19 | func (baseStore) IsRecordNotFound(err error) bool { 20 | return errors.Is(err, gorm.ErrRecordNotFound) || errors.Is(err, store.ErrNotFound) 21 | } 22 | 23 | func (bs *baseStore) Close() error { 24 | if mysqlDb, err := bs.db.DB(); err != nil { 25 | return err 26 | } else { 27 | return mysqlDb.Close() 28 | } 29 | } 30 | 31 | func (bs *baseStore) exists(modelPtr interface{}, whereQuery string, args ...interface{}) (bool, error) { 32 | err := bs.db.Where(whereQuery, args...).First(modelPtr).Error 33 | if err == nil { 34 | return true, nil 35 | } 36 | 37 | if bs.IsRecordNotFound(err) { 38 | return false, nil 39 | } 40 | 41 | return false, err 42 | } 43 | -------------------------------------------------------------------------------- /store/mysql/store_common_partition.go: -------------------------------------------------------------------------------- 1 | package mysql 2 | 3 | import ( 4 | "fmt" 5 | 6 | "gorm.io/gorm" 7 | "gorm.io/gorm/schema" 8 | ) 9 | 10 | // partitionedStore provides basic utitlity for partitioned table operations. 
11 | type partitionedStore struct{} 12 | 13 | func (*partitionedStore) getPartitionedTableName(tabler schema.Tabler, partition uint32) string { 14 | return fmt.Sprintf("%v_%v", tabler.TableName(), partition) 15 | } 16 | 17 | func (ps *partitionedStore) createPartitionedTable(db *gorm.DB, modelPtr schema.Tabler, partition uint32) (bool, error) { 18 | tableName := ps.getPartitionedTableName(modelPtr, partition) 19 | migrator := db.Migrator() 20 | 21 | if migrator.HasTable(tableName) { 22 | return false, nil 23 | } 24 | 25 | if err := db.Table(modelPtr.TableName()).AutoMigrate(modelPtr); err != nil { 26 | return false, err 27 | } 28 | 29 | // gorm do not support dynamic table name, so rename to create partitioned tables. 30 | if err := migrator.RenameTable(modelPtr.TableName(), tableName); err != nil { 31 | return false, err 32 | } 33 | 34 | return true, nil 35 | } 36 | 37 | func (ps *partitionedStore) createPartitionedTables(db *gorm.DB, modelPtr schema.Tabler, partitionFrom, count uint32) (int, error) { 38 | var numCreated int 39 | 40 | for i, end := partitionFrom, partitionFrom+count; i < end; i++ { 41 | created, err := ps.createPartitionedTable(db, modelPtr, i) 42 | if err != nil { 43 | return numCreated, err 44 | } 45 | 46 | if created { 47 | numCreated++ 48 | } 49 | } 50 | 51 | return numCreated, nil 52 | } 53 | 54 | func (ps *partitionedStore) deletePartitionedTable(db *gorm.DB, modelPtr schema.Tabler, partition uint32) (bool, error) { 55 | tableName := ps.getPartitionedTableName(modelPtr, partition) 56 | migrator := db.Migrator() 57 | 58 | if !migrator.HasTable(tableName) { 59 | return false, nil 60 | } 61 | 62 | if err := migrator.DropTable(tableName); err != nil { 63 | return false, err 64 | } 65 | 66 | return true, nil 67 | } 68 | -------------------------------------------------------------------------------- /store/mysql/store_conf.go: -------------------------------------------------------------------------------- 1 | package mysql 2 | 3 | import ( 4 | 
"crypto/md5" 5 | "encoding/json" 6 | "strconv" 7 | "time" 8 | 9 | "github.com/pkg/errors" 10 | "github.com/scroll-tech/rpc-gateway/util/rate" 11 | "github.com/sirupsen/logrus" 12 | "gorm.io/gorm" 13 | "gorm.io/gorm/clause" 14 | ) 15 | 16 | const ( 17 | MysqlConfKeyReorgVersion = "reorg.version" 18 | 19 | rateLimitConfigStrategyPrefix = "ratelimit.strategy." 20 | rateLimitStrategySqlMatchPattern = rateLimitConfigStrategyPrefix + "%" 21 | ) 22 | 23 | // configuration tables 24 | type conf struct { 25 | ID uint32 26 | Name string `gorm:"unique;size:128;not null"` // config name 27 | Value string `gorm:"size:32698;not null"` // config value 28 | CreatedAt time.Time 29 | UpdatedAt time.Time 30 | } 31 | 32 | func (conf) TableName() string { 33 | return "configs" 34 | } 35 | 36 | type confStore struct { 37 | *baseStore 38 | } 39 | 40 | func newConfStore(db *gorm.DB) *confStore { 41 | return &confStore{ 42 | baseStore: newBaseStore(db), 43 | } 44 | } 45 | 46 | func (cs *confStore) LoadConfig(confNames ...string) (map[string]interface{}, error) { 47 | var confs []conf 48 | 49 | if err := cs.db.Where("name IN ?", confNames).Find(&confs).Error; err != nil { 50 | return nil, err 51 | } 52 | 53 | res := make(map[string]interface{}, len(confs)) 54 | for _, c := range confs { 55 | res[c.Name] = c.Value 56 | } 57 | 58 | return res, nil 59 | } 60 | 61 | func (cs *confStore) StoreConfig(confName string, confVal interface{}) error { 62 | return cs.db.Clauses(clause.OnConflict{ 63 | Columns: []clause.Column{{Name: "name"}}, 64 | DoUpdates: clause.Assignments(map[string]interface{}{"value": confVal}), 65 | }).Create(&conf{ 66 | Name: confName, 67 | Value: confVal.(string), 68 | }).Error 69 | } 70 | 71 | func (cs *confStore) GetReorgVersion() (int, error) { 72 | var result conf 73 | exists, err := cs.exists(&result, "name = ?", MysqlConfKeyReorgVersion) 74 | if err != nil { 75 | return 0, err 76 | } 77 | 78 | if !exists { 79 | return 0, nil 80 | } 81 | 82 | return 
// createOrUpdateReorgVersion bumps the persisted reorg version by one.
// Thread unsafe: the read-increment-write sequence is not atomic.
//
// NOTE(review): the dbTx parameter is never used — the update goes through
// cs.StoreConfig on the store's own db handle, so it does not participate
// in the caller's transaction; confirm whether that is intended.
func (cs *confStore) createOrUpdateReorgVersion(dbTx *gorm.DB) error {
	version, err := cs.GetReorgVersion()
	if err != nil {
		return err
	}

	newVersion := strconv.Itoa(version + 1)

	return cs.StoreConfig(MysqlConfKeyReorgVersion, newVersion)
}

// LoadRateLimitConfigs loads all "ratelimit.strategy.*" config rows and
// assembles them into a rate.Config keyed by config row ID. On a db error
// it logs and returns nil; invalid strategy rows are skipped with a warning.
func (cs *confStore) LoadRateLimitConfigs() *rate.Config {
	var cfgs []conf
	if err := cs.db.Where("name LIKE ?", rateLimitStrategySqlMatchPattern).Find(&cfgs).Error; err != nil {
		logrus.WithError(err).Error("Failed to load rate limit config from db")
		return nil
	}

	if len(cfgs) == 0 {
		return &rate.Config{}
	}

	strategies := make(map[uint32]*rate.Strategy)

	// load ratelimit strategies
	for _, v := range cfgs {
		strategy, err := cs.loadRateLimitStrategy(v)
		if err != nil {
			logrus.WithField("cfg", v).WithError(err).Warn("Invalid rate limit strategy config")
			continue
		}

		if strategy != nil {
			strategies[v.ID] = strategy
		}
	}

	return &rate.Config{Strategies: strategies}
}

// loadRateLimitStrategy parses one "ratelimit.strategy.<name>" config row:
// the value is a JSON object mapping rule names to [rate, burst] pairs. The
// strategy's MD5 fingerprint is computed over the raw JSON for change
// detection.
func (cs *confStore) loadRateLimitStrategy(cfg conf) (*rate.Strategy, error) {
	// eg., ratelimit.strategy.whitelist
	name := cfg.Name[len(rateLimitConfigStrategyPrefix):]
	if len(name) == 0 {
		return nil, errors.New("name is too short")
	}

	ruleMap := make(map[string][]int)
	data := []byte(cfg.Value)

	if err := json.Unmarshal(data, &ruleMap); err != nil {
		return nil, errors.WithMessage(err, "malformed json string for limit rule data")
	}

	strategy := rate.Strategy{
		ID:    cfg.ID,
		Name:  name,
		Rules: make(map[string]rate.Option),
	}

	for name, value := range ruleMap {
		if len(value) != 2 {
			return nil, errors.New("invalid limit option (must be rate/burst integer pairs)")
		}

		strategy.Rules[name] = rate.NewOption(value[0], value[1])
	}

	// calculate fingerprint
	strategy.MD5 = md5.Sum(data)
	return &strategy, nil
}
// storePruner observes bn partition changes and prunes archive log
// partitions for the observed entities.
type storePruner struct {
	// block number range partitioned store
	partitionedStore *bnPartitionedStore
	// channel on which new entity bnPartitions are announced
	newBnPartitionObsChan chan *bnPartition
	// set of entities with newly observed partitions, pending pruning:
	// entity (string) => schema.Tabler
	bnPartitionObsEntitySet sync.Map
}

// newStorePruner creates a pruner and starts its observer goroutine.
// NOTE(review): the observer goroutine has no shutdown path — it lives
// until newBnPartitionObsChan is closed or the process exits.
func newStorePruner(db *gorm.DB) *storePruner {
	pruner := &storePruner{
		newBnPartitionObsChan: make(chan *bnPartition, 1),
		partitionedStore:      newBnPartitionedStore(db),
	}

	go pruner.observe()
	return pruner
}

// observe consumes partition announcements and records each announced
// entity (with its tabler) so schedulePrune will later consider it.
func (sp *storePruner) observe() {
	for partition := range sp.newBnPartitionObsChan {
		if partition.tabler != nil {
			sp.bnPartitionObsEntitySet.Store(partition.Entity, partition.tabler)
		}
	}
}
// schedulePrune periodically (every 15 minutes) checks each observed entity
// and removes archive bn partitions beyond the configured maximum. Be noted
// this function blocks the caller thread indefinitely.
func (sp *storePruner) schedulePrune(config *Config) {
	ticker := time.NewTicker(time.Minute * 15)
	defer ticker.Stop()

	for range ticker.C {
		sp.bnPartitionObsEntitySet.Range(func(key, value interface{}) bool {
			entity := key.(string)
			tabler := value.(schema.Tabler)

			pruned, err := sp.partitionedStore.pruneArchivePartitions(
				entity, tabler, config.MaxBnRangedArchiveLogPartitions,
			)

			logger := logrus.WithField("entity", entity)

			if err != nil {
				logger.WithError(err).Error("Failed to prune archive log partitions")
			}

			if len(pruned) > 0 {
				logger.WithField("prunedPartitions", pruned).Info("Archive partitions pruned")
			}

			if err == nil {
				sp.bnPartitionObsEntitySet.Delete(entity)

				// To minimize the db performance loss, we only remove extra archive partitions
				// for one entity at a time (returning false stops this Range pass; remaining
				// entities are handled on later ticks).
				if len(pruned) > 0 {
					return false
				}
			}

			// continue to next entity
			return true
		})
	}
}

// errBreakFindInBatches is a sentinel used to break out of gorm's
// `FindInBatches` loop early once enough records have been collected.
var (
	errBreakFindInBatches = errors.New("break gorm DB `FindInBatches` loop")
)

// RateLimit is the rate limit keyset table model.
type RateLimit struct {
	ID        uint32
	SID       uint32 // strategy ID
	LimitType int    `gorm:"default:0;not null"`       // limit type
	LimitKey  string `gorm:"unique;size:128;not null"` // limit key
	CreatedAt time.Time
	UpdatedAt time.Time
}

func (RateLimit) TableName() string {
	return "ratelimits"
}

// RateLimitStore accesses the rate limit keyset table.
type RateLimitStore struct {
	*baseStore
}
*RateLimitStore { 34 | return &RateLimitStore{ 35 | baseStore: newBaseStore(db), 36 | } 37 | } 38 | 39 | func (rls *RateLimitStore) LoadRateLimitKeyset(filter *rate.KeysetFilter) (res []*rate.KeyInfo, err error) { 40 | db := rls.db 41 | 42 | if len(filter.KeySet) > 0 { 43 | db = db.Where("limit_key IN (?)", filter.KeySet) 44 | } 45 | 46 | if len(filter.SIDs) > 0 { 47 | db = db.Where("sid IN (?)", filter.SIDs) 48 | } 49 | 50 | if db == rls.db && filter.Limit <= 0 { 51 | return nil, nil 52 | } 53 | 54 | var totals int 55 | var ratelimits []RateLimit 56 | 57 | rs := db.FindInBatches(&ratelimits, 200, func(tx *gorm.DB, batch int) error { 58 | for i := range ratelimits { 59 | res = append(res, &rate.KeyInfo{ 60 | Type: ratelimits[i].LimitType, 61 | Key: ratelimits[i].LimitKey, 62 | SID: ratelimits[i].SID, 63 | }) 64 | } 65 | 66 | totals += len(ratelimits) 67 | if filter.Limit > 0 && totals >= filter.Limit { 68 | // enough records, break the batch loop 69 | return errBreakFindInBatches 70 | } 71 | 72 | return nil 73 | }) 74 | 75 | if rs.Error != errBreakFindInBatches { 76 | err = rs.Error 77 | } 78 | 79 | return 80 | } 81 | -------------------------------------------------------------------------------- /store/mysql/store_user.go: -------------------------------------------------------------------------------- 1 | package mysql 2 | 3 | import "gorm.io/gorm" 4 | 5 | // User represents a VIP user that provide specific archive node to query historical 6 | // event logs. 
type User struct {
	ID uint32
	// unique display name of the user
	Name string `gorm:"size:256;not null;unique"`
	// optional free-form note about the user
	Description string `gorm:"size:1024"`
	// unique API key; GetUserByKey looks users up by this column
	ApiKey string `gorm:"size:256;not null;unique"`
	// endpoint of the archive node dedicated to this user
	NodeUrl string `gorm:"size:256;not null"`
}

// TableName overrides gorm's default table name for the User model.
func (User) TableName() string {
	return "users"
}

// UserStore provides access to the `users` table.
type UserStore struct {
	*baseStore
}

// newUserStore creates a UserStore on top of the given gorm DB handle.
func newUserStore(db *gorm.DB) *UserStore {
	return &UserStore{
		baseStore: newBaseStore(db),
	}
}

// GetUserByKey looks a user up by API key. It returns the (zero-valued if not
// found) user, whether a matching row exists, and any query error.
func (us *UserStore) GetUserByKey(key string) (*User, bool, error) {
	var user User
	exists, err := us.exists(&user, "api_key = ?", key)
	return &user, exists, err
}
errors.New("chain re-orged") 51 | 52 | // operationable epoch data types 53 | OpEpochDataTypes = []EpochDataType{ 54 | EpochBlock, 55 | EpochTransaction, 56 | EpochLog, 57 | } 58 | ) 59 | 60 | func (edt EpochDataType) Name() string { 61 | switch edt { 62 | case EpochTransaction: 63 | return "tx" 64 | case EpochLog: 65 | return "log" 66 | case EpochBlock: 67 | return "block" 68 | } 69 | 70 | return "unknown" 71 | } 72 | 73 | func (edt EpochDataType) ToRemoveOption() EpochRemoveOption { 74 | switch edt { 75 | case EpochTransaction: 76 | return EpochRemoveTransaction 77 | case EpochLog: 78 | return EpochRemoveLog 79 | case EpochBlock: 80 | return EpochRemoveBlock 81 | default: 82 | logrus.WithField("name", edt.Name()).Error("remove option for data type not supported") 83 | return EpochRemoveOption(0x01 << 7) 84 | } 85 | } 86 | 87 | func (edt EpochDataType) ToDequeOption() EpochOpType { 88 | switch edt { 89 | case EpochTransaction: 90 | return EpochOpDequeueTx 91 | case EpochLog: 92 | return EpochOpDequeueLog 93 | case EpochBlock: 94 | return EpochOpDequeueBlock 95 | default: 96 | logrus.WithField("name", edt.Name()).Error("Do not to support deque option") 97 | return EpochOpType(math.MaxUint8) 98 | } 99 | } 100 | 101 | // EpochDataOpNumAlters to record num of alters (add or delete) for epoch data op 102 | type EpochDataOpNumAlters map[EpochDataType]int64 103 | 104 | // EpochDataOpAffects to record affects for epoch data op 105 | type EpochDataOpAffects struct { 106 | OpType EpochOpType // op type 107 | PushUpFromEpoch uint64 // for push op 108 | PushUpToEpoch uint64 // for push op 109 | PopUntilEpoch uint64 // for pop op 110 | DequeueUntilEpoch uint64 // for dequeue op 111 | NumAlters EpochDataOpNumAlters // num of adds/deletes for epoch op 112 | } 113 | 114 | func NewEpochDataOpAffects(opType EpochOpType, opEpochs ...uint64) *EpochDataOpAffects { 115 | a := EpochDataOpAffects{ 116 | OpType: opType, 117 | NumAlters: EpochDataOpNumAlters{}, 118 | } 119 | 120 | switch 
opType { 121 | case EpochOpPush: 122 | a.PushUpFromEpoch, a.PushUpToEpoch = opEpochs[0], opEpochs[1] 123 | case EpochOpPop: 124 | a.PopUntilEpoch = opEpochs[0] 125 | case EpochOpDequeueTx: 126 | fallthrough 127 | case EpochOpDequeueBlock: 128 | fallthrough 129 | case EpochOpDequeueLog: 130 | a.DequeueUntilEpoch = opEpochs[0] 131 | } 132 | 133 | return &a 134 | } 135 | 136 | func (affects EpochDataOpAffects) String() string { 137 | strBuilder := &strings.Builder{} 138 | strBuilder.Grow(len(affects.NumAlters) * 30) 139 | 140 | for t, v := range affects.NumAlters { 141 | strBuilder.WriteString(fmt.Sprintf("%v:%v;", t.Name(), v)) 142 | } 143 | 144 | return strBuilder.String() 145 | } 146 | 147 | // Merge merges operation history into the receiver 148 | func (affects EpochDataOpAffects) Merge(na EpochDataOpNumAlters) { 149 | for k, v := range na { 150 | affects.NumAlters[k] += v 151 | } 152 | } 153 | -------------------------------------------------------------------------------- /store/variadic_value.go: -------------------------------------------------------------------------------- 1 | package store 2 | 3 | import "github.com/Conflux-Chain/go-conflux-sdk/types" 4 | 5 | // VariadicValue represents an union value, including null, single value or multiple values. 
type VariadicValue struct {
	count    int
	single   string
	multiple map[string]bool
}

// NewVariadicValue constructs a VariadicValue from the given values with
// duplicates collapsed: zero values yield a null value, exactly one distinct
// value yields a single value, otherwise a multi-value set is kept.
func NewVariadicValue(values ...string) VariadicValue {
	switch len(values) {
	case 0:
		return VariadicValue{}
	case 1:
		return VariadicValue{count: 1, single: values[0]}
	}

	dedup := make(map[string]bool, len(values))
	for _, v := range values {
		dedup[v] = true
	}

	// all inputs were the same value: collapse back to the single form
	if len(dedup) == 1 {
		return VariadicValue{count: 1, single: values[0]}
	}

	return VariadicValue{count: len(dedup), multiple: dedup}
}
| 94 | return result 95 | } 96 | 97 | func (vv *VariadicValue) Count() int { 98 | return vv.count 99 | } 100 | 101 | func (vv *VariadicValue) IsNull() bool { 102 | return vv.count == 0 103 | } 104 | 105 | func (vv *VariadicValue) Single() (string, bool) { 106 | if vv.count == 1 { 107 | return vv.single, true 108 | } 109 | 110 | return "", false 111 | } 112 | 113 | func (vv *VariadicValue) FlatMultiple() ([]string, bool) { 114 | if vv.count < 2 { 115 | return nil, false 116 | } 117 | 118 | result := make([]string, 0, vv.count) 119 | 120 | for k := range vv.multiple { 121 | result = append(result, k) 122 | } 123 | 124 | return result, true 125 | } 126 | -------------------------------------------------------------------------------- /sync/catchup/config.go: -------------------------------------------------------------------------------- 1 | package catchup 2 | 3 | type config struct { 4 | // list of Conflux fullnodes to accelerate catching up until the latest stable epoch 5 | CfxPool []string 6 | // threshold for num of db rows per batch persistence 7 | DbRowsThreshold int `default:"2500"` 8 | // max number of db rows collected before persistence 9 | MaxDbRows int `default:"7500"` 10 | // capacity of channel per worker to buffer queried epoch data 11 | WorkerChanSize int `default:"5"` 12 | } 13 | -------------------------------------------------------------------------------- /sync/catchup/worker.go: -------------------------------------------------------------------------------- 1 | package catchup 2 | 3 | import ( 4 | "context" 5 | "sync" 6 | "time" 7 | 8 | sdk "github.com/Conflux-Chain/go-conflux-sdk" 9 | "github.com/scroll-tech/rpc-gateway/store" 10 | "github.com/scroll-tech/rpc-gateway/util/rpc" 11 | "github.com/sirupsen/logrus" 12 | ) 13 | 14 | type worker struct { 15 | // worker name 16 | name string 17 | // result channel to collect queried epoch data 18 | resultChan chan *store.EpochData 19 | // conflux sdk client delegated to fetch epoch data 20 | cfx 
sdk.ClientOperator 21 | } 22 | 23 | func mustNewWorker(name, nodeUrl string, chanSize int) *worker { 24 | return &worker{ 25 | name: name, 26 | resultChan: make(chan *store.EpochData, chanSize), 27 | cfx: rpc.MustNewCfxClient(nodeUrl), 28 | } 29 | } 30 | 31 | func (w *worker) Sync(ctx context.Context, wg *sync.WaitGroup, epochFrom, epochTo, stepN uint64) { 32 | defer wg.Done() 33 | 34 | for eno := epochFrom; eno <= epochTo; { 35 | epochData, ok := w.fetchEpoch(ctx, eno) 36 | if !ok { 37 | return 38 | } 39 | 40 | select { 41 | case <-ctx.Done(): 42 | return 43 | default: 44 | select { 45 | case <-ctx.Done(): 46 | return 47 | case w.resultChan <- epochData: 48 | eno += stepN 49 | } 50 | } 51 | } 52 | } 53 | 54 | func (w *worker) Close() { 55 | w.cfx.Close() 56 | close(w.resultChan) 57 | } 58 | 59 | func (w *worker) Data() <-chan *store.EpochData { 60 | return w.resultChan 61 | } 62 | 63 | func (w *worker) fetchEpoch(ctx context.Context, epochNo uint64) (*store.EpochData, bool) { 64 | for try := 1; ; try++ { 65 | select { 66 | case <-ctx.Done(): 67 | return nil, false 68 | default: 69 | } 70 | 71 | epochData, err := store.QueryEpochData(w.cfx, epochNo, true) 72 | if err == nil { 73 | return &epochData, true 74 | } 75 | 76 | logger := logrus.WithFields(logrus.Fields{ 77 | "epochNo": epochNo, "workerName": w.name, 78 | }).WithError(err) 79 | 80 | logf := logger.Debug 81 | if try%50 == 0 { 82 | logf = logger.Error 83 | } 84 | 85 | logf("Catch-up worker failed to fetch epoch") 86 | time.Sleep(time.Second) 87 | } 88 | } 89 | -------------------------------------------------------------------------------- /sync/epoch_pivot.go: -------------------------------------------------------------------------------- 1 | package sync 2 | 3 | import ( 4 | "github.com/Conflux-Chain/go-conflux-sdk/types" 5 | "github.com/pkg/errors" 6 | citypes "github.com/scroll-tech/rpc-gateway/types" 7 | ) 8 | 9 | // epochPivotWindow caches epoch pivot info with limited capacity. 
type epochPivotWindow struct {
	// hashmap to cache pivot hash of epoch (epoch number => pivot block hash)
	epochToPivotHash map[uint64]types.Hash
	// maximum number of epochs to hold
	capacity uint32
	// cached epoch range; both bounds are citypes.EpochNumberNil when unset
	epochFrom, epochTo uint64
}

// newEpochPivotWindow creates an empty pivot window holding at most `capacity` epochs.
func newEpochPivotWindow(capacity uint32) *epochPivotWindow {
	win := &epochPivotWindow{capacity: capacity}
	win.reset()

	return win
}

// getPivotHash returns the cached pivot hash for the given epoch number and
// whether it was found in the cache.
func (win *epochPivotWindow) getPivotHash(epoch uint64) (types.Hash, bool) {
	pivotHash, ok := win.epochToPivotHash[epoch]
	return pivotHash, ok
}

// reset clears the window back to the uninitialized state: range unset and
// hash cache emptied.
func (win *epochPivotWindow) reset() {
	win.epochFrom = citypes.EpochNumberNil
	win.epochTo = citypes.EpochNumberNil

	win.epochToPivotHash = make(map[uint64]types.Hash)
}

// push appends a new pivot block to the window. When the window is non-empty,
// the block must extend the cached range by exactly one epoch and its parent
// hash must match the cached pivot hash of the current top epoch; otherwise an
// error is returned. The oldest cached epochs are evicted to make room when the
// window is at capacity.
func (win *epochPivotWindow) push(pivotBlock *types.Block) error {
	pivotEpochNum := pivotBlock.EpochNumber.ToInt().Uint64()

	if win.size() > 0 { // validate incoming pivot block
		// new epoch must be exactly one past the current top
		if (win.epochTo + 1) != pivotEpochNum {
			return errors.Errorf(
				"incontinuous epoch pushed, expect %v got %v", win.epochTo+1, pivotEpochNum,
			)
		}

		// new block must chain onto the cached pivot hash of the top epoch
		latestPivotHash, ok := win.epochToPivotHash[win.epochTo]
		if !ok || pivotBlock.ParentHash != latestPivotHash {
			return errors.Errorf(
				"mismatched parent hash, expect %v got %v", latestPivotHash, pivotBlock.ParentHash,
			)
		}
	}

	// reclaim in case of memory blast: evict from the oldest epoch until
	// there is room for the incoming one
	for win.size() != 0 && win.size() >= win.capacity {
		delete(win.epochToPivotHash, win.epochFrom)
		win.epochFrom++
	}

	// cache store epoch pivot hash
	win.epochToPivotHash[pivotEpochNum] = pivotBlock.Hash
	win.expandTo(pivotEpochNum)

	return nil
}
// popn removes cached pivot hashes for every epoch >= epochUntil, popping from
// the newest end of the window. It is a no-op when the window is empty or its
// top is already below epochUntil; the window is reset to the unset state once
// fully drained.
//
// NOTE: when epochUntil is 0, `win.epochTo--` may wrap the unsigned counter,
// but that only happens after the last entry was deleted, so the subsequent
// size() == 0 check fires and resets the window before the wrapped value can
// be misused.
func (win *epochPivotWindow) popn(epochUntil uint64) {
	if win.size() == 0 || win.epochTo < epochUntil {
		return
	}

	for win.epochTo >= epochUntil {
		delete(win.epochToPivotHash, win.epochTo)
		win.epochTo--

		if win.size() == 0 {
			win.reset()
			return
		}
	}
}

// isSet indicates whether the cached epoch range has been initialized.
func (win *epochPivotWindow) isSet() bool {
	return win.epochFrom != citypes.EpochNumberNil && win.epochTo != citypes.EpochNumberNil
}

// size returns the number of epochs currently covered by the window
// (0 when the range is unset or inverted).
func (win *epochPivotWindow) size() uint32 {
	if !win.isSet() || win.epochFrom > win.epochTo {
		return 0
	}

	return uint32(win.epochTo - win.epochFrom + 1)
}
45 | assert.Equal(t, uint32(1), w.size()) 46 | assert.Equal(t, uint64(0), w.epochFrom) 47 | assert.Equal(t, uint64(0), w.epochTo) 48 | assert.True(t, w.isSet()) 49 | 50 | err = w.push(&types.Block{ 51 | BlockHeader: types.BlockHeader{ 52 | EpochNumber: (*hexutil.Big)(big.NewInt(1)), 53 | ParentHash: "epoch0", 54 | Hash: "epoch1", 55 | }, 56 | }) 57 | assert.NoError(t, err) 58 | assert.Equal(t, uint32(2), w.size()) 59 | assert.Equal(t, uint64(0), w.epochFrom) 60 | assert.Equal(t, uint64(1), w.epochTo) 61 | 62 | // test push error - mismatched parent hash 63 | err = w.push(&types.Block{ 64 | BlockHeader: types.BlockHeader{ 65 | EpochNumber: (*hexutil.Big)(big.NewInt(2)), 66 | ParentHash: "epoch1`", 67 | Hash: "epoch2`", 68 | }, 69 | }) 70 | assert.Error(t, err) 71 | t.Logf("push error: %v", err) 72 | 73 | // test push error - incontinuous epoch 74 | err = w.push(&types.Block{ 75 | BlockHeader: types.BlockHeader{ 76 | EpochNumber: (*hexutil.Big)(big.NewInt(20)), 77 | ParentHash: "epoch1", 78 | Hash: "epoch2", 79 | }, 80 | }) 81 | assert.Error(t, err) 82 | t.Logf("push error: %v", err) 83 | 84 | // test auto reclaim over capacity on push 85 | err = w.push(&types.Block{ 86 | BlockHeader: types.BlockHeader{ 87 | EpochNumber: (*hexutil.Big)(big.NewInt(2)), 88 | ParentHash: "epoch1", 89 | Hash: "epoch2", 90 | }, 91 | }) 92 | assert.NoError(t, err) 93 | assert.Equal(t, uint32(2), w.size()) 94 | assert.Equal(t, uint64(1), w.epochFrom) 95 | assert.Equal(t, uint64(2), w.epochTo) 96 | 97 | // test getPivotHash 98 | ph, ok := w.getPivotHash(0) 99 | assert.False(t, ok) 100 | assert.NotEqual(t, "epoch0", string(ph)) 101 | 102 | ph, _ = w.getPivotHash(1) 103 | assert.Equal(t, "epoch1", string(ph)) 104 | 105 | ph, _ = w.getPivotHash(2) 106 | assert.Equal(t, "epoch2", string(ph)) 107 | 108 | // test pop empty 109 | w.popn(3) 110 | assert.Equal(t, uint32(2), w.size()) 111 | 112 | // test pop underflow 113 | w.popn(0) 114 | assert.Equal(t, uint32(0), w.size()) 115 | assert.False(t, 
w.isSet()) 116 | } 117 | -------------------------------------------------------------------------------- /sync/epoch_window.go: -------------------------------------------------------------------------------- 1 | package sync 2 | 3 | import ( 4 | "github.com/scroll-tech/rpc-gateway/types" 5 | "github.com/scroll-tech/rpc-gateway/util" 6 | ) 7 | 8 | // epochWindow maintains a continuous epoch window with a fixed size capacity. 9 | type epochWindow struct { 10 | capacity uint32 11 | epochFrom, epochTo uint64 12 | } 13 | 14 | func newEpochWindow(capacity uint32) *epochWindow { 15 | return &epochWindow{ 16 | capacity: capacity, 17 | epochFrom: types.EpochNumberNil, 18 | epochTo: types.EpochNumberNil, 19 | } 20 | } 21 | 22 | // Reset epochFrom and epochTo of the epoch window to some specified epoch number. 23 | func (win *epochWindow) reset(epochFrom, epochTo uint64) { 24 | win.epochFrom, win.epochTo = epochFrom, epochTo 25 | } 26 | 27 | // Expand the epoch window from a smaller epoch number. 28 | func (win *epochWindow) expandFrom(epochNo uint64) { 29 | if win.isSet() { 30 | win.epochFrom = util.MinUint64(win.epochFrom, epochNo) 31 | } else { 32 | win.reset(epochNo, epochNo) 33 | } 34 | } 35 | 36 | // Expand the epoch window to a bigger epoch number. 37 | func (win *epochWindow) expandTo(epochNo uint64) { 38 | if win.isSet() { 39 | win.epochTo = util.MaxUint64(win.epochTo, epochNo) 40 | } else { 41 | win.reset(epochNo, epochNo) 42 | } 43 | } 44 | 45 | // Update epochTo of the epoch window to specified number no matter expansion or shrink. 46 | func (win *epochWindow) updateTo(epochTo uint64) { 47 | if win.isSet() { 48 | win.epochTo = epochTo 49 | } else { 50 | win.reset(epochTo, epochTo) 51 | } 52 | } 53 | 54 | // Peek if pivot switch will happen if new epoch appended to expand. 
55 | func (win *epochWindow) peekWillPivotSwitch(epochNo uint64) bool { 56 | if win.isSet() && win.epochFrom > epochNo { 57 | return true 58 | } 59 | 60 | return false 61 | } 62 | 63 | // Peek if overflow will happen if new epoch appended to expand. 64 | func (win *epochWindow) peekWillOverflow(epochNo uint64) bool { 65 | if win.isSet() && epochNo >= win.epochFrom && (epochNo-win.epochFrom+1) > uint64(win.capacity) { 66 | return true 67 | } 68 | 69 | return false 70 | } 71 | 72 | // Peek sync info by shrinking no more than the specified size of epoch(s) from the epoch window. 73 | func (win *epochWindow) peekShrinkFrom(specSize uint32) (syncFrom uint64, syncSize uint32) { 74 | if win.isEmpty() { 75 | return 0, 0 76 | } 77 | 78 | return win.epochFrom, util.MinUint32(win.size(), specSize) 79 | } 80 | 81 | // Shrink no more than the specified size of epoch(s) from the epoch window. 82 | func (win *epochWindow) shrinkFrom(specSize uint32) (uint64, uint32) { 83 | if win.isEmpty() { 84 | return 0, 0 85 | } 86 | 87 | syncFrom, syncSize := win.epochFrom, util.MinUint32(win.size(), specSize) 88 | win.epochFrom += uint64(syncSize) 89 | 90 | return syncFrom, syncSize 91 | } 92 | 93 | // Check if the epoch window is empty. 94 | func (win *epochWindow) isEmpty() bool { 95 | return !win.isSet() || win.epochFrom > win.epochTo 96 | } 97 | 98 | // Check if the epoch window is set. 99 | func (win *epochWindow) isSet() bool { 100 | return win.epochFrom != types.EpochNumberNil && win.epochTo != types.EpochNumberNil 101 | } 102 | 103 | // Return the size of the sync window. 
104 | func (win *epochWindow) size() uint32 { 105 | if !win.isSet() || win.epochFrom > win.epochTo { 106 | return 0 107 | } 108 | 109 | return uint32(win.epochTo - win.epochFrom + 1) 110 | } 111 | -------------------------------------------------------------------------------- /sync/epoch_window_test.go: -------------------------------------------------------------------------------- 1 | package sync 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/scroll-tech/rpc-gateway/util" 7 | "github.com/stretchr/testify/assert" 8 | ) 9 | 10 | func TestNewEpochWindow(t *testing.T) { 11 | ew := newEpochWindow(1000) // create an uninitialized epoch window 12 | assert.Equal(t, uint32(0), ew.size()) 13 | assert.True(t, ew.isEmpty()) 14 | assert.False(t, ew.isSet()) 15 | 16 | ew.reset(15023, 15923) 17 | assert.Equal(t, uint32(1+(15923-15023)), ew.size()) 18 | assert.False(t, ew.isEmpty()) 19 | assert.True(t, ew.isSet()) 20 | 21 | ew.reset(19252, 19102) 22 | assert.Equal(t, uint32(0), ew.size()) 23 | assert.True(t, ew.isEmpty()) 24 | assert.True(t, ew.isSet()) 25 | } 26 | 27 | func TestEpochWindowExpandingFrom(t *testing.T) { 28 | ew := newEpochWindow(1000) 29 | 30 | ew.expandFrom(14575) // expand uninitialized epoch window 31 | assert.Equal(t, uint32(1), ew.size()) 32 | assert.False(t, ew.isEmpty()) 33 | assert.True(t, ew.isSet()) 34 | 35 | ew.expandFrom(14580) // expand epoch window from some invalid upper bound 36 | assert.Equal(t, uint32(1), ew.size()) 37 | 38 | ew.expandFrom(14570) // expand epoch window from some valid lower bound 39 | assert.Equal(t, uint32(1+(14575-14570)), ew.size()) 40 | } 41 | 42 | func TestEpochWindowExpandingTo(t *testing.T) { 43 | ew := newEpochWindow(1000) 44 | 45 | ew.expandTo(14570) // expand uninitialized epoch window 46 | assert.Equal(t, uint32(1), ew.size()) 47 | assert.False(t, ew.isEmpty()) 48 | assert.True(t, ew.isSet()) 49 | 50 | ew.expandTo(14565) // expand epoch window from some invalid lower bound 51 | assert.Equal(t, uint32(1), 
ew.size()) 52 | 53 | ew.expandTo(14579) // expand epoch window to some valid upper bound 54 | assert.Equal(t, uint32(1+(14579-14570)), ew.size()) 55 | } 56 | 57 | func TestEpochWindowExpanding(t *testing.T) { 58 | ew := newEpochWindow(1000) 59 | 60 | ew.expandFrom(14935) // expand uninitialized epoch window 61 | assert.Equal(t, uint32(1), ew.size()) 62 | assert.False(t, ew.isEmpty()) 63 | assert.True(t, ew.isSet()) 64 | 65 | ew.expandTo(14970) // expand epoch window to some valid upper bound 66 | assert.Equal(t, uint32(1+(14970-14935)), ew.size()) 67 | } 68 | 69 | func TestEpochWindowPeekWillPivotSwitch(t *testing.T) { 70 | ew := newEpochWindow(1000) 71 | assert.False(t, ew.peekWillPivotSwitch(0)) 72 | 73 | ew.reset(15923, 15793) 74 | assert.False(t, ew.peekWillPivotSwitch(15983)) 75 | assert.False(t, ew.peekWillPivotSwitch(15923)) 76 | assert.True(t, ew.peekWillPivotSwitch(15922)) 77 | } 78 | 79 | func TestEpochWindowPeekWillOverflow(t *testing.T) { 80 | ew := newEpochWindow(1000) 81 | assert.False(t, ew.peekWillOverflow(13202)) 82 | 83 | ew.reset(21723, 21883) 84 | assert.False(t, ew.peekWillOverflow(21953)) 85 | assert.True(t, ew.peekWillOverflow(23293)) 86 | } 87 | 88 | func TestEpochWindowShrinkFrom(t *testing.T) { 89 | ew := newEpochWindow(1000) 90 | sf, ss := ew.peekShrinkFrom(10) 91 | assert.Equal(t, uint64(0), sf) 92 | assert.Equal(t, uint32(0), ss) 93 | 94 | ew.reset(23892, 24503) 95 | sf, ss = ew.peekShrinkFrom(5000) 96 | assert.Equal(t, ew.epochFrom, sf) 97 | assert.Equal(t, util.MinUint32(ew.size(), 5000), ss) 98 | 99 | sf2, ss2 := ew.shrinkFrom(5000) 100 | assert.Equal(t, sf, sf2) 101 | assert.Equal(t, ss, ss2) 102 | assert.True(t, ew.isEmpty()) 103 | } 104 | -------------------------------------------------------------------------------- /sync/sync_db_test.go: -------------------------------------------------------------------------------- 1 | package sync 2 | 3 | import ( 4 | "math" 5 | "testing" 6 | 7 | sdk "github.com/Conflux-Chain/go-conflux-sdk" 
8 | "github.com/scroll-tech/rpc-gateway/store" 9 | citypes "github.com/scroll-tech/rpc-gateway/types" 10 | "github.com/stretchr/testify/assert" 11 | ) 12 | 13 | func TestFindFirstRevertedEpochInRange(t *testing.T) { 14 | syncer := &DatabaseSyncer{} 15 | 16 | testCases := []struct { 17 | epochRange citypes.RangeUint64 18 | firstReverted uint64 19 | expected uint64 20 | }{ 21 | { // first reverted in middle of epoch range 22 | epochRange: citypes.RangeUint64{From: 10, To: 50}, 23 | firstReverted: 20, 24 | expected: 20, 25 | }, 26 | { // first reverted in right edge of epoch range 27 | epochRange: citypes.RangeUint64{From: 10, To: 50}, 28 | firstReverted: 50, 29 | expected: 50, 30 | }, 31 | { // first reverted in left edge of epoch range 32 | epochRange: citypes.RangeUint64{From: 10, To: 50}, 33 | firstReverted: 1, 34 | expected: 10, 35 | }, 36 | { // first reverted out of right side of epoch range 37 | epochRange: citypes.RangeUint64{From: 10, To: 50}, 38 | firstReverted: 51, 39 | expected: 0, 40 | }, 41 | { // first reverted out of left side of epoch range 42 | epochRange: citypes.RangeUint64{From: 10, To: 50}, 43 | firstReverted: 1, 44 | expected: 10, 45 | }, 46 | } 47 | 48 | for i, tc := range testCases { 49 | t.Logf(">>>>>> run testcase %v", i+1) 50 | 51 | // Epoch reverted checker 52 | checker := func(cfx sdk.ClientOperator, s store.StackOperable, epochNo uint64) (bool, error) { 53 | t.Logf("check epoch: %v", epochNo) 54 | if epochNo >= tc.firstReverted { 55 | return true, nil 56 | } 57 | return false, nil 58 | } 59 | res, err := findFirstRevertedEpochInRange(syncer.cfx, syncer.db, tc.epochRange, checker) 60 | assert.Nil(t, err) 61 | assert.Equal(t, tc.expected, res) 62 | } 63 | } 64 | 65 | func TestEnsureEpochRangeNotRerverted(t *testing.T) { 66 | syncer := &DatabaseSyncer{} 67 | 68 | testCases := []struct { 69 | epochRange citypes.RangeUint64 70 | firstReverted uint64 71 | expectedPrunedEpochFrom uint64 72 | }{ 73 | { // first reverted in middle of epoch range 
74 | epochRange: citypes.RangeUint64{From: 100, To: 501}, 75 | firstReverted: 200, 76 | expectedPrunedEpochFrom: 200, 77 | }, 78 | { // first reverted in left edge of epoch range 79 | epochRange: citypes.RangeUint64{From: 100, To: 501}, 80 | firstReverted: 100, 81 | expectedPrunedEpochFrom: 100, 82 | }, 83 | { // first reverted in right edge of epoch range 84 | epochRange: citypes.RangeUint64{From: 100, To: 501}, 85 | firstReverted: 501, 86 | expectedPrunedEpochFrom: 501, 87 | }, 88 | { // first reverted out right side of epoch range 89 | epochRange: citypes.RangeUint64{From: 100, To: 501}, 90 | firstReverted: 600, 91 | expectedPrunedEpochFrom: math.MaxUint64, // should never prune at all 92 | }, 93 | { // first reverted out left side of epoch range 94 | epochRange: citypes.RangeUint64{From: 100, To: 501}, 95 | firstReverted: 10, 96 | expectedPrunedEpochFrom: 100, 97 | }, 98 | } 99 | 100 | for i, tc := range testCases { 101 | t.Logf(">>>>>> run testcase %v", i+1) 102 | 103 | // Epoch reverted checker 104 | checker := func(cfx sdk.ClientOperator, s store.StackOperable, epochNo uint64) (bool, error) { 105 | t.Logf("check epoch: %v", epochNo) 106 | if epochNo >= tc.firstReverted { 107 | return true, nil 108 | } 109 | return false, nil 110 | } 111 | searcher := func(cfx sdk.ClientOperator, s store.StackOperable, epochRange citypes.RangeUint64) (uint64, error) { 112 | return findFirstRevertedEpochInRange(syncer.cfx, syncer.db, epochRange, checker) 113 | } 114 | pruner := func(s store.StackOperable, epochRange citypes.RangeUint64) error { 115 | assert.Equal(t, tc.expectedPrunedEpochFrom, epochRange.From) 116 | return nil 117 | } 118 | err := ensureEpochRangeNotRerverted(syncer.cfx, syncer.db, tc.epochRange, searcher, pruner) 119 | assert.Nil(t, err) 120 | } 121 | } 122 | -------------------------------------------------------------------------------- /types/gastation.go: -------------------------------------------------------------------------------- 1 | package types 2 
// RangeUint64 is an inclusive [From, To] range of uint64 values.
type RangeUint64 struct {
	From uint64
	To   uint64
}

// String renders the range in inclusive-interval notation, e.g. "[1, 5]".
func (r RangeUint64) String() string {
	return fmt.Sprintf("[%v, %v]", r.From, r.To)
}

// ToSlice flattens the range bounds into a slice, collapsing a degenerate
// range (From == To) into a single element.
func (r *RangeUint64) ToSlice() []uint64 {
	if r.From == r.To {
		return []uint64{r.From}
	}

	return []uint64{r.From, r.To}
}

// Constant placehold for uninitialized (or unset) epoch number
const EpochNumberNil uint64 = math.MaxUint64

var EpochRangeNil RangeUint64 = RangeUint64{From: EpochNumberNil, To: EpochNumberNil}
viper.GetString("alert.dingtalk.secret"), dtSecret) 22 | } 23 | -------------------------------------------------------------------------------- /util/alert/dingtalk.go: -------------------------------------------------------------------------------- 1 | package alert 2 | 3 | import ( 4 | "fmt" 5 | "strings" 6 | "time" 7 | 8 | "github.com/Conflux-Chain/go-conflux-util/viper" 9 | "github.com/royeo/dingrobot" 10 | ) 11 | 12 | const ( 13 | dingTalkAlertMsgTpl = "logrus alert notification\ntags:\t%v;\nlevel:\t%v;\nbrief:\t%v;\ndetail:\t%v;\ntime:\t%v\n" 14 | ) 15 | 16 | type config struct { 17 | // custom tags are usually used to differentiate between different networks and enviroments 18 | // such as mainnet/testnet, prod/test/dev or any custom info for more details. 19 | CustomTags []string `default:"[testnet,dev]"` 20 | DingTalk struct { 21 | Enabled bool 22 | WebHook string 23 | Secret string 24 | AtMobiles []string 25 | IsAtAll bool 26 | } 27 | } 28 | 29 | var ( 30 | conf config 31 | 32 | dingTalkCustomTagsStr string 33 | dingRobot dingrobot.Roboter 34 | ) 35 | 36 | func InitDingRobot() { 37 | viper.MustUnmarshalKey("alert", &conf) 38 | 39 | if conf.DingTalk.Enabled { 40 | dingTalkCustomTagsStr = strings.Join(conf.CustomTags, "/") 41 | dingRobot = dingrobot.NewRobot(conf.DingTalk.WebHook) 42 | dingRobot.SetSecret(conf.DingTalk.Secret) 43 | } 44 | } 45 | 46 | func SendDingTalkTextMessage(level, brief, detail string) error { 47 | if dingRobot == nil { 48 | return nil 49 | } 50 | 51 | nowStr := time.Now().Format("2006-01-02T15:04:05-0700") 52 | msg := fmt.Sprintf(dingTalkAlertMsgTpl, dingTalkCustomTagsStr, level, brief, detail, nowStr) 53 | 54 | return dingRobot.SendText(msg, conf.DingTalk.AtMobiles, conf.DingTalk.IsAtAll) 55 | } 56 | -------------------------------------------------------------------------------- /util/alert/logrus_hook.go: -------------------------------------------------------------------------------- 1 | package alert 2 | 3 | import ( 4 | 
"strings" 5 | 6 | "github.com/sirupsen/logrus" 7 | ) 8 | 9 | type LogrusAlertHook struct { 10 | levels []logrus.Level 11 | } 12 | 13 | func NewLogrusAlertHook(lvls []logrus.Level) *LogrusAlertHook { 14 | return &LogrusAlertHook{levels: lvls} 15 | } 16 | 17 | func (hook *LogrusAlertHook) Levels() []logrus.Level { 18 | return hook.levels 19 | } 20 | 21 | func (hook *LogrusAlertHook) Fire(logEntry *logrus.Entry) error { 22 | level := logEntry.Level.String() 23 | brief := logEntry.Message 24 | 25 | formatter := &logrus.JSONFormatter{} 26 | detailBytes, _ := formatter.Format(logEntry) 27 | // Trim last newline char to uniform message format 28 | detail := strings.TrimSuffix(string(detailBytes), "\n") 29 | 30 | return SendDingTalkTextMessage(level, brief, detail) 31 | } 32 | -------------------------------------------------------------------------------- /util/alert/logrus_hook_test.go: -------------------------------------------------------------------------------- 1 | package alert 2 | 3 | import ( 4 | "strings" 5 | "testing" 6 | 7 | "github.com/pkg/errors" 8 | "github.com/sirupsen/logrus" 9 | "github.com/spf13/viper" 10 | ) 11 | 12 | func mustInitViperFromConfig() { 13 | viper.AutomaticEnv() 14 | viper.SetEnvPrefix("infura") 15 | viper.SetEnvKeyReplacer(strings.NewReplacer(".", "_")) 16 | 17 | viper.SetConfigName("config") 18 | viper.SetConfigType("yml") 19 | viper.GetViper().AddConfigPath("../config") 20 | 21 | if err := viper.ReadInConfig(); err != nil { 22 | panic(errors.WithMessage(err, "Failed to initialize viper")) 23 | } 24 | } 25 | 26 | func TestLogrusAddHooks(t *testing.T) { 27 | mustInitViperFromConfig() 28 | // Add alert hook for logrus fatal/warn/error level 29 | hookLevels := []logrus.Level{logrus.FatalLevel, logrus.WarnLevel, logrus.ErrorLevel} 30 | logrus.AddHook(NewLogrusAlertHook(hookLevels)) 31 | 32 | // Need to manually check if the message sent to dingtalk group chat 33 | logrus.Warn("Test logrus add hooks warn") 34 | logrus.Error("Test logrus add 
hooks error") 35 | logrus.Fatal("Test logrus add hooks fatal") 36 | 37 | } 38 | -------------------------------------------------------------------------------- /util/blacklist/contract_addr.go: -------------------------------------------------------------------------------- 1 | package blacklist 2 | 3 | import ( 4 | "encoding/json" 5 | "strings" 6 | 7 | "github.com/Conflux-Chain/go-conflux-sdk/types/cfxaddress" 8 | "github.com/sirupsen/logrus" 9 | "github.com/spf13/viper" 10 | ) 11 | 12 | var ( 13 | // blacklisted contract address set 14 | blacklistedAddressSet = make(map[string]*BlacklistedAddrInfo) 15 | ) 16 | 17 | // BlacklistedAddrInfo is used to store blacklisted contract address info. 18 | type BlacklistedAddrInfo struct { 19 | Address string 20 | // epoch until which the contract address is ignored, if 0 means forever. 21 | Epoch uint64 22 | } 23 | 24 | func init() { 25 | // Load blacklisted contract address. 26 | var blackListAddrStrs string 27 | if err := viper.UnmarshalKey("sync.blackListAddrs", &blackListAddrStrs); err != nil { 28 | logrus.WithError(err).Fatal("Failed to load blacklisted contract address") 29 | } 30 | 31 | if len(blackListAddrStrs) == 0 { 32 | return 33 | } 34 | 35 | var addrInfos []*BlacklistedAddrInfo 36 | if err := json.Unmarshal(([]byte)(blackListAddrStrs), &addrInfos); err != nil { 37 | logrus.WithError(err).Fatal("Failed to parse blacklisted contract address json") 38 | } 39 | 40 | for i := range addrInfos { 41 | blacklistedAddressSet[strings.ToLower(addrInfos[i].Address)] = addrInfos[i] 42 | logrus.WithField("addrInfo", addrInfos[i]).Info("Loaded blacklisted contract address") 43 | } 44 | } 45 | 46 | // Check if address blacklisted or not for specific epoch height. 
// IsAddressBlacklisted checks if the address is blacklisted, optionally for a
// specific epoch height (only the first epoch argument is considered).
// An entry with Epoch == 0 blacklists the address forever; otherwise the
// address is blacklisted only up to (and including) that epoch.
func IsAddressBlacklisted(addr *cfxaddress.Address, epochs ...uint64) bool {
	// Fast path: nothing configured.
	if len(blacklistedAddressSet) == 0 {
		return false
	}

	// Lookup key is the lowercased base32 form, matching how entries were stored.
	addrStr := addr.MustGetBase32Address()
	addrStr = strings.ToLower(addrStr)

	info, exists := blacklistedAddressSet[addrStr]
	if !exists {
		return false
	}

	// Blacklisted when no epoch given, blacklisted forever (Epoch == 0),
	// or the queried epoch does not exceed the configured cut-off epoch.
	return len(epochs) == 0 || info.Epoch == 0 || epochs[0] <= info.Epoch
}
-------------------------------------------------------------------------------- /util/encoding.go: -------------------------------------------------------------------------------- 1 | package util 2 | 3 | import ( 4 | "encoding/json" 5 | 6 | "github.com/ethereum/go-ethereum/rlp" 7 | "github.com/sirupsen/logrus" 8 | ) 9 | 10 | func MustMarshalJson(v interface{}) []byte { 11 | if IsInterfaceValNil(v) { 12 | return nil 13 | } 14 | 15 | data, err := json.Marshal(v) 16 | if err != nil { 17 | logrus.WithError(err).Fatalf("Failed to marshal data to JSON, value = %+v", v) 18 | } 19 | return data 20 | } 21 | 22 | func MustUnmarshalJson(data []byte, v interface{}) { 23 | if err := json.Unmarshal(data, v); err != nil { 24 | logrus.WithError(err).Fatalf( 25 | "Failed to unmarshal JSON data, v = %v, data = %x", v, data, 26 | ) 27 | } 28 | } 29 | 30 | func MustMarshalRLP(v interface{}) []byte { 31 | if IsInterfaceValNil(v) { 32 | return nil 33 | } 34 | 35 | data, err := rlp.EncodeToBytes(v) 36 | if err != nil { 37 | logrus.WithError(err).Fatalf("Failed to marshal data to RLP, value = %+v", v) 38 | } 39 | 40 | return data 41 | } 42 | 43 | func MustUnmarshalRLP(data []byte, v interface{}) { 44 | if err := rlp.DecodeBytes(data, v); err != nil { 45 | logrus.WithError(err).Fatalf( 46 | "Failed to unmarshal RLP data, v = %v, data = %x", v, data, 47 | ) 48 | } 49 | } 50 | -------------------------------------------------------------------------------- /util/gasstation/types.go: -------------------------------------------------------------------------------- 1 | package gasstation 2 | 3 | import ( 4 | "time" 5 | 6 | "github.com/Conflux-Chain/go-conflux-sdk/types" 7 | ) 8 | 9 | type gasPriceUsedStats struct { 10 | // min, max, median, mean statistics index 11 | min float64 12 | mean float64 13 | max float64 14 | median float64 15 | } 16 | 17 | type gasPriceUsedSample struct { 18 | tx *types.Transaction 19 | epochNo uint64 20 | minedDuration time.Duration 21 | } 22 | 23 | type 
gasPriceSamplingWindow struct { 24 | startEpoch uint64 // window start epoch 25 | endEpoch uint64 // window end epoch 26 | capacity uint64 // window capacity 27 | epochToTxs map[uint64][]*types.Hash // epoch => [tx hashes...] 28 | epochToPivotBlocks map[uint64]*types.BlockSummary // epoch => pivot block 29 | txToEstimateSamples map[types.Hash]*gasPriceUsedSample // tx hash => gas price used sample 30 | } 31 | 32 | func newGasPriceSamplingWindow(capacity uint64) *gasPriceSamplingWindow { 33 | return &gasPriceSamplingWindow{ 34 | capacity: capacity, 35 | epochToTxs: make(map[uint64][]*types.Hash), 36 | epochToPivotBlocks: make(map[uint64]*types.BlockSummary), 37 | txToEstimateSamples: make(map[types.Hash]*gasPriceUsedSample), 38 | } 39 | } 40 | 41 | func (win *gasPriceSamplingWindow) isEmpty() bool { 42 | // initial set or empty window? 43 | return win.endEpoch == 0 || (win.startEpoch > win.endEpoch) 44 | } 45 | 46 | func (win *gasPriceSamplingWindow) size() uint64 { 47 | if win.isEmpty() { 48 | return 0 49 | } 50 | 51 | return win.endEpoch - win.startEpoch + 1 52 | } 53 | 54 | func (win *gasPriceSamplingWindow) expandTo(newEpoch uint64) bool { 55 | if win.isEmpty() { 56 | win.startEpoch = newEpoch 57 | win.endEpoch = newEpoch 58 | 59 | return true 60 | } 61 | 62 | if win.endEpoch >= newEpoch { 63 | return false 64 | } 65 | 66 | win.endEpoch = newEpoch 67 | 68 | for win.size() > win.capacity { // in case of window overflow 69 | epochNo := win.startEpoch 70 | 71 | txHashes := win.epochToTxs[epochNo] 72 | delete(win.epochToTxs, epochNo) 73 | 74 | for _, txh := range txHashes { 75 | delete(win.txToEstimateSamples, *txh) 76 | } 77 | 78 | delete(win.epochToPivotBlocks, epochNo) 79 | 80 | win.startEpoch++ 81 | } 82 | 83 | return true 84 | } 85 | -------------------------------------------------------------------------------- /util/lru.go: -------------------------------------------------------------------------------- 1 | package util 2 | 3 | import ( 4 | "sync" 5 | "time" 
6 | 7 | lru "github.com/hashicorp/golang-lru" 8 | ) 9 | 10 | // expirableValue is used to hold value with expiration 11 | type expirableValue struct { 12 | value interface{} 13 | expiresAt time.Time 14 | } 15 | 16 | // ExpirableLruCache naive implementation of LRU cache with fixed TTL expiration duration. 17 | // This cache uses a lazy eviction policy, by which the expired entry will be purged when 18 | // it's being looked up. 19 | type ExpirableLruCache struct { 20 | lru *lru.Cache 21 | mu sync.Mutex 22 | ttl time.Duration 23 | } 24 | 25 | func NewExpirableLruCache(size int, ttl time.Duration) *ExpirableLruCache { 26 | cache, _ := lru.New(size) 27 | return &ExpirableLruCache{lru: cache, ttl: ttl} 28 | } 29 | 30 | // Add adds a value to the cache. Returns true if an eviction occurred. 31 | func (c *ExpirableLruCache) Add(key, value interface{}) bool { 32 | c.mu.Lock() 33 | defer c.mu.Unlock() 34 | 35 | ev := &expirableValue{ 36 | value: value, 37 | expiresAt: time.Now().Add(c.ttl), 38 | } 39 | 40 | return c.lru.Add(key, ev) 41 | } 42 | 43 | // Get looks up a key's value from the cache. Will purge the entry and return nil 44 | // if the entry expired. 
// ConcurrentMap is a sync.Map augmented with atomic load-or-create helpers.
type ConcurrentMap struct {
	sync.Map
	mu sync.Mutex
}

// LoadOrStoreFn returns the existing value for key when present; otherwise it
// invokes factory exactly once, stores the result, and returns it. The loaded
// flag is true when an existing value was returned.
func (m *ConcurrentMap) LoadOrStoreFn(
	key interface{}, factory func(k interface{}) interface{},
) (actual interface{}, loaded bool) {
	// Fast path: no lock needed when the value already exists.
	if existing, ok := m.Load(key); ok {
		return existing, true
	}

	m.mu.Lock()
	defer m.mu.Unlock()

	// Re-check under the lock: another goroutine may have stored meanwhile.
	if existing, ok := m.Load(key); ok {
		return existing, true
	}

	created := factory(key)
	m.Store(key, created)

	return created, false
}

// LoadOrStoreFnErr behaves like LoadOrStoreFn for factories that can fail;
// on factory error nothing is stored and the error is propagated.
func (m *ConcurrentMap) LoadOrStoreFnErr(
	key interface{}, factory func(k interface{}) (interface{}, error),
) (actual interface{}, loaded bool, err error) {
	if existing, ok := m.Load(key); ok {
		return existing, true, nil
	}

	m.mu.Lock()
	defer m.mu.Unlock()

	// Re-check under the lock: another goroutine may have stored meanwhile.
	if existing, ok := m.Load(key); ok {
		return existing, true, nil
	}

	created, err := factory(key)
	if err != nil {
		return nil, false, err
	}

	m.Store(key, created)

	return created, false, nil
}
// MaxUint64 returns the larger of a and b.
func MaxUint64(a, b uint64) uint64 {
	if a > b {
		return a
	}
	return b
}

// MinUint64 returns the smaller of a and b.
func MinUint64(a, b uint64) uint64 {
	if a < b {
		return a
	}
	return b
}

// MinInt returns the smaller of a and b.
func MinInt(a, b int) int {
	if a < b {
		return a
	}
	return b
}

// MaxInt returns the larger of a and b.
func MaxInt(a, b int) int {
	if a > b {
		return a
	}
	return b
}

// MinUint32 returns the smaller of a and b.
func MinUint32(a, b uint32) uint32 {
	if a < b {
		return a
	}
	return b
}

// Seed the shared math/rand source once at startup. The previous
// implementation constructed a fresh time-seeded source on every call, which
// is slow and can return identical values for calls within the same clock
// tick; the global source is also safe for concurrent use.
func init() {
	rand.Seed(time.Now().UnixNano())
}

// RandUint64 returns a pseudo-random number in [0, limit).
// It returns 0 when limit is 0.
func RandUint64(limit uint64) uint64 {
	if limit == 0 {
		return 0
	}

	return rand.Uint64() % limit
}
// InputBlockMetric is used to add metrics for input block parameter.
type InputBlockMetric struct{}

// updateBlockNumberIgnoreDefault marks percentage metrics for the "latest",
// "pending" and "number" tag values of an input block parameter and, when an
// explicit block number is supplied, records its gap behind the node's latest
// block. The "default" (nil) case is marked by the callers Update1/Update2.
func (metric *InputBlockMetric) updateBlockNumberIgnoreDefault(blockNum *types.BlockNumber, method string, eth *client.RpcEthClient) {
	// mark percentage for most popular values
	Registry.RPC.InputBlock(method, "latest").Mark(blockNum != nil && *blockNum == rpc.LatestBlockNumber)
	Registry.RPC.InputBlock(method, "pending").Mark(blockNum != nil && *blockNum == rpc.PendingBlockNumber)
	// metric.updatePercentage(method, "earliest", blockNum != nil && *blockNum == rpc.EarliestBlockNumber)

	// block number: > 0 excludes the negative special values; NOTE(review)
	// it also excludes block 0 — presumably intentional given the
	// commented-out "earliest" line above, but confirm.
	isNum := blockNum != nil && *blockNum > 0
	Registry.RPC.InputBlock(method, "number").Mark(isNum)

	if !isNum {
		return
	}

	// update block gap against latest if block number specified; errors from
	// BlockNumber() are silently ignored (the gap metric is simply skipped).
	if latestBlockNum, err := eth.BlockNumber(); err == nil && latestBlockNum != nil {
		gap := latestBlockNum.Int64() - int64(*blockNum)
		Registry.RPC.InputBlockGap(method).Update(gap)
	}
}

// Update1 records metrics for a plain *BlockNumber parameter of an RPC method.
func (metric *InputBlockMetric) Update1(blockNum *types.BlockNumber, method string, eth *client.RpcEthClient) {
	Registry.RPC.InputBlock(method, "default").Mark(blockNum == nil)
	metric.updateBlockNumberIgnoreDefault(blockNum, method, eth)
}

// Update2 records metrics for a BlockNumberOrHash parameter, additionally
// tracking the "hash" form before delegating the number handling.
func (metric *InputBlockMetric) Update2(blockNumOrHash *types.BlockNumberOrHash, method string, eth *client.RpcEthClient) {
	Registry.RPC.InputBlock(method, "default").Mark(blockNumOrHash == nil)
	Registry.RPC.InputBlock(method, "hash").Mark(blockNumOrHash != nil && blockNumOrHash.BlockHash != nil)

	var blockNum *types.BlockNumber
	if blockNumOrHash != nil {
		blockNum = blockNumOrHash.BlockNumber
	}
	metric.updateBlockNumberIgnoreDefault(blockNum, method, eth)
}
// InputEpochMetric is used to add metrics for input epoch parameter.
type InputEpochMetric struct{}

// Update marks the percentage for different epochs. If epoch number specified,
// add statistic for epoch gap against latest mined.
func (metric *InputEpochMetric) Update(epoch *types.Epoch, method string, cfx sdk.ClientOperator) {
	// mark percentage for most popular values
	Registry.RPC.InputEpoch(method, "default").Mark(epoch == nil)
	Registry.RPC.InputEpoch(method, types.EpochLatestMined.String()).Mark(types.EpochLatestMined.Equals(epoch))
	Registry.RPC.InputEpoch(method, types.EpochLatestState.String()).Mark(types.EpochLatestState.Equals(epoch))

	// epoch number given explicitly?
	var isNum bool
	if epoch != nil {
		_, isNum = epoch.ToInt()
	}
	Registry.RPC.InputEpoch(method, "number").Mark(isNum)

	// other cases: any non-nil, non-numeric epoch besides latest_mined/latest_state
	Registry.RPC.InputEpoch(method, "others").Mark(epoch != nil && !isNum &&
		!types.EpochLatestMined.Equals(epoch) &&
		!types.EpochLatestState.Equals(epoch))

	if epoch == nil {
		return
	}

	// update epoch gap against latest_mined if epoch number specified; RPC
	// errors are silently ignored (the gap metric is simply skipped).
	if num, ok := epoch.ToInt(); ok {
		if latestMined, err := cfx.GetEpochNumber(types.EpochLatestMined); err == nil && latestMined != nil {
			gap := latestMined.ToInt().Int64() - num.Int64()
			Registry.RPC.InputEpochGap(method).Update(gap)
		}
	}
}
// Percentage implements the GaugeFloat64 interface for percentage statistic.
type Percentage interface {
	// Mark records one observation, counted as marked or not.
	Mark(marked bool)
	// Value returns the current percentage, e.g. 99.38 means 99.38%.
	Value() float64
}

// NewPercentage constructs a new standard percentage metric. When metrics are
// globally disabled a no-op implementation is returned instead.
func NewPercentage() Percentage {
	if !metrics.Enabled {
		return &noopPercentage{}
	}

	return &standardPercentage{}
}

// GetOrRegisterPercentage returns an existing Percentage or constructs and registers a new standard Percentage.
// name may be a fmt format string completed by args.
func GetOrRegisterPercentage(name string, args ...interface{}) Percentage {
	return getOrRegisterPercentage(NewPercentage, name, args...)
}

// getOrRegisterPercentage gets or constructs Percentage with specified factory.
func getOrRegisterPercentage(factory func() Percentage, name string, args ...interface{}) Percentage {
	metricName := fmt.Sprintf(name, args...)
	return InfuraRegistry.GetOrRegister(metricName, factory).(Percentage)
}

// noopPercentage is no-op implementation for Percentage interface.
type noopPercentage struct{}

func (p *noopPercentage) Mark(marked bool)               {}
func (p *noopPercentage) Value() float64                 { return 0 }
func (p *noopPercentage) Update(float64)                 {}
func (p *noopPercentage) Snapshot() metrics.GaugeFloat64 { return p }

// percentageData accumulates total observations and how many were marked.
type percentageData struct {
	total uint64
	marks uint64
}

// update counts one observation.
func (data *percentageData) update(marked bool) {
	data.total++
	if marked {
		data.marks++
	}
}

// value returns the marked ratio as a percentage truncated to two decimal
// places via integer math; 10.19 means 10.19%.
func (data *percentageData) value() float64 {
	if data.total == 0 {
		// percentage is 0 when never marked
		return 0
	}

	return float64(data.marks*10000/data.total) / 100
}
67 | type standardPercentage struct { 68 | data percentageData 69 | mu sync.Mutex 70 | } 71 | 72 | func (p *standardPercentage) Mark(marked bool) { 73 | p.mu.Lock() 74 | defer p.mu.Unlock() 75 | 76 | p.data.update(marked) 77 | } 78 | 79 | // Value implements the metrics.GaugeFloat64 interface. 80 | func (p *standardPercentage) Value() float64 { 81 | p.mu.Lock() 82 | defer p.mu.Unlock() 83 | 84 | return p.data.value() 85 | } 86 | 87 | // Update implements the metrics.GaugeFloat64 interface. 88 | func (p *standardPercentage) Update(float64) { 89 | panic("Update called on a standardPercentage") 90 | } 91 | 92 | // Snapshot implements the metrics.GaugeFloat64 interface. 93 | func (p *standardPercentage) Snapshot() metrics.GaugeFloat64 { 94 | return metrics.GaugeFloat64Snapshot(p.Value()) 95 | } 96 | -------------------------------------------------------------------------------- /util/metrics/report.go: -------------------------------------------------------------------------------- 1 | package metrics 2 | 3 | import ( 4 | "time" 5 | 6 | "github.com/Conflux-Chain/go-conflux-util/viper" 7 | "github.com/ethereum/go-ethereum/metrics" 8 | "github.com/ethereum/go-ethereum/metrics/influxdb" 9 | "github.com/sirupsen/logrus" 10 | ) 11 | 12 | // This package should be imported before any metric (e.g. timer, histogram) created. 13 | // Because, `metrics.Enabled` in go-ethereum is `false` by default, which leads to noop 14 | // metric created for static variables in any package. 15 | // 16 | // In addition, this package should be imported after the initialization of viper and logrus. 
// Init loads the "metrics" config section, toggles the global go-ethereum
// metrics switch, and (when reporting is enabled) starts the periodic
// influxdb reporter goroutine. Must be called after viper and logrus are
// initialized and before any metric is created.
func Init() {
	// config mirrors the "metrics" configuration section.
	var config struct {
		Enabled  bool `default:"true"`
		Influxdb struct {
			Host     string `default:"http://127.0.0.1:8086"`
			DB       string `default:"infura_test"`
			Username string
			Password string
		}
		Report struct {
			Enabled  bool
			Interval time.Duration `default:"10s"`
		}
	}

	viper.MustUnmarshalKey("metrics", &config)

	// Toggle the global switch before any metric object is constructed;
	// otherwise they are created as no-ops.
	metrics.Enabled = config.Enabled

	if !metrics.Enabled || !config.Report.Enabled {
		return
	}

	// influxdb.InfluxDB blocks while reporting periodically, hence its own
	// goroutine. NOTE(review): this goroutine has no shutdown hook.
	go influxdb.InfluxDB(
		InfuraRegistry,
		config.Report.Interval,
		config.Influxdb.Host,
		config.Influxdb.DB,
		config.Influxdb.Username,
		config.Influxdb.Password,
		"", // namespace
	)

	logrus.Info("Start to report metrics to influxdb periodically")
}
// Dec mirrors the gauge decrement to the remote updater.
func (gauge *Gauge) Dec(i int64) {
	gauge.Gauge.Dec(i)
	gauge.updater.DecGauge(gauge.name, i)
}

// Inc mirrors the gauge increment to the remote updater.
func (gauge *Gauge) Inc(i int64) {
	gauge.Gauge.Inc(i)
	gauge.updater.IncGauge(gauge.name, i)
}

// GaugeFloat64 wraps a local float gauge, mirroring updates to the remote updater.
type GaugeFloat64 struct {
	clientMetric
	metrics.GaugeFloat64
}

// Update mirrors the gauge update to the remote updater.
func (gauge *GaugeFloat64) Update(v float64) {
	gauge.GaugeFloat64.Update(v)
	gauge.updater.UpdateGaugeFloat64(gauge.name, v)
}

// Meter wraps a local meter, mirroring updates to the remote updater.
type Meter struct {
	clientMetric
	metrics.Meter
}

// Mark mirrors the meter mark to the remote updater.
func (meter *Meter) Mark(n int64) {
	meter.Meter.Mark(n)
	meter.updater.MarkMeter(meter.name, n)
}

// Stop mirrors the meter stop to the remote updater.
func (meter *Meter) Stop() {
	meter.Meter.Stop()
	meter.updater.StopMeter(meter.name)
}

// Histogram wraps a local histogram, mirroring updates to the remote updater.
type Histogram struct {
	clientMetric
	metrics.Histogram
}

// Clear mirrors the histogram reset to the remote updater.
func (h *Histogram) Clear() {
	h.Histogram.Clear()
	h.updater.ClearHistogram(h.name)
}

// Update mirrors the histogram sample to the remote updater.
func (h *Histogram) Update(v int64) {
	h.Histogram.Update(v)
	h.updater.UpdateHistogram(h.name, v)
}

// Timer wraps a local timer, mirroring updates to the remote updater.
type Timer struct {
	clientMetric
	metrics.Timer
}

// Time measures f's duration; it is re-declared here (rather than inherited)
// so the measurement flows through the overridden Update and is mirrored.
func (timer *Timer) Time(f func()) {
	start := time.Now()
	f()
	timer.UpdateSince(start)
}

// Update mirrors the timing sample (as nanoseconds) to the remote updater.
func (timer *Timer) Update(d time.Duration) {
	timer.Timer.Update(d)
	timer.updater.UpdateTimer(timer.name, d.Nanoseconds())
}

// UpdateSince records the duration elapsed since ts.
func (timer *Timer) UpdateSince(ts time.Time) {
	timer.Update(time.Since(ts))
}

// Stop mirrors the timer stop to the remote updater.
func (timer *Timer) Stop() {
	timer.Timer.Stop()
	timer.updater.StopTimer(timer.name)
}
p.updater.MarkPercentage(p.name, marked) 135 | } 136 | 137 | // TimeWindowPercentage 138 | type TimeWindowPercentage struct { 139 | clientMetric 140 | infuraMetrics.Percentage 141 | slots int 142 | interval time.Duration 143 | } 144 | 145 | func (p *TimeWindowPercentage) Mark(marked bool) { 146 | p.Percentage.Mark(marked) 147 | p.updater.MarkTimeWindowPercentage(p.name, marked, p.slots, p.interval.Nanoseconds()) 148 | } 149 | -------------------------------------------------------------------------------- /util/metrics/service/registry.go: -------------------------------------------------------------------------------- 1 | package service 2 | 3 | import ( 4 | "fmt" 5 | "time" 6 | 7 | "github.com/ethereum/go-ethereum/metrics" 8 | infuraMetrics "github.com/scroll-tech/rpc-gateway/util/metrics" 9 | ) 10 | 11 | type Registry struct { 12 | metrics.Registry 13 | updater Updater 14 | } 15 | 16 | func NewRegistry(updater Updater) metrics.Registry { 17 | return &Registry{ 18 | Registry: metrics.NewRegistry(), 19 | updater: updater, 20 | } 21 | } 22 | 23 | func (r *Registry) GetOrRegisterCounter(name string, args ...interface{}) metrics.Counter { 24 | metricName := fmt.Sprintf(name, args...) 25 | return r.Registry.GetOrRegister(name, func() metrics.Counter { 26 | return &Counter{clientMetric{metricName, r.updater}, metrics.NewCounter()} 27 | }).(metrics.Counter) 28 | } 29 | 30 | func (r *Registry) GetOrRegisterGauge(name string, args ...interface{}) metrics.Gauge { 31 | metricName := fmt.Sprintf(name, args...) 32 | return r.Registry.GetOrRegister(name, func() metrics.Gauge { 33 | return &Gauge{clientMetric{metricName, r.updater}, metrics.NewGauge()} 34 | }).(metrics.Gauge) 35 | } 36 | 37 | func (r *Registry) GetOrRegisterGaugeFloat64(name string, args ...interface{}) metrics.GaugeFloat64 { 38 | metricName := fmt.Sprintf(name, args...) 
39 | return r.Registry.GetOrRegister(name, func() metrics.GaugeFloat64 { 40 | return &GaugeFloat64{clientMetric{metricName, r.updater}, metrics.NewGaugeFloat64()} 41 | }).(metrics.GaugeFloat64) 42 | } 43 | 44 | func (r *Registry) GetOrRegisterMeter(name string, args ...interface{}) metrics.Meter { 45 | metricName := fmt.Sprintf(name, args...) 46 | return r.Registry.GetOrRegister(name, func() metrics.Meter { 47 | return &Meter{clientMetric{metricName, r.updater}, metrics.NewMeter()} 48 | }).(metrics.Meter) 49 | } 50 | 51 | func (r *Registry) GetOrRegisterHistogram(name string, args ...interface{}) metrics.Histogram { 52 | metricName := fmt.Sprintf(name, args...) 53 | return r.Registry.GetOrRegister(name, func() metrics.Histogram { 54 | return &Histogram{clientMetric{metricName, r.updater}, infuraMetrics.NewHistogram()} 55 | }).(metrics.Histogram) 56 | } 57 | 58 | func (r *Registry) GetOrRegisterTimer(name string, args ...interface{}) metrics.Timer { 59 | metricName := fmt.Sprintf(name, args...) 60 | return r.Registry.GetOrRegister(name, func() metrics.Timer { 61 | return &Timer{clientMetric{metricName, r.updater}, metrics.NewTimer()} 62 | }).(metrics.Timer) 63 | } 64 | 65 | func (r *Registry) GetOrRegisterPercentage(name string, args ...interface{}) infuraMetrics.Percentage { 66 | metricName := fmt.Sprintf(name, args...) 67 | return r.Registry.GetOrRegister(name, func() infuraMetrics.Percentage { 68 | return &Percentage{clientMetric{metricName, r.updater}, infuraMetrics.NewPercentage()} 69 | }).(infuraMetrics.Percentage) 70 | } 71 | 72 | func (r *Registry) GetOrRegisterTimeWindowPercentageDefault(name string, args ...interface{}) infuraMetrics.Percentage { 73 | return r.GetOrRegisterTimeWindowPercentage(10, time.Minute, name, args...) 74 | } 75 | 76 | func (r *Registry) GetOrRegisterTimeWindowPercentage(slots int, slotInterval time.Duration, name string, args ...interface{}) infuraMetrics.Percentage { 77 | metricName := fmt.Sprintf(name, args...) 
78 | return r.Registry.GetOrRegister(name, func() infuraMetrics.Percentage { 79 | return &TimeWindowPercentage{clientMetric{metricName, r.updater}, infuraMetrics.NewPercentage(), slots, slotInterval} 80 | }).(infuraMetrics.Percentage) 81 | } 82 | -------------------------------------------------------------------------------- /util/metrics/service/server.go: -------------------------------------------------------------------------------- 1 | package service 2 | 3 | import ( 4 | "sort" 5 | "time" 6 | 7 | "github.com/scroll-tech/rpc-gateway/util/metrics" 8 | ) 9 | 10 | const Namespace = "metrics" 11 | 12 | type MetricsAPI struct{} 13 | 14 | // Query APIs 15 | func (api *MetricsAPI) List() []string { 16 | var names []string 17 | 18 | for k := range metrics.InfuraRegistry.GetAll() { 19 | names = append(names, k) 20 | } 21 | 22 | sort.Strings(names) 23 | 24 | return names 25 | } 26 | 27 | func (api *MetricsAPI) Get(name string) map[string]interface{} { 28 | return metrics.InfuraRegistry.GetAll()[name] 29 | } 30 | 31 | func (api *MetricsAPI) All() map[string]map[string]interface{} { 32 | names := api.List() 33 | 34 | content := make(map[string]map[string]interface{}) 35 | 36 | for _, v := range names { 37 | content[v] = metrics.InfuraRegistry.GetAll()[v] 38 | } 39 | 40 | return content 41 | } 42 | 43 | // Counter 44 | func (api *MetricsAPI) ClearCounter(name string) { 45 | metrics.GetOrRegisterCounter(name).Clear() 46 | } 47 | 48 | func (api *MetricsAPI) DecCounter(name string, i int64) { 49 | metrics.GetOrRegisterCounter(name).Dec(i) 50 | } 51 | 52 | func (api *MetricsAPI) IncCounter(name string, i int64) { 53 | metrics.GetOrRegisterCounter(name).Inc(i) 54 | } 55 | 56 | // Gauge 57 | func (api *MetricsAPI) UpdateGauge(name string, v int64) { 58 | metrics.GetOrRegisterGauge(name).Update(v) 59 | } 60 | 61 | func (api *MetricsAPI) DecGauge(name string, i int64) { 62 | metrics.GetOrRegisterGauge(name).Dec(i) 63 | } 64 | 65 | func (api *MetricsAPI) IncGauge(name string, i 
// GaugeFloat64
// UpdateGaugeFloat64 sets the float64 gauge registered under name to v.
func (api *MetricsAPI) UpdateGaugeFloat64(name string, v float64) {
	metrics.GetOrRegisterGaugeFloat64(name).Update(v)
}

// Meter
// MarkMeter records the occurrence of n events on the meter named name.
func (api *MetricsAPI) MarkMeter(name string, n int64) {
	metrics.GetOrRegisterMeter(name).Mark(n)
}

// StopMeter stops the named meter.
func (api *MetricsAPI) StopMeter(name string) {
	metrics.GetOrRegisterMeter(name).Stop()
}

// Histogram
// ClearHistogram resets the named histogram.
func (api *MetricsAPI) ClearHistogram(name string) {
	metrics.GetOrRegisterHistogram(name).Clear()
}

// UpdateHistogram adds sample value v to the named histogram.
func (api *MetricsAPI) UpdateHistogram(name string, v int64) {
	metrics.GetOrRegisterHistogram(name).Update(v)
}

// Timer
// UpdateTimer records a duration of v nanoseconds on the named timer
// (v is interpreted as time.Duration, i.e. nanoseconds).
func (api *MetricsAPI) UpdateTimer(name string, v int64) {
	metrics.GetOrRegisterTimer(name).Update(time.Duration(v))
}

// StopTimer stops the named timer.
func (api *MetricsAPI) StopTimer(name string) {
	metrics.GetOrRegisterTimer(name).Stop()
}

// Percentage
// MarkPercentage records one hit (marked=true) or miss (marked=false) on the
// named percentage metric.
func (api *MetricsAPI) MarkPercentage(name string, marked bool) {
	metrics.GetOrRegisterPercentage(name).Mark(marked)
}

// TimeWindowPercentage
// MarkTimeWindowPercentageDefault records a hit/miss on the named time-window
// percentage metric using default window settings.
func (api *MetricsAPI) MarkTimeWindowPercentageDefault(name string, marked bool) {
	metrics.GetOrRegisterTimeWindowPercentageDefault(name).Mark(marked)
}

// MarkTimeWindowPercentage records a hit/miss on the named time-window
// percentage metric with explicit slot settings.
// NOTE(review): the (slotInterval, slots) argument order passed here differs
// from the (slots, slotInterval) order used by service.Registry's method of
// the same name — confirm against the util/metrics package signature.
func (api *MetricsAPI) MarkTimeWindowPercentage(name string, marked bool, slots int, slotIntervalNanos int64) {
	metrics.GetOrRegisterTimeWindowPercentage(time.Duration(slotIntervalNanos), slots, name).Mark(marked)
}
// TimerUpdater is used to update timer metric with native defer syntax.
type TimerUpdater struct {
	underlying metrics.Timer // the timer metric being updated
	start      time.Time     // captured at construction; basis for UpdateSince
}

// NewTimerUpdater creates an instance to update timer metric.
func NewTimerUpdater(timer metrics.Timer) TimerUpdater {
	return TimerUpdater{
		underlying: timer,
		start:      time.Now(),
	}
}

// NewTimerUpdaterByName creates an instance to update timer metric
// of specified name.
func NewTimerUpdaterByName(name string) TimerUpdater {
	return TimerUpdater{
		underlying: metrics.GetOrRegisterTimer(name, nil),
		start:      time.Now(),
	}
}

// Update updates the underlying timer metric with the elapsed time
// since construction (intended to be called via defer).
func (updater *TimerUpdater) Update() {
	updater.underlying.UpdateSince(updater.start)
}

// UpdateDuration updates the underlying timer metric with the given duration.
func (updater *TimerUpdater) UpdateDuration(duration time.Duration) {
	updater.underlying.Update(duration)
}
40 | return InfuraRegistry.GetOrRegister(name, NewHistogram).(metrics.Histogram) 41 | } 42 | 43 | func GetOrRegisterTimer(nameFormat string, nameArgs ...interface{}) metrics.Timer { 44 | name := fmt.Sprintf(nameFormat, nameArgs...) 45 | return metrics.GetOrRegisterTimer(name, InfuraRegistry) 46 | } 47 | -------------------------------------------------------------------------------- /util/rate/limit.go: -------------------------------------------------------------------------------- 1 | package rate 2 | 3 | import ( 4 | "sync" 5 | "time" 6 | 7 | "golang.org/x/time/rate" 8 | ) 9 | 10 | type VisitContext struct { 11 | Ip string // visiter IP 12 | Key string // visiter key 13 | Resource string // visited resource (also used as limit rule) 14 | } 15 | 16 | type Limiter interface { 17 | Allow(vc *VisitContext, n int) bool 18 | GC(timeout time.Duration) 19 | Update(option Option) bool 20 | } 21 | 22 | // IpLimiter limiting by IP address 23 | type IpLimiter struct { 24 | *visitLimiter 25 | } 26 | 27 | func NewIpLimiter(option Option) *IpLimiter { 28 | return &IpLimiter{ 29 | visitLimiter: newVisitLimiter(option.Rate, option.Burst), 30 | } 31 | } 32 | 33 | func (l *IpLimiter) Allow(vc *VisitContext, n int) bool { 34 | return l.visitLimiter.Allow(vc.Ip, n) 35 | } 36 | 37 | // KeyLimiter limiting by limit key 38 | type KeyLimiter struct { 39 | *visitLimiter 40 | } 41 | 42 | func NewKeyLimiter(option Option) *KeyLimiter { 43 | return &KeyLimiter{ 44 | visitLimiter: newVisitLimiter(option.Rate, option.Burst), 45 | } 46 | } 47 | 48 | func (l *KeyLimiter) Allow(vc *VisitContext, n int) bool { 49 | return l.visitLimiter.Allow(vc.Key, n) 50 | } 51 | 52 | type Option struct { 53 | Rate rate.Limit 54 | Burst int 55 | } 56 | 57 | func NewOption(r int, b int) Option { 58 | return Option{ 59 | Rate: rate.Limit(r), 60 | Burst: b, 61 | } 62 | } 63 | 64 | type visitor struct { 65 | limiter *rate.Limiter // token bucket 66 | lastSeen time.Time // used for GC when visitor inactive for a 
while 67 | } 68 | 69 | // visitLimiter is used to limit visit requests in terms of specific entity using 70 | // token bucket algorithm. 71 | type visitLimiter struct { 72 | Option 73 | 74 | // limit entity (eg., IP or limit key etc.) => visitor 75 | visitors map[string]*visitor 76 | 77 | mu sync.Mutex 78 | } 79 | 80 | func newVisitLimiter(rate rate.Limit, burst int) *visitLimiter { 81 | return &visitLimiter{ 82 | Option: Option{rate, burst}, 83 | visitors: make(map[string]*visitor), 84 | } 85 | } 86 | 87 | func (l *visitLimiter) Allow(entity string, n int) bool { 88 | l.mu.Lock() 89 | defer l.mu.Unlock() 90 | 91 | v, ok := l.visitors[entity] 92 | if !ok { 93 | v = &visitor{ 94 | limiter: rate.NewLimiter(l.Rate, l.Burst), 95 | } 96 | l.visitors[entity] = v 97 | } 98 | 99 | v.lastSeen = time.Now() 100 | 101 | return v.limiter.AllowN(v.lastSeen, n) 102 | } 103 | 104 | func (l *visitLimiter) GC(timeout time.Duration) { 105 | now := time.Now() 106 | 107 | l.mu.Lock() 108 | defer l.mu.Unlock() 109 | 110 | for entity, v := range l.visitors { 111 | if v.lastSeen.Add(timeout).Before(now) { 112 | delete(l.visitors, entity) 113 | } 114 | } 115 | } 116 | 117 | func (l *visitLimiter) Update(option Option) bool { 118 | l.mu.Lock() 119 | defer l.mu.Unlock() 120 | 121 | if l.Rate == option.Rate && l.Burst == option.Burst { 122 | return false 123 | } 124 | 125 | l.Option = option 126 | 127 | for _, visitor := range l.visitors { 128 | visitor.limiter.SetLimit(option.Rate) 129 | visitor.limiter.SetBurst(option.Burst) 130 | } 131 | 132 | return true 133 | } 134 | -------------------------------------------------------------------------------- /util/rate/registry_reload.go: -------------------------------------------------------------------------------- 1 | package rate 2 | 3 | import ( 4 | "time" 5 | 6 | "github.com/sirupsen/logrus" 7 | ) 8 | 9 | type Config struct { 10 | Strategies map[uint32]*Strategy // limit strategies 11 | } 12 | 13 | type KeyInfo struct { 14 | SID uint32 // 
type KeysetFilter struct {
	SIDs   []uint32 // strategy IDs
	KeySet []string // limit keyset
	Limit  int      // result limit size (<= 0 means none)
}

// KeysetLoader limit keyset loader
type KeysetLoader func(filter *KeysetFilter) ([]*KeyInfo, error)

// AutoReload periodically reloads rate limit strategies via reloader and
// serves limit key lookups via kloader. It blocks forever, so it is intended
// to be run in its own goroutine.
func (m *Registry) AutoReload(interval time.Duration, reloader func() *Config, kloader KeysetLoader) {
	ticker := time.NewTicker(interval)
	defer ticker.Stop()

	// init registry key loader
	m.initKeyLoader(kloader)

	// warm up limit key cache for better performance
	m.warmUpKeyCache(kloader)

	// load immediately at first
	rconf := reloader()
	m.reloadOnce(rconf)

	// load periodically
	for range ticker.C {
		rconf := reloader()
		m.reloadOnce(rconf)

		// TODO: re-validate most recently used limit keys to refresh cache.
	}
}

// reloadOnce applies one freshly loaded config under the registry lock.
// A nil config (e.g. an upstream load failure) is ignored.
func (m *Registry) reloadOnce(rconf *Config) {
	if rconf == nil {
		return
	}

	m.mu.Lock()
	defer m.mu.Unlock()

	// refresh rate limit strategies
	m.refreshStrategies(rconf.Strategies)
}

// refreshStrategies diffs the newly loaded strategies against the in-memory
// ones: strategies that disappeared are removed, new ones added, and those
// whose MD5 changed are updated. Caller must hold m.mu.
func (m *Registry) refreshStrategies(strategies map[uint32]*Strategy) {
	// remove limiter sets
	for sid, strategy := range m.strategies {
		if _, ok := strategies[sid]; !ok {
			m.removeStrategy(strategy)
			logrus.WithField("strategy", strategy).Info("RateLimit strategy removed")
		}
	}

	// add or update limiter sets
	for sid, strategy := range strategies {
		s, ok := m.strategies[sid]
		if !ok { // add
			m.addStrategy(strategy)
			logrus.WithField("strategy", strategy).Info("RateLimit strategy added")
			continue
		}

		if s.MD5 != strategy.MD5 { // update
			m.updateStrategy(strategy)
			logrus.WithField("strategy", strategy).Info("RateLimit strategy updated")
		}
	}
}

// initKeyLoader installs a single-key loader adapter over the keyset loader,
// under the registry lock.
func (m *Registry) initKeyLoader(kloader KeysetLoader) {
	m.mu.Lock()
	defer m.mu.Unlock()

	m.keyLoader = func(key string) (*KeyInfo, error) {
		kinfos, err := kloader(&KeysetFilter{KeySet: []string{key}})
		if err == nil && len(kinfos) > 0 {
			return kinfos[0], nil
		}

		return nil, err
	}
}

// warmUpKeyCache pre-populates the limit key cache with up to 3/4 of its
// capacity so early requests avoid a loader round trip. Load failures are
// logged and tolerated (best effort).
func (m *Registry) warmUpKeyCache(kloader KeysetLoader) {
	kis, err := kloader(&KeysetFilter{Limit: (LimitKeyCacheSize * 3 / 4)})
	if err != nil {
		logrus.WithError(err).Warn("Failed to load limit keyset to warm up cache")
		return
	}

	for i := range kis {
		m.keyCache.Add(kis[i].Key, kis[i])
	}

	logrus.WithField("totalKeys", len(kis)).Info("Limit keyset loaded to cache")
}
24 | type TxnRelayer struct { 25 | poolClients []*sdk.Client // fullnode pool 26 | txnQueue chan hexutil.Bytes // transactions queued to relay 27 | config *TxnRelayerConfig 28 | } 29 | 30 | func MustNewTxnRelayerFromViper() *TxnRelayer { 31 | relayer, err := NewTxnRelayerFromViper() 32 | if err != nil { 33 | logrus.WithError(err).Fatal("Failed to new transaction relayer from viper") 34 | } 35 | 36 | return relayer 37 | } 38 | 39 | func NewTxnRelayerFromViper() (*TxnRelayer, error) { 40 | var relayConf TxnRelayerConfig 41 | viper.MustUnmarshalKey("relay", &relayConf) 42 | return NewTxnRelayer(&relayConf) 43 | } 44 | 45 | func NewTxnRelayer(relayConf *TxnRelayerConfig) (*TxnRelayer, error) { 46 | if len(relayConf.NodeUrls) == 0 { 47 | return &TxnRelayer{}, nil 48 | } 49 | 50 | var cfxClients []*sdk.Client 51 | 52 | for _, url := range relayConf.NodeUrls { 53 | cfx, err := sdk.NewClient(url, sdk.ClientOption{ 54 | RetryCount: relayConf.Retry, 55 | RetryInterval: relayConf.RetryInterval, 56 | RequestTimeout: relayConf.RequestTimeout, 57 | }) 58 | if err != nil { 59 | return nil, err 60 | } 61 | 62 | cfxClients = append(cfxClients, cfx) 63 | } 64 | 65 | relayer := &TxnRelayer{ 66 | poolClients: cfxClients, 67 | txnQueue: make(chan hexutil.Bytes, relayConf.BufferSize), 68 | config: relayConf, 69 | } 70 | 71 | // start concurrency worker(s) 72 | concurrency := util.MaxInt(relayConf.Concurrency, 1) 73 | for i := 0; i < concurrency; i++ { 74 | go func() { 75 | for rawTxn := range relayer.txnQueue { 76 | relayer.doRelay(rawTxn) 77 | } 78 | }() 79 | } 80 | 81 | return relayer, nil 82 | } 83 | 84 | // AsyncRelay relays raw transaction broadcasting asynchronously. 85 | func (relayer *TxnRelayer) AsyncRelay(signedTx hexutil.Bytes) bool { 86 | if len(relayer.poolClients) == 0 { 87 | return true 88 | } 89 | 90 | if len(relayer.txnQueue) == relayer.config.BufferSize { // queue is full? 
91 | return false 92 | } 93 | 94 | relayer.txnQueue <- signedTx 95 | return true 96 | } 97 | 98 | func (relayer *TxnRelayer) doRelay(signedTx hexutil.Bytes) { 99 | for _, client := range relayer.poolClients { 100 | txHash, err := client.SendRawTransaction(signedTx) 101 | 102 | logrus.WithFields(logrus.Fields{ 103 | "nodeUrl": client.GetNodeURL(), "txHash": txHash, 104 | }).WithError(err).Trace("Raw transaction relayed") 105 | } 106 | } 107 | -------------------------------------------------------------------------------- /util/rpc/client_cfx.go: -------------------------------------------------------------------------------- 1 | package rpc 2 | 3 | import ( 4 | "time" 5 | 6 | sdk "github.com/Conflux-Chain/go-conflux-sdk" 7 | "github.com/sirupsen/logrus" 8 | ) 9 | 10 | type cfxClientOption struct { 11 | baseClientOption 12 | *sdk.ClientOption 13 | } 14 | 15 | func (o *cfxClientOption) SetRetryCount(retry int) { 16 | o.RetryCount = retry 17 | } 18 | 19 | func (o *cfxClientOption) SetRetryInterval(retryInterval time.Duration) { 20 | o.RetryInterval = retryInterval 21 | } 22 | 23 | func (o *cfxClientOption) SetRequestTimeout(reqTimeout time.Duration) { 24 | o.RequestTimeout = reqTimeout 25 | } 26 | 27 | func (o *cfxClientOption) SetMaxConnsPerHost(maxConns int) { 28 | o.MaxConnectionPerHost = maxConns 29 | } 30 | 31 | func MustNewCfxClientFromViper(options ...ClientOption) *sdk.Client { 32 | return MustNewCfxClient(cfxClientCfg.Http, options...) 33 | } 34 | 35 | func MustNewCfxWsClientFromViper(options ...ClientOption) *sdk.Client { 36 | return MustNewCfxClient(cfxClientCfg.WS, options...) 37 | } 38 | 39 | func MustNewCfxClient(url string, options ...ClientOption) *sdk.Client { 40 | cfx, err := NewCfxClient(url, options...) 
41 | if err != nil { 42 | logrus.WithField("url", url).WithError(err).Fatal("Failed to create CFX client") 43 | } 44 | 45 | return cfx 46 | } 47 | 48 | func NewCfxClient(url string, options ...ClientOption) (*sdk.Client, error) { 49 | opt := &cfxClientOption{ 50 | ClientOption: &sdk.ClientOption{ 51 | RetryCount: cfxClientCfg.Retry, 52 | RetryInterval: cfxClientCfg.RetryInterval, 53 | RequestTimeout: cfxClientCfg.RequestTimeout, 54 | MaxConnectionPerHost: cfxClientCfg.MaxConnsPerHost, 55 | }, 56 | } 57 | 58 | for _, o := range options { 59 | o(opt) 60 | } 61 | 62 | cfx, err := sdk.NewClient(url, *opt.ClientOption) 63 | if err == nil && opt.hookMetrics { 64 | HookMiddlewares(cfx.Provider(), url, "cfx") 65 | } 66 | 67 | return cfx, err 68 | } 69 | -------------------------------------------------------------------------------- /util/rpc/client_eth.go: -------------------------------------------------------------------------------- 1 | package rpc 2 | 3 | import ( 4 | "time" 5 | 6 | providers "github.com/openweb3/go-rpc-provider/provider_wrapper" 7 | "github.com/openweb3/web3go" 8 | "github.com/sirupsen/logrus" 9 | ) 10 | 11 | type ethClientOption struct { 12 | baseClientOption 13 | web3go.ClientOption 14 | } 15 | 16 | func (o *ethClientOption) SetRetryCount(retry int) { 17 | o.RetryCount = retry 18 | } 19 | 20 | func (o *ethClientOption) SetRetryInterval(retryInterval time.Duration) { 21 | o.RetryInterval = retryInterval 22 | } 23 | 24 | func (o *ethClientOption) SetRequestTimeout(reqTimeout time.Duration) { 25 | o.RequestTimeout = reqTimeout 26 | } 27 | 28 | func (o *ethClientOption) SetMaxConnsPerHost(maxConns int) { 29 | o.MaxConnectionPerHost = maxConns 30 | } 31 | 32 | func MustNewEthClientFromViper(options ...ClientOption) *web3go.Client { 33 | return MustNewEthClient(ethClientCfg.Http, options...) 34 | } 35 | 36 | func MustNewEthClient(url string, options ...ClientOption) *web3go.Client { 37 | eth, err := NewEthClient(url, options...) 
// Url2NodeName normalizes an RPC endpoint URL into a node name used for
// logging and metrics: lower-cased, with any known URI scheme and a single
// leading slash stripped.
func Url2NodeName(url string) string {
	name := strings.ToLower(url)

	for _, scheme := range []string{"http://", "https://", "ws://", "wss://"} {
		name = strings.TrimPrefix(name, scheme)
	}

	return strings.TrimPrefix(name, "/")
}
// middlewareLog returns a provider middleware that logs each fullnode RPC
// call at debug level (entry, exit and elapsed time); the call result is
// attached only at trace level, the error (if any) always.
func middlewareLog(fullnode, space string) providers.CallContextMiddleware {
	return func(handler providers.CallContextFunc) providers.CallContextFunc {
		return func(ctx context.Context, result interface{}, method string, args ...interface{}) error {
			// fast path: skip all log-field construction unless debug is on
			if !logrus.IsLevelEnabled(logrus.DebugLevel) {
				return handler(ctx, result, method, args...)
			}

			logger := logrus.WithFields(logrus.Fields{
				"fullnode": fullnode,
				"space":    space,
				"method":   method,
				"args":     args,
			})

			logger.Debug("RPC enter")

			start := time.Now()
			err := handler(ctx, result, method, args...)
			logger = logger.WithField("elapsed", time.Since(start))

			if err != nil {
				logger = logger.WithError(err)
			} else if logrus.IsLevelEnabled(logrus.TraceLevel) {
				logger = logger.WithField("result", result)
			}

			logger.Debug("RPC leave")

			return err
		}
	}
}
| opt.SetMaxConnsPerHost(maxConns) 62 | } 63 | } 64 | 65 | func WithClientHookMetrics(hook bool) ClientOption { 66 | return func(opt ClientOptioner) { 67 | opt.SetHookMetrics(hook) 68 | } 69 | } 70 | 71 | func init() { 72 | viper.MustUnmarshalKey("cfx", &cfxClientCfg) 73 | viper.MustUnmarshalKey("eth", ðClientCfg) 74 | } 75 | -------------------------------------------------------------------------------- /util/rpc/handlers/handler.go: -------------------------------------------------------------------------------- 1 | package handlers 2 | 3 | import ( 4 | "net/http" 5 | ) 6 | 7 | type Middleware func(next http.Handler) http.Handler 8 | 9 | type CtxKey string 10 | 11 | const ( 12 | CtxKeyRealIP = CtxKey("Infura-Real-IP") 13 | CtxKeyRateRegistry = CtxKey("Infura-Rate-Limit-Registry") 14 | CtxAccessToken = CtxKey("Infura-Access-Token") 15 | ) 16 | -------------------------------------------------------------------------------- /util/rpc/handlers/ip.go: -------------------------------------------------------------------------------- 1 | package handlers 2 | 3 | import ( 4 | "bytes" 5 | "context" 6 | "net" 7 | "net/http" 8 | "net/url" 9 | "strings" 10 | ) 11 | 12 | // Remote IP Address with Go: 13 | // https://husobee.github.io/golang/ip-address/2015/12/17/remote-ip-go.html 14 | 15 | //ipRange - a structure that holds the start and end of a range of ip addresses 16 | type ipRange struct { 17 | start net.IP 18 | end net.IP 19 | } 20 | 21 | // inRange - check to see if a given ip address is within a range given 22 | func inRange(r ipRange, ipAddress net.IP) bool { 23 | // strcmp type byte comparison 24 | if bytes.Compare(ipAddress, r.start) >= 0 && bytes.Compare(ipAddress, r.end) < 0 { 25 | return true 26 | } 27 | return false 28 | } 29 | 30 | var privateRanges = []ipRange{ 31 | { 32 | start: net.ParseIP("10.0.0.0"), 33 | end: net.ParseIP("10.255.255.255"), 34 | }, 35 | { 36 | start: net.ParseIP("100.64.0.0"), 37 | end: net.ParseIP("100.127.255.255"), 38 | }, 39 | { 40 | 
start: net.ParseIP("172.16.0.0"), 41 | end: net.ParseIP("172.31.255.255"), 42 | }, 43 | { 44 | start: net.ParseIP("192.0.0.0"), 45 | end: net.ParseIP("192.0.0.255"), 46 | }, 47 | { 48 | start: net.ParseIP("192.168.0.0"), 49 | end: net.ParseIP("192.168.255.255"), 50 | }, 51 | { 52 | start: net.ParseIP("198.18.0.0"), 53 | end: net.ParseIP("198.19.255.255"), 54 | }, 55 | } 56 | 57 | // isPrivateSubnet - check to see if this ip is in a private subnet 58 | func isPrivateSubnet(ipAddress net.IP) bool { 59 | // my use case is only concerned with ipv4 atm 60 | if ipCheck := ipAddress.To4(); ipCheck != nil { 61 | // iterate over all our ranges 62 | for _, r := range privateRanges { 63 | // check if this ip is in a private range 64 | if inRange(r, ipAddress) { 65 | return true 66 | } 67 | } 68 | } 69 | return false 70 | } 71 | 72 | // GetIPAddress returns the remote IP address. 73 | func GetIPAddress(r *http.Request) string { 74 | for _, h := range []string{"X-Forwarded-For", "X-Real-Ip"} { 75 | addresses := strings.Split(r.Header.Get(h), ",") 76 | // march from right to left until we get a public address 77 | // that will be the address right before our proxy. 78 | for i := len(addresses) - 1; i >= 0; i-- { 79 | ip := strings.TrimSpace(addresses[i]) 80 | // header can contain spaces too, strip those out. 
81 | realIP := net.ParseIP(ip) 82 | if !realIP.IsGlobalUnicast() || isPrivateSubnet(realIP) { 83 | // bad address, go to next 84 | continue 85 | } 86 | return ip 87 | } 88 | } 89 | 90 | if idx := strings.Index(r.RemoteAddr, ":"); idx != -1 { 91 | return r.RemoteAddr[:idx] 92 | } 93 | 94 | return r.RemoteAddr 95 | } 96 | 97 | func RealIP(next http.Handler) http.Handler { 98 | return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { 99 | ip := GetIPAddress(r) 100 | ctx := context.WithValue(r.Context(), CtxKeyRealIP, ip) 101 | next.ServeHTTP(w, r.WithContext(ctx)) 102 | }) 103 | } 104 | 105 | func GetIPAddressFromContext(ctx context.Context) (string, bool) { 106 | val, ok := ctx.Value(CtxKeyRealIP).(string) 107 | return val, ok 108 | } 109 | 110 | func GetAccessToken(r *http.Request) string { 111 | if r == nil || r.URL == nil { 112 | return "" 113 | } 114 | 115 | // access token path pattern: 116 | // http://example.com/${accessToken}... 117 | key := strings.TrimLeft(r.URL.EscapedPath(), "/") 118 | if idx := strings.Index(key, "/"); idx > 0 { 119 | key = key[:idx] 120 | } 121 | 122 | if key, err := url.PathUnescape(key); err == nil { 123 | return key 124 | } 125 | 126 | return "" 127 | } 128 | 129 | func GetAccessTokenFromContext(ctx context.Context) (string, bool) { 130 | val, ok := ctx.Value(CtxAccessToken).(string) 131 | return val, ok 132 | } 133 | -------------------------------------------------------------------------------- /util/rpc/handlers/rate_limit.go: -------------------------------------------------------------------------------- 1 | package handlers 2 | 3 | import ( 4 | "context" 5 | "net/http" 6 | 7 | "github.com/scroll-tech/rpc-gateway/util/rate" 8 | ) 9 | 10 | func RateLimit(registry *rate.Registry) Middleware { 11 | return func(next http.Handler) http.Handler { 12 | return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { 13 | ctx := context.WithValue(r.Context(), CtxKeyRateRegistry, registry) 14 | next.ServeHTTP(w, 
// RateLimitAllow reports whether the visit identified by the request context
// (real IP, optional access token) may consume n units of the named resource.
// It fails open: a missing registry, missing client IP, or no matching
// limiter all allow the visit.
func RateLimitAllow(ctx context.Context, name string, n int) bool {
	registry, ok := ctx.Value(CtxKeyRateRegistry).(*rate.Registry)
	if !ok {
		return true
	}

	ip, ok := GetIPAddressFromContext(ctx)
	if !ok { // ip is mandatory
		return true
	}

	// access token is optional
	token, _ := GetAccessTokenFromContext(ctx)

	vc := &rate.VisitContext{
		Ip: ip, Resource: name, Key: token,
	}

	limiter, ok := registry.Get(vc)
	if !ok { // no limiter configured for this resource/visitor
		return true
	}

	return limiter.Allow(vc, n)
}
-------------------------------------------------------------------------------- 1 | package middlewares 2 | 3 | import ( 4 | "context" 5 | "time" 6 | 7 | "github.com/openweb3/go-rpc-provider" 8 | "github.com/sirupsen/logrus" 9 | ) 10 | 11 | func LogBatch(next rpc.HandleBatchFunc) rpc.HandleBatchFunc { 12 | return func(ctx context.Context, msgs []*rpc.JsonRpcMessage) []*rpc.JsonRpcMessage { 13 | if !logrus.IsLevelEnabled(logrus.DebugLevel) { 14 | return next(ctx, msgs) 15 | } 16 | 17 | logrus.WithField("batch", len(msgs)).Debug("Batch RPC enter") 18 | 19 | start := time.Now() 20 | resp := next(ctx, msgs) 21 | 22 | logrus.WithFields(logrus.Fields{ 23 | "batch": len(resp), 24 | "elapsed": time.Since(start), 25 | }).Debug("Batch RPC leave") 26 | 27 | return resp 28 | } 29 | } 30 | 31 | func Log(next rpc.HandleCallMsgFunc) rpc.HandleCallMsgFunc { 32 | return func(ctx context.Context, msg *rpc.JsonRpcMessage) *rpc.JsonRpcMessage { 33 | if !logrus.IsLevelEnabled(logrus.DebugLevel) { 34 | return next(ctx, msg) 35 | } 36 | 37 | logger := logrus.WithField("input", msg) 38 | logger.Debug("RPC enter") 39 | 40 | start := time.Now() 41 | resp := next(ctx, msg) 42 | logger = logger.WithField("elapsed", time.Since(start)) 43 | 44 | if resp.Error != nil { 45 | logger = logger.WithField(logrus.ErrorKey, resp.Error.Error()) 46 | } else if logrus.IsLevelEnabled(logrus.TraceLevel) { 47 | logger = logger.WithField("output", resp) 48 | } 49 | 50 | logger.Debug("RPC leave") 51 | 52 | return resp 53 | } 54 | } 55 | -------------------------------------------------------------------------------- /util/rpc/middlewares/metrics.go: -------------------------------------------------------------------------------- 1 | package middlewares 2 | 3 | import ( 4 | "context" 5 | "time" 6 | 7 | "github.com/openweb3/go-rpc-provider" 8 | "github.com/scroll-tech/rpc-gateway/util/metrics" 9 | ) 10 | 11 | func MetricsBatch(next rpc.HandleBatchFunc) rpc.HandleBatchFunc { 12 | return func(ctx context.Context, 
msgs []*rpc.JsonRpcMessage) []*rpc.JsonRpcMessage { 13 | start := time.Now() 14 | resp := next(ctx, msgs) 15 | 16 | metrics.Registry.RPC.BatchLatency().Update(time.Since(start).Nanoseconds()) 17 | metrics.Registry.RPC.BatchSize().Update(int64(len(msgs))) 18 | 19 | return resp 20 | } 21 | } 22 | 23 | func Metrics(next rpc.HandleCallMsgFunc) rpc.HandleCallMsgFunc { 24 | return func(ctx context.Context, msg *rpc.JsonRpcMessage) *rpc.JsonRpcMessage { 25 | start := time.Now() 26 | resp := next(ctx, msg) 27 | metrics.Registry.RPC.UpdateDuration(msg.Method, resp.Error, start) 28 | return resp 29 | } 30 | } 31 | -------------------------------------------------------------------------------- /util/rpc/middlewares/rate_limit.go: -------------------------------------------------------------------------------- 1 | package middlewares 2 | 3 | import ( 4 | "context" 5 | "errors" 6 | 7 | web3pay "github.com/Conflux-Chain/web3pay-service/client" 8 | "github.com/openweb3/go-rpc-provider" 9 | "github.com/scroll-tech/rpc-gateway/util/rpc/handlers" 10 | ) 11 | 12 | var ( 13 | errRateLimit = errors.New("too many requests") 14 | ) 15 | 16 | func RateLimitBatch(next rpc.HandleBatchFunc) rpc.HandleBatchFunc { 17 | return func(ctx context.Context, msgs []*rpc.JsonRpcMessage) []*rpc.JsonRpcMessage { 18 | if handlers.RateLimitAllow(ctx, "rpc_batch", len(msgs)) { 19 | return next(ctx, msgs) 20 | } 21 | 22 | var responses []*rpc.JsonRpcMessage 23 | for _, v := range msgs { 24 | responses = append(responses, v.ErrorResponse(errRateLimit)) 25 | } 26 | 27 | return responses 28 | } 29 | } 30 | 31 | func RateLimit(next rpc.HandleCallMsgFunc) rpc.HandleCallMsgFunc { 32 | return func(ctx context.Context, msg *rpc.JsonRpcMessage) *rpc.JsonRpcMessage { 33 | // check billing status 34 | if bs, ok := web3pay.BillingStatusFromContext(ctx); ok && bs.Success() { 35 | // serve directly on billing successfully, otherwise fallback to rate limit 36 | return next(ctx, msg) 37 | } 38 | 39 | // overall rate limit 
40 | if !handlers.RateLimitAllow(ctx, "rpc_all", 1) { 41 | return msg.ErrorResponse(errRateLimit) 42 | } 43 | 44 | // single method rate limit 45 | if !handlers.RateLimitAllow(ctx, msg.Method, 1) { 46 | return msg.ErrorResponse(errRateLimit) 47 | } 48 | 49 | return next(ctx, msg) 50 | } 51 | } 52 | -------------------------------------------------------------------------------- /util/rpc/middlewares/recover.go: -------------------------------------------------------------------------------- 1 | package middlewares 2 | 3 | import ( 4 | "context" 5 | "errors" 6 | "runtime/debug" 7 | 8 | "github.com/openweb3/go-rpc-provider" 9 | "github.com/sirupsen/logrus" 10 | ) 11 | 12 | var ( 13 | errMiddlewareCrashed = errors.New("RPC middleware crashed") 14 | ) 15 | 16 | func Recover(next rpc.HandleCallMsgFunc) rpc.HandleCallMsgFunc { 17 | return func(ctx context.Context, msg *rpc.JsonRpcMessage) (resp *rpc.JsonRpcMessage) { 18 | defer func() { 19 | if err := recover(); err != nil { 20 | resp = msg.ErrorResponse(errMiddlewareCrashed) 21 | 22 | debug.PrintStack() 23 | 24 | logrus.WithFields(logrus.Fields{ 25 | "inputMsg": newHumanReadableRpcMessage(msg), 26 | "panicErr": err, 27 | }).Error("RPC middleware panic recovered") 28 | } 29 | }() 30 | 31 | return next(ctx, msg) 32 | } 33 | } 34 | 35 | type humanReadableRpcMessage struct { 36 | Version string 37 | ID string 38 | Method string 39 | Params string 40 | Error error 41 | Result string 42 | } 43 | 44 | func newHumanReadableRpcMessage(msg *rpc.JsonRpcMessage) *humanReadableRpcMessage { 45 | return &humanReadableRpcMessage{ 46 | ID: string(msg.ID), 47 | Version: msg.Version, 48 | Method: msg.Method, 49 | Params: string(msg.Params), 50 | Error: msg.Error, 51 | Result: string(msg.Result), 52 | } 53 | } 54 | -------------------------------------------------------------------------------- /util/rpc/server.go: -------------------------------------------------------------------------------- 1 | package rpc 2 | 3 | import ( 4 | 
"context" 5 | "net" 6 | "net/http" 7 | "sync" 8 | "time" 9 | 10 | "github.com/ethereum/go-ethereum/node" 11 | "github.com/openweb3/go-rpc-provider" 12 | "github.com/scroll-tech/rpc-gateway/util/rpc/handlers" 13 | "github.com/sirupsen/logrus" 14 | "github.com/spf13/viper" 15 | ) 16 | 17 | type Protocol string 18 | 19 | const ( 20 | ProtocolHttp = "HTTP" 21 | ProtocolWS = "WS" 22 | ) 23 | 24 | var ( 25 | // DefaultShutdownTimeout is default timeout to shutdown RPC server. 26 | DefaultShutdownTimeout = 3 * time.Second 27 | 28 | // defaultWsPingInterval the default websocket ping/pong heartbeating interval. 29 | defaultWsPingInterval = 10 * time.Second 30 | ) 31 | 32 | // Server serves JSON RPC services. 33 | type Server struct { 34 | name string 35 | servers map[Protocol]*http.Server 36 | } 37 | 38 | // MustNewServer creates an instance of Server with specified RPC services. 39 | func MustNewServer(name string, rpcs map[string]interface{}, middlewares ...handlers.Middleware) *Server { 40 | handler := rpc.NewServer() 41 | servedApis := make([]string, 0, len(rpcs)) 42 | 43 | for namespace, impl := range rpcs { 44 | if err := handler.RegisterName(namespace, impl); err != nil { 45 | logrus.WithError(err).WithField("namespace", namespace).Fatal("Failed to register rpc service") 46 | } 47 | servedApis = append(servedApis, namespace) 48 | } 49 | 50 | logrus.WithFields(logrus.Fields{ 51 | "APIs": servedApis, 52 | "name": name, 53 | }).Info("RPC server APIs registered") 54 | 55 | httpServer := http.Server{ 56 | Handler: node.NewHTTPHandlerStack(handler, []string{"*"}, []string{"*"}), 57 | } 58 | 59 | viper.SetDefault("rpc.wsPingInterval", defaultWsPingInterval) 60 | wsServer := http.Server{ 61 | Handler: handler.WebsocketHandler([]string{"*"}, rpc.WebsocketOption{ 62 | WsPingInterval: viper.GetDuration("rpc.wsPingInterval"), 63 | }), 64 | } 65 | 66 | for i := len(middlewares) - 1; i >= 0; i-- { 67 | httpServer.Handler = middlewares[i](httpServer.Handler) 68 | wsServer.Handler 
= middlewares[i](wsServer.Handler) 69 | } 70 | 71 | return &Server{ 72 | name: name, 73 | servers: map[Protocol]*http.Server{ 74 | ProtocolHttp: &httpServer, 75 | ProtocolWS: &wsServer, 76 | }, 77 | } 78 | } 79 | 80 | // MustServe serves RPC server in blocking way or panics if failed. 81 | func (s *Server) MustServe(endpoint string, protocol Protocol) { 82 | logger := logrus.WithFields(logrus.Fields{ 83 | "name": s.name, 84 | "endpoint": endpoint, 85 | "protocol": protocol, 86 | }) 87 | 88 | server, ok := s.servers[protocol] 89 | if !ok { 90 | logger.Fatal("RPC protocol unsupported") 91 | } 92 | 93 | listener, err := net.Listen("tcp", endpoint) 94 | if err != nil { 95 | logger.WithError(err).Fatal("Failed to listen to endpoint") 96 | } 97 | 98 | logger.Info("JSON RPC server started") 99 | 100 | server.Serve(listener) 101 | } 102 | 103 | // MustServeGraceful serves RPC server in a goroutine until graceful shutdown. 104 | func (s *Server) MustServeGraceful( 105 | ctx context.Context, wg *sync.WaitGroup, endpoint string, protocol Protocol, 106 | ) { 107 | wg.Add(1) 108 | defer wg.Done() 109 | 110 | go s.MustServe(endpoint, protocol) 111 | 112 | <-ctx.Done() 113 | 114 | s.shutdown(protocol) 115 | } 116 | 117 | func (s *Server) shutdown(protocol Protocol) { 118 | ctx, cancel := context.WithTimeout(context.Background(), DefaultShutdownTimeout) 119 | defer cancel() 120 | 121 | logger := logrus.WithFields(logrus.Fields{ 122 | "name": s.name, 123 | "protocol": protocol, 124 | }) 125 | 126 | if err := s.servers[protocol].Shutdown(ctx); err != nil { 127 | logger.WithError(err).Error("Failed to shutdown RPC server") 128 | } else { 129 | logger.Info("Succeed to shutdown RPC server") 130 | } 131 | } 132 | 133 | func (s *Server) String() string { return s.name } 134 | -------------------------------------------------------------------------------- /util/types.go: -------------------------------------------------------------------------------- 1 | package util 2 | 3 | import 
"reflect" 4 | 5 | // Helper function to check if interface value is nil, since "i == nil" checks nil interface case only. 6 | // Refer to https://mangatmodi.medium.com/go-check-nil-interface-the-right-way-d142776edef1 for more details. 7 | func IsInterfaceValNil(i interface{}) bool { 8 | if i == nil { 9 | return true 10 | } 11 | 12 | switch reflect.TypeOf(i).Kind() { 13 | case reflect.Ptr, reflect.Map, reflect.Array, reflect.Chan, reflect.Slice, reflect.Func: 14 | return reflect.ValueOf(i).IsNil() 15 | } 16 | 17 | return false 18 | } 19 | --------------------------------------------------------------------------------