├── services ├── bidcollect │ ├── website │ │ ├── static │ │ │ ├── styles.css │ │ │ └── favicon │ │ │ │ ├── favicon.ico │ │ │ │ ├── favicon-16x16.png │ │ │ │ ├── favicon-32x32.png │ │ │ │ ├── apple-touch-icon.png │ │ │ │ ├── android-chrome-192x192.png │ │ │ │ └── android-chrome-512x512.png │ │ ├── templates │ │ │ ├── index_root.html │ │ │ ├── index_files.html │ │ │ └── base.html │ │ ├── htmldata.go │ │ ├── utils.go │ │ ├── devserver.go │ │ └── generator.go │ ├── types │ │ ├── consts.go │ │ ├── types_test.go │ │ └── types.go │ ├── webserver │ │ ├── handler.go │ │ └── webserver.go │ ├── bidcollector.go │ ├── ultrasound-stream.go │ ├── data-api-poller.go │ ├── bid-processor.go │ └── getheader-poller.go └── website │ ├── types.go │ ├── html.go │ ├── templates │ ├── base.html │ └── daily-stats.html │ ├── utils_test.go │ └── webserver_data.go ├── static ├── images │ ├── cows1.jpg │ ├── robot1.jpg │ ├── robot4.jpg │ ├── ogimage.png │ ├── robot4-c.png │ ├── logo-space.png │ └── logo1-square.png ├── favicon │ ├── favicon.ico │ ├── favicon-16x16.png │ ├── favicon-32x32.png │ ├── apple-touch-icon.png │ ├── android-chrome-192x192.png │ ├── android-chrome-512x512.png │ └── site.webmanifest ├── README.md └── sortable.min.js ├── docs ├── img │ └── bidcollect-overview.png └── 2024-06_bidcollect.md ├── vars ├── relays.go ├── builder_addresses.go ├── builder_aliases_test.go ├── vars.go ├── builder_aliases.go └── config.go ├── scripts ├── bidcollect │ ├── s3 │ │ ├── get-folders.sh │ │ ├── get-files.sh │ │ └── upload-file-to-r2.sh │ ├── bids-combine-and-upload-yesterday.sh │ └── bids-combine-and-upload.sh ├── backfill-stats.sh ├── backfill.sh ├── send-pushover-notification.sh └── website-healthcheck.sh ├── common ├── errors.go ├── utils_test.go ├── logging.go ├── ultrasoundbid.go ├── ultrasoundbid_test.go ├── eth_node.go ├── request.go ├── relayentry.go ├── ultrasoundbid_encoding.go └── utils.go ├── main.go ├── database ├── migrations │ ├── migration.go │ ├── 
004_add_block_timestamp.go │ ├── 002_add_blob_count.go │ ├── 003_add_blob_index.go │ └── 001_init_database.go ├── util_test.go ├── database_test.go ├── vars │ └── tables.go ├── util.go ├── typesconv.go └── types.go ├── cmd ├── version.go ├── service │ ├── service.go │ ├── website.go │ ├── bidcollect.go │ └── backfill_runner.go ├── core │ ├── core.go │ └── data-api-backfill.go ├── util │ ├── util.go │ └── update-extradata.go └── root.go ├── .github ├── pull_request_template.md └── workflows │ ├── checks.yml │ └── release.yml ├── .env.example ├── .gitignore ├── staticcheck.conf ├── Dockerfile ├── config-hoodi.yaml ├── config-mainnet.yaml ├── .golangci.yaml ├── Makefile ├── go.mod └── README.md /services/bidcollect/website/static/styles.css: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /static/images/cows1.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/flashbots/relayscan/HEAD/static/images/cows1.jpg -------------------------------------------------------------------------------- /static/images/robot1.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/flashbots/relayscan/HEAD/static/images/robot1.jpg -------------------------------------------------------------------------------- /static/images/robot4.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/flashbots/relayscan/HEAD/static/images/robot4.jpg -------------------------------------------------------------------------------- /static/favicon/favicon.ico: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/flashbots/relayscan/HEAD/static/favicon/favicon.ico 
-------------------------------------------------------------------------------- /static/images/ogimage.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/flashbots/relayscan/HEAD/static/images/ogimage.png -------------------------------------------------------------------------------- /static/images/robot4-c.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/flashbots/relayscan/HEAD/static/images/robot4-c.png -------------------------------------------------------------------------------- /static/images/logo-space.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/flashbots/relayscan/HEAD/static/images/logo-space.png -------------------------------------------------------------------------------- /static/images/logo1-square.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/flashbots/relayscan/HEAD/static/images/logo1-square.png -------------------------------------------------------------------------------- /docs/img/bidcollect-overview.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/flashbots/relayscan/HEAD/docs/img/bidcollect-overview.png -------------------------------------------------------------------------------- /static/favicon/favicon-16x16.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/flashbots/relayscan/HEAD/static/favicon/favicon-16x16.png -------------------------------------------------------------------------------- /static/favicon/favicon-32x32.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/flashbots/relayscan/HEAD/static/favicon/favicon-32x32.png 
-------------------------------------------------------------------------------- /static/favicon/apple-touch-icon.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/flashbots/relayscan/HEAD/static/favicon/apple-touch-icon.png -------------------------------------------------------------------------------- /static/favicon/android-chrome-192x192.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/flashbots/relayscan/HEAD/static/favicon/android-chrome-192x192.png -------------------------------------------------------------------------------- /static/favicon/android-chrome-512x512.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/flashbots/relayscan/HEAD/static/favicon/android-chrome-512x512.png -------------------------------------------------------------------------------- /services/bidcollect/website/static/favicon/favicon.ico: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/flashbots/relayscan/HEAD/services/bidcollect/website/static/favicon/favicon.ico -------------------------------------------------------------------------------- /static/README.md: -------------------------------------------------------------------------------- 1 | https://purecss.io/start/ 2 | https://purecss.io/layouts/marketing/ 3 | https://github.com/pure-css/pure/tree/master/site/static/layouts/marketing -------------------------------------------------------------------------------- /services/bidcollect/website/static/favicon/favicon-16x16.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/flashbots/relayscan/HEAD/services/bidcollect/website/static/favicon/favicon-16x16.png -------------------------------------------------------------------------------- 
/services/bidcollect/website/static/favicon/favicon-32x32.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/flashbots/relayscan/HEAD/services/bidcollect/website/static/favicon/favicon-32x32.png -------------------------------------------------------------------------------- /services/bidcollect/website/static/favicon/apple-touch-icon.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/flashbots/relayscan/HEAD/services/bidcollect/website/static/favicon/apple-touch-icon.png -------------------------------------------------------------------------------- /vars/relays.go: -------------------------------------------------------------------------------- 1 | package vars 2 | 3 | // Relay URLs - populated from config file 4 | var ( 5 | RelayFlashbots string 6 | RelayUltrasound string 7 | RelayURLs []string 8 | ) 9 | -------------------------------------------------------------------------------- /services/bidcollect/website/static/favicon/android-chrome-192x192.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/flashbots/relayscan/HEAD/services/bidcollect/website/static/favicon/android-chrome-192x192.png -------------------------------------------------------------------------------- /services/bidcollect/website/static/favicon/android-chrome-512x512.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/flashbots/relayscan/HEAD/services/bidcollect/website/static/favicon/android-chrome-512x512.png -------------------------------------------------------------------------------- /scripts/bidcollect/s3/get-folders.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | aws --profile r2 s3 ls s3://relayscan-bidarchive/$1 --endpoint-url 
"https://${CLOUDFLARE_R2_ACCOUNT_ID}.r2.cloudflarestorage.com" | awk '{ print $2 }' -------------------------------------------------------------------------------- /vars/builder_addresses.go: -------------------------------------------------------------------------------- 1 | package vars 2 | 3 | // BuilderAddresses maps coinbase addresses to their owned addresses 4 | // Populated from config file 5 | var BuilderAddresses map[string]map[string]bool 6 | -------------------------------------------------------------------------------- /common/errors.go: -------------------------------------------------------------------------------- 1 | package common 2 | 3 | import ( 4 | "errors" 5 | "fmt" 6 | ) 7 | 8 | var ( 9 | ErrMissingRelayPubkey = fmt.Errorf("missing relay public key") 10 | ErrURLEmpty = errors.New("url is empty") 11 | ) 12 | -------------------------------------------------------------------------------- /scripts/backfill-stats.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | dir=$( dirname -- "$0"; ) 4 | cd $dir 5 | cd .. 6 | source .env.prod 7 | ./relayscan core update-builder-stats --backfill --daily --verbose 2>&1 | /usr/bin/tee -a /var/log/relayscan-stats.log 8 | -------------------------------------------------------------------------------- /main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "github.com/flashbots/relayscan/cmd" 5 | "github.com/flashbots/relayscan/vars" 6 | ) 7 | 8 | var Version = "dev" // is set during build process 9 | 10 | func main() { 11 | vars.Version = Version 12 | cmd.Execute() 13 | } 14 | -------------------------------------------------------------------------------- /scripts/backfill.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | dir=$( dirname -- "$0"; ) 4 | cd $dir 5 | cd .. 
6 | source .env.prod 7 | ./relayscan core data-api-backfill 2>&1 | /usr/bin/tee /var/log/relayscan.log 8 | ./relayscan core check-payload-value 2>&1 | /usr/bin/tee -a /var/log/relayscan.log 9 | -------------------------------------------------------------------------------- /scripts/bidcollect/s3/get-files.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # require one argument 3 | if [ $# -ne 1 ]; then 4 | echo "Usage: $0 " 5 | exit 1 6 | fi 7 | 8 | aws --profile r2 s3 ls s3://relayscan-bidarchive/$1 --endpoint-url "https://${CLOUDFLARE_R2_ACCOUNT_ID}.r2.cloudflarestorage.com" -------------------------------------------------------------------------------- /scripts/send-pushover-notification.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | if [ -z "$1" ]; then 3 | echo "Usage: $0 " 4 | exit 1 5 | fi 6 | 7 | curl -s \ 8 | --form-string "token=$PUSHOVER_APP_TOKEN" \ 9 | --form-string "user=$PUSHOVER_APP_KEY" \ 10 | --form-string "message=$1" \ 11 | https://api.pushover.net/1/messages.json 12 | -------------------------------------------------------------------------------- /services/website/types.go: -------------------------------------------------------------------------------- 1 | package website 2 | 3 | import "github.com/flashbots/relayscan/database" 4 | 5 | type HTTPErrorResp struct { 6 | Code int `json:"code"` 7 | Message string `json:"message"` 8 | } 9 | 10 | type TopBuilderDisplayEntry struct { 11 | Info *database.TopBuilderEntry `json:"info"` 12 | Children []*database.TopBuilderEntry `json:"children"` 13 | } 14 | -------------------------------------------------------------------------------- /database/migrations/migration.go: -------------------------------------------------------------------------------- 1 | // Package migrations contains all the migration files 2 | package migrations 3 | 4 | import ( 5 | migrate "github.com/rubenv/sql-migrate" 
6 | ) 7 | 8 | var Migrations = migrate.MemoryMigrationSource{ 9 | Migrations: []*migrate.Migration{ 10 | Migration001InitDatabase, 11 | Migration002AddBlobCount, 12 | Migration003AddBlobIndexes, 13 | Migration004AddBlockTimestamp, 14 | }, 15 | } 16 | -------------------------------------------------------------------------------- /database/util_test.go: -------------------------------------------------------------------------------- 1 | package database 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/stretchr/testify/require" 7 | ) 8 | 9 | func TestSlotTimeConversion(t *testing.T) { 10 | slot := 8901362 11 | slotTime := slotToTime(uint64(slot)) //nolint:gosec 12 | require.Equal(t, 1713640367, int(slotTime.Unix())) 13 | convertedSlot := timeToSlot(slotTime) 14 | require.Equal(t, uint64(slot), convertedSlot) //nolint:gosec 15 | } 16 | -------------------------------------------------------------------------------- /cmd/version.go: -------------------------------------------------------------------------------- 1 | package cmd 2 | 3 | import ( 4 | "fmt" 5 | 6 | "github.com/flashbots/relayscan/vars" 7 | "github.com/spf13/cobra" 8 | ) 9 | 10 | var versionCmd = &cobra.Command{ 11 | Use: "version", 12 | Short: "Print the version number the relay application", 13 | Long: `All software has versions. 
This is the boost relay's`, 14 | Run: func(cmd *cobra.Command, args []string) { 15 | fmt.Printf("relayscan %s\n", vars.Version) 16 | }, 17 | } 18 | -------------------------------------------------------------------------------- /.github/pull_request_template.md: -------------------------------------------------------------------------------- 1 | ## 📝 Summary 2 | 3 | 4 | 5 | ## ⛱ Motivation and Context 6 | 7 | 8 | 9 | ## 📚 References 10 | 11 | 12 | 13 | --- 14 | 15 | ## ✅ I have run these commands 16 | 17 | * [ ] `make lint` 18 | * [ ] `make test` 19 | * [ ] `go mod tidy` 20 | -------------------------------------------------------------------------------- /database/database_test.go: -------------------------------------------------------------------------------- 1 | package database 2 | 3 | // var ( 4 | // runDBTests = os.Getenv("RUN_DB_TESTS") == "1" 5 | // testDBDSN = common.GetEnv("TEST_DB_DSN", "postgres://postgres:postgres@localhost:5432/postgres?sslmode=disable") 6 | // ) 7 | 8 | // func TestPostgresStuff(t *testing.T) { 9 | // if !runDBTests { 10 | // t.Skip("Skipping database tests") 11 | // } 12 | 13 | // db, err := NewDatabaseService(testDBDSN) 14 | // require.NoError(t, err) 15 | 16 | // } 17 | -------------------------------------------------------------------------------- /.env.example: -------------------------------------------------------------------------------- 1 | export DB_TABLE_PREFIX="mainnet" 2 | export POSTGRES_DSN="postgres://postgres:postgres@localhost:5432/postgres?sslmode=disable" 3 | export ETH_NODE_URI="http://localhost:8545" 4 | # export ETH_NODE_BACKUP_URI="https://mainnet.infura.io/v3/API_KEY" # optional 5 | 6 | # Beacon node is required for bid collection 7 | # export BEACON_URI="http://localhost:3500" 8 | 9 | # Genesis time is used to calculate slot numbers, if use devnet/custom chain, set this 10 | # export GENESIS="1606824023" 11 | -------------------------------------------------------------------------------- /.gitignore: 
-------------------------------------------------------------------------------- 1 | # Binaries for programs and plugins 2 | *.exe 3 | *.exe~ 4 | *.dll 5 | *.so 6 | *.dylib 7 | 8 | # Test binary, built with `go test -c` 9 | *.test 10 | 11 | # Output of the go coverage tool, specifically when used with LiteIDE 12 | *.out 13 | 14 | # Dependency directories (remove the comment below to include it) 15 | # vendor/ 16 | .env* 17 | !/.env.example 18 | 19 | /relayscan 20 | /backfill.sh 21 | /tmp/ 22 | /static_dev/ 23 | /relayscan 24 | /deploy* 25 | /test.csv 26 | /csv/ 27 | /build/ -------------------------------------------------------------------------------- /vars/builder_aliases_test.go: -------------------------------------------------------------------------------- 1 | package vars 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/stretchr/testify/require" 7 | ) 8 | 9 | func TestBuilderAliases(t *testing.T) { 10 | require.Equal(t, "penguinbuild.org", BuilderNameFromExtraData("@penguinbuild.org")) 11 | require.Equal(t, "foobar", BuilderNameFromExtraData("foobar")) 12 | require.Equal(t, "builder0x69", BuilderNameFromExtraData("@builder0x69")) 13 | require.Equal(t, "bob the builder", BuilderNameFromExtraData("s1e2xf")) 14 | } 15 | -------------------------------------------------------------------------------- /services/bidcollect/website/templates/index_root.html: -------------------------------------------------------------------------------- 1 | {{ define "content" }} 2 | 3 |
4 |
5 |

Ethereum Mainnet

6 | 11 | 12 | 13 |
14 |
15 |
16 |

17 |

The data is dedicated to the public domain under the CC-0 license.

18 |

19 | {{ end }} -------------------------------------------------------------------------------- /database/migrations/004_add_block_timestamp.go: -------------------------------------------------------------------------------- 1 | package migrations 2 | 3 | import ( 4 | "github.com/flashbots/relayscan/database/vars" 5 | migrate "github.com/rubenv/sql-migrate" 6 | ) 7 | 8 | var migration004SQL = ` 9 | ALTER TABLE ` + vars.TableDataAPIPayloadDelivered + ` ADD block_timestamp timestamp DEFAULT NULL; 10 | ` 11 | 12 | var Migration004AddBlockTimestamp = &migrate.Migration{ 13 | Id: "004-add-block-timestamp", 14 | Up: []string{migration004SQL}, 15 | 16 | DisableTransactionUp: false, 17 | DisableTransactionDown: true, 18 | } 19 | -------------------------------------------------------------------------------- /static/favicon/site.webmanifest: -------------------------------------------------------------------------------- 1 | { 2 | "name": "", 3 | "short_name": "", 4 | "icons": [ 5 | { 6 | "src": "/static/favicon/android-chrome-192x192.png", 7 | "sizes": "192x192", 8 | "type": "image/png" 9 | }, 10 | { 11 | "src": "/static/favicon/android-chrome-512x512.png", 12 | "sizes": "512x512", 13 | "type": "image/png" 14 | } 15 | ], 16 | "theme_color": "#ffffff", 17 | "background_color": "#ffffff", 18 | "display": "standalone" 19 | } -------------------------------------------------------------------------------- /database/migrations/002_add_blob_count.go: -------------------------------------------------------------------------------- 1 | package migrations 2 | 3 | import ( 4 | "github.com/flashbots/relayscan/database/vars" 5 | migrate "github.com/rubenv/sql-migrate" 6 | ) 7 | 8 | var migration002SQL = ` 9 | ALTER TABLE ` + vars.TableDataAPIPayloadDelivered + ` ADD num_blob_txs int DEFAULT NULL; 10 | ALTER TABLE ` + vars.TableDataAPIPayloadDelivered + ` ADD num_blobs int DEFAULT NULL; 11 | ` 12 | 13 | var Migration002AddBlobCount = &migrate.Migration{ 14 | Id: "002-add-blob-count", 
15 | Up: []string{migration002SQL}, 16 | 17 | DisableTransactionUp: false, 18 | DisableTransactionDown: true, 19 | } 20 | -------------------------------------------------------------------------------- /scripts/bidcollect/s3/upload-file-to-r2.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | src=$1 3 | target=$2 4 | if [ -z "$src" ]; then 5 | echo "Usage: $0 ["] 6 | exit 1 7 | fi 8 | 9 | # auto-fill target if not given 10 | if [ -z "$target" ]; then 11 | # remove "/mnt/data/relayscan-bidarchive/" prefix from src and make it the S3 prefix 12 | target="/ethereum/mainnet/${src#"/mnt/data/relayscan-bidarchive/"}" 13 | fi 14 | 15 | echo "uploading $src to S3 $target ..." 16 | aws --profile r2 s3 cp $src s3://relayscan-bidarchive$target --endpoint-url "https://${CLOUDFLARE_R2_ACCOUNT_ID}.r2.cloudflarestorage.com" 17 | -------------------------------------------------------------------------------- /cmd/service/service.go: -------------------------------------------------------------------------------- 1 | // Package service contains code for the service subcommands 2 | package service 3 | 4 | import ( 5 | "github.com/flashbots/relayscan/common" 6 | "github.com/spf13/cobra" 7 | ) 8 | 9 | var ( 10 | log = common.Logger 11 | beaconNodeURI string 12 | ) 13 | 14 | var ServiceCmd = &cobra.Command{ 15 | Use: "service", 16 | Short: "service subcommand", 17 | Run: func(cmd *cobra.Command, args []string) { 18 | _ = cmd.Help() 19 | }, 20 | } 21 | 22 | func init() { 23 | ServiceCmd.AddCommand(websiteCmd) 24 | ServiceCmd.AddCommand(bidCollectCmd) 25 | ServiceCmd.AddCommand(backfillRunnerCmd) 26 | } 27 | -------------------------------------------------------------------------------- /staticcheck.conf: -------------------------------------------------------------------------------- 1 | checks = ["all"] 2 | # checks = ["all", "-ST1000", "-ST1003", "-ST1016", "-ST1020", "-ST1021", "-ST1022", "-ST1023"] 3 | initialisms = ["ACL", 
"API", "ASCII", "CPU", "CSS", "DNS", "EOF", "GUID", "HTML", "HTTP", "HTTPS", "ID", "IP", "JSON", "QPS", "RAM", "RPC", "SLA", "SMTP", "SQL", "SSH", "TCP", "TLS", "TTL", "UDP", "UI", "GID", "UID", "UUID", "URI", "URL", "UTF8", "VM", "XML", "XMPP", "XSRF", "XSS", "SIP", "RTP", "AMQP", "DB", "TS"] 4 | dot_import_whitelist = ["github.com/mmcloughlin/avo/build", "github.com/mmcloughlin/avo/operand", "github.com/mmcloughlin/avo/reg"] 5 | http_status_code_whitelist = ["200", "400", "404", "500"] 6 | -------------------------------------------------------------------------------- /cmd/core/core.go: -------------------------------------------------------------------------------- 1 | // Package core contains code for the core subcommands 2 | package core 3 | 4 | import ( 5 | "github.com/flashbots/relayscan/common" 6 | "github.com/spf13/cobra" 7 | ) 8 | 9 | var ( 10 | log = common.Logger 11 | check = common.Check 12 | numThreads uint64 = 10 13 | slot uint64 14 | ) 15 | 16 | var CoreCmd = &cobra.Command{ 17 | Use: "core", 18 | Short: "core subcommand", 19 | Run: func(cmd *cobra.Command, args []string) { 20 | _ = cmd.Help() 21 | }, 22 | } 23 | 24 | func init() { 25 | CoreCmd.AddCommand(checkPayloadValueCmd) 26 | CoreCmd.AddCommand(backfillDataAPICmd) 27 | CoreCmd.AddCommand(updateBuilderStatsCmd) 28 | } 29 | -------------------------------------------------------------------------------- /cmd/util/util.go: -------------------------------------------------------------------------------- 1 | // Package util contains code for the util subcommands 2 | package util 3 | 4 | import ( 5 | "github.com/flashbots/relayscan/common" 6 | "github.com/spf13/cobra" 7 | ) 8 | 9 | var ( 10 | log = common.Logger 11 | numThreads uint64 = 10 12 | 13 | // Printer for pretty printing numbers 14 | // printer = message.NewPrinter(language.English) 15 | 16 | ethNodeURI string 17 | ethNodeBackupURI string 18 | ) 19 | 20 | var UtilCmd = &cobra.Command{ 21 | Use: "util", 22 | Short: "util subcommand", 23 | 
Run: func(cmd *cobra.Command, args []string) { 24 | _ = cmd.Help() 25 | }, 26 | } 27 | 28 | func init() { 29 | UtilCmd.AddCommand(backfillExtradataCmd) 30 | } 31 | -------------------------------------------------------------------------------- /database/migrations/003_add_blob_index.go: -------------------------------------------------------------------------------- 1 | package migrations 2 | 3 | import ( 4 | "github.com/flashbots/relayscan/database/vars" 5 | migrate "github.com/rubenv/sql-migrate" 6 | ) 7 | 8 | var migration003SQL = ` 9 | CREATE INDEX IF NOT EXISTS ` + vars.TableDataAPIPayloadDelivered + `_num_blob_txs_idx ON ` + vars.TableDataAPIPayloadDelivered + `("num_blob_txs"); 10 | CREATE INDEX IF NOT EXISTS ` + vars.TableDataAPIPayloadDelivered + `_num_blobs_idx ON ` + vars.TableDataAPIPayloadDelivered + `("num_blobs"); 11 | ` 12 | 13 | var Migration003AddBlobIndexes = &migrate.Migration{ 14 | Id: "003-add-blob-indexes", 15 | Up: []string{migration003SQL}, 16 | 17 | DisableTransactionUp: false, 18 | DisableTransactionDown: true, 19 | } 20 | -------------------------------------------------------------------------------- /services/bidcollect/types/consts.go: -------------------------------------------------------------------------------- 1 | package types 2 | 3 | const ( 4 | SourceTypeGetHeader = 0 5 | SourceTypeDataAPI = 1 6 | SourceTypeUltrasoundStream = 2 7 | 8 | UltrasoundStreamDefaultURL = "ws://relay-builders-eu.ultrasound.money/ws/v1/top_bid" 9 | InitialBackoffSec = 5 10 | MaxBackoffSec = 120 11 | 12 | // bucketMinutes is the number of minutes to write into each CSV file (i.e. 
new file created for every X minutes bucket) 13 | BucketMinutes = 60 14 | 15 | // channel size for bid collector inputs 16 | BidCollectorInputChannelSize = 1000 17 | 18 | RedisChannel = "bidcollect/bids" 19 | ) 20 | 21 | var ( 22 | // csvFileEnding = relaycommon.GetEnv("CSV_FILE_END", "tsv") 23 | // csvSeparator = relaycommon.GetEnv("CSV_SEP", "\t") 24 | ) 25 | -------------------------------------------------------------------------------- /database/vars/tables.go: -------------------------------------------------------------------------------- 1 | // Package vars contains the database variables such as dynamic table names 2 | package vars 3 | 4 | import ( 5 | relaycommon "github.com/flashbots/mev-boost-relay/common" 6 | ) 7 | 8 | var ( 9 | tableBase = relaycommon.GetEnv("DB_TABLE_PREFIX", "rsdev") 10 | 11 | TableMigrations = tableBase + "_migrations" 12 | TableSignedBuilderBid = tableBase + "_signed_builder_bid" 13 | TableDataAPIPayloadDelivered = tableBase + "_data_api_payload_delivered" 14 | TableDataAPIBuilderBid = tableBase + "_data_api_builder_bid" 15 | TableError = tableBase + "_error" 16 | TableBlockBuilder = tableBase + "_blockbuilder" 17 | TableBlockBuilderInclusionStats = tableBase + "_blockbuilder_stats_inclusion" 18 | ) 19 | -------------------------------------------------------------------------------- /common/utils_test.go: -------------------------------------------------------------------------------- 1 | package common 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/dustin/go-humanize" 7 | "github.com/stretchr/testify/require" 8 | ) 9 | 10 | func TestSlotToTime(t *testing.T) { 11 | require.Equal(t, int64(1685923199), SlotToTime(6591598).Unix()) 12 | } 13 | 14 | func TestTimeToSlot(t *testing.T) { 15 | require.Equal(t, uint64(6591598), TimeToSlot(SlotToTime(6591598))) 16 | } 17 | 18 | func TestBytesFormat(t *testing.T) { 19 | n := uint64(795025173) 20 | 21 | s := humanize.Bytes(n) 22 | require.Equal(t, "795 MB", s) 23 | 24 | s = 
humanize.IBytes(n) 25 | require.Equal(t, "758 MiB", s) 26 | 27 | s = HumanBytes(n) 28 | require.Equal(t, "758 MB", s) 29 | 30 | s = HumanBytes(n * 10) 31 | require.Equal(t, "7.4 GB", s) 32 | 33 | s = HumanBytes(n / 1000) 34 | require.Equal(t, "776 KB", s) 35 | } 36 | -------------------------------------------------------------------------------- /common/logging.go: -------------------------------------------------------------------------------- 1 | package common 2 | 3 | import ( 4 | "os" 5 | 6 | "github.com/flashbots/relayscan/vars" 7 | "github.com/sirupsen/logrus" 8 | ) 9 | 10 | var Logger = LogSetup(vars.LogJSON, vars.DefaultLogLevel, vars.LogDebug) 11 | 12 | func LogSetup(json bool, logLevel string, logDebug bool) *logrus.Entry { 13 | log := logrus.NewEntry(logrus.New()) 14 | log.Logger.SetOutput(os.Stdout) 15 | 16 | if json { 17 | log.Logger.SetFormatter(&logrus.JSONFormatter{}) 18 | } else { 19 | log.Logger.SetFormatter(&logrus.TextFormatter{ 20 | FullTimestamp: true, 21 | }) 22 | } 23 | 24 | if logDebug { 25 | logLevel = "debug" 26 | } 27 | if logLevel != "" { 28 | lvl, err := logrus.ParseLevel(logLevel) 29 | if err != nil { 30 | log.Fatalf("Invalid loglevel: %s", logLevel) 31 | } 32 | log.Logger.SetLevel(lvl) 33 | } 34 | return log 35 | } 36 | -------------------------------------------------------------------------------- /scripts/bidcollect/bids-combine-and-upload-yesterday.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # This is a quick and dirty script to create a daily archive for yesterday and upload to Cloudflare R2 and AWS S3. 
4 | # 5 | set -o errexit 6 | set -o nounset 7 | set -o pipefail 8 | if [[ "${TRACE-0}" == "1" ]]; then 9 | set -o xtrace 10 | fi 11 | 12 | # print current date 13 | echo "now: $(date)" 14 | 15 | # get yesterday's date 16 | d=$(date -d yesterday '+%Y-%m-%d') 17 | echo "upload for: $d" 18 | 19 | # change to project root directory 20 | cd "$(dirname "$0")" 21 | cd ../../ 22 | 23 | # load environment variables 24 | source .env.prod 25 | 26 | # archive and upload! 27 | ./scripts/bidcollect/bids-combine-and-upload.sh "/mnt/data/relayscan-bids/$d/" 28 | 29 | # update website 30 | echo "" 31 | echo "Updating the file listing website..." 32 | ./relayscan service bidcollect --build-website --build-website-upload 33 | -------------------------------------------------------------------------------- /common/ultrasoundbid.go: -------------------------------------------------------------------------------- 1 | package common 2 | 3 | import "math/big" 4 | 5 | // https://github.com/ultrasoundmoney/docs/blob/main/top-bid-websocket.md 6 | 7 | type ( 8 | U64 [8]byte 9 | Hash [32]byte 10 | PublicKey [48]byte 11 | Address [20]byte 12 | U256 [32]byte 13 | ) 14 | 15 | func (n *U256) String() string { 16 | return new(big.Int).SetBytes(ReverseBytes(n[:])).String() 17 | } 18 | 19 | type UltrasoundStreamBid struct { 20 | Timestamp uint64 `json:"timestamp"` 21 | Slot uint64 `json:"slot"` 22 | BlockNumber uint64 `json:"block_number"` 23 | BlockHash Hash `json:"block_hash" ssz-size:"32"` 24 | ParentHash Hash `json:"parent_hash" ssz-size:"32"` 25 | BuilderPubkey PublicKey `json:"builder_pubkey" ssz-size:"48"` 26 | FeeRecipient Address `json:"fee_recipient" ssz-size:"20"` 27 | Value U256 `json:"value" ssz-size:"32"` 28 | } 29 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | # syntax=docker/dockerfile:1 2 | FROM golang:1.24 as builder 3 | ARG VERSION 4 | WORKDIR /build 
5 | 6 | # Cache for the modules 7 | COPY go.mod ./ 8 | COPY go.sum ./ 9 | ENV GOCACHE=/root/.cache/go-build 10 | RUN --mount=type=cache,target=/root/.cache/go-build go mod download 11 | 12 | # Now adding all the code and start building 13 | ADD . . 14 | RUN --mount=type=cache,target=/root/.cache/go-build CGO_ENABLED=0 GOOS=linux go build -trimpath -ldflags "-s -X cmd.Version=${VERSION} -X main.Version=${VERSION}" -v -o relayscan main.go 15 | 16 | FROM alpine:latest 17 | WORKDIR /app 18 | COPY --from=builder /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/ 19 | COPY --from=builder /build/relayscan /app/relayscan 20 | COPY --from=builder /build/config-* /app/ 21 | COPY --from=builder /build/services/website/templates/ /app/services/website/templates/ 22 | COPY --from=builder /build/static/ /app/static/ 23 | 24 | ENV LISTEN_ADDR=":8080" 25 | EXPOSE 8080 26 | CMD ["/app/relayscan"] 27 | -------------------------------------------------------------------------------- /database/util.go: -------------------------------------------------------------------------------- 1 | package database 2 | 3 | import ( 4 | "net/url" 5 | "time" 6 | 7 | "github.com/flashbots/relayscan/vars" 8 | "github.com/sirupsen/logrus" 9 | ) 10 | 11 | func MustConnectPostgres(log *logrus.Entry, dsn string) *DatabaseService { 12 | dbURL, err := url.Parse(dsn) 13 | if err != nil { 14 | return nil 15 | } 16 | log.Infof("Connecting to Postgres database at %s%s ...", dbURL.Host, dbURL.Path) 17 | db, err := NewDatabaseService(dsn) 18 | if err != nil { 19 | log.WithError(err).Fatalf("Failed to connect to Postgres database at %s%s", dbURL.Host, dbURL.Path) 20 | } 21 | log.Infof("Connected to Postgres database at %s%s ✅", dbURL.Host, dbURL.Path) 22 | return db 23 | } 24 | 25 | func slotToTime(slot uint64) time.Time { 26 | timestamp := (slot * 12) + uint64(vars.Genesis) 27 | return time.Unix(int64(timestamp), 0).UTC() //nolint:gosec 28 | } 29 | 30 | func timeToSlot(t time.Time) uint64 { 31 | return 
uint64(t.UTC().Unix()-int64(vars.Genesis)) / 12 //nolint:gosec 32 | } 33 | -------------------------------------------------------------------------------- /vars/vars.go: -------------------------------------------------------------------------------- 1 | // Package vars contains global variables and configuration 2 | package vars 3 | 4 | import ( 5 | "os" 6 | 7 | "github.com/flashbots/go-utils/cli" 8 | relaycommon "github.com/flashbots/mev-boost-relay/common" 9 | ) 10 | 11 | var ( 12 | Version = "dev" // is set during build process 13 | LogDebug = os.Getenv("DEBUG") != "" 14 | LogJSON = os.Getenv("LOG_JSON") != "" 15 | Genesis = 1_606_824_023 // mainnet default, overwritten by config file 16 | 17 | DefaultBeaconURI = relaycommon.GetEnv("BEACON_URI", "http://localhost:3500") 18 | DefaultPostgresDSN = relaycommon.GetEnv("POSTGRES_DSN", "postgres://postgres:postgres@localhost:5432/postgres?sslmode=disable") 19 | DefaultLogLevel = relaycommon.GetEnv("LOG_LEVEL", "info") 20 | DefaultEthNodeURI = relaycommon.GetEnv("ETH_NODE_URI", "") 21 | DefaultEthBackupNodeURI = relaycommon.GetEnv("ETH_NODE_BACKUP_URI", "") 22 | 23 | DefaultBackfillRunnerInterval = cli.GetEnvInt("BACKFILL_RUNNER_INTERVAL_MIN", 5) 24 | DefaultBackfillRunnerNumThreads = cli.GetEnvInt("BACKFILL_RUNNER_NUM_THREADS", 10) 25 | ) 26 | -------------------------------------------------------------------------------- /vars/builder_aliases.go: -------------------------------------------------------------------------------- 1 | package vars 2 | 3 | import ( 4 | "regexp" 5 | "strings" 6 | ) 7 | 8 | // BuilderGroups maps builder name to a function that returns if an input string (extra_data) is an alias 9 | var BuilderGroups = map[string]func(string) bool{ 10 | "penguinbuild.org": func(in string) bool { 11 | return strings.Contains(in, "penguinbuild.org") 12 | }, 13 | "builder0x69": func(in string) bool { 14 | return strings.Contains(in, "builder0x69") 15 | }, 16 | "rsync-builder.xyz": func(in string) bool { 17 | 
return strings.Contains(in, "rsync") 18 | }, 19 | "bob the builder": func(in string) bool { 20 | match, _ := regexp.MatchString("s[0-9]+e[0-9].*(t|f)", in) 21 | return match 22 | }, 23 | "BuilderNet": func(in string) bool { 24 | return strings.Contains(in, "BuilderNet") 25 | }, 26 | } 27 | 28 | // BuilderNameFromExtraData returns the builder name from the extra_data field 29 | func BuilderNameFromExtraData(extraData string) string { 30 | for builder, aliasFunc := range BuilderGroups { 31 | if aliasFunc(extraData) { 32 | return builder 33 | } 34 | } 35 | return extraData 36 | } 37 | -------------------------------------------------------------------------------- /static/sortable.min.js: -------------------------------------------------------------------------------- 1 | document.addEventListener("click", function (b) { 2 | try { 3 | var p = function (a) { return v && a.getAttribute("data-sort-alt") || a.getAttribute("data-sort") || a.innerText }, q = function (a, c) { a.className = a.className.replace(w, "") + c }, f = function (a, c) { return a.nodeName === c ? a : f(a.parentNode, c) }, w = / dir-(u|d) /, v = b.shiftKey || b.altKey, e = f(b.target, "TH"), r = f(e, "TR"), g = f(r, "TABLE"); if (/\bsortable\b/.test(g.className)) { 4 | var l, d = r.cells; for (b = 0; b < d.length; b++)d[b] === e ? l = e.getAttribute("data-sort-col") || b : q(d[b], ""); d = " dir-d "; if (-1 !== 5 | e.className.indexOf(" dir-d ") || -1 !== g.className.indexOf("asc") && -1 == e.className.indexOf(" dir-u ")) d = " dir-u "; q(e, d); var m = g.tBodies[0], n = [].slice.call(m.rows, 0), t = " dir-u " === d; n.sort(function (a, c) { var h = p((t ? a : c).cells[l]), k = p((t ? c : a).cells[l]); return h.length && k.length && !isNaN(h - k) ? 
h - k : h.localeCompare(k) }); for (var u = m.cloneNode(); n.length;)u.appendChild(n.splice(0, 1)[0]); g.replaceChild(u, m) 6 | } 7 | } catch (a) { } 8 | }); -------------------------------------------------------------------------------- /cmd/root.go: -------------------------------------------------------------------------------- 1 | // Package cmd contains the cobra command line setup 2 | package cmd 3 | 4 | import ( 5 | "fmt" 6 | "os" 7 | 8 | "github.com/flashbots/relayscan/cmd/core" 9 | "github.com/flashbots/relayscan/cmd/service" 10 | "github.com/flashbots/relayscan/cmd/util" 11 | "github.com/flashbots/relayscan/vars" 12 | "github.com/spf13/cobra" 13 | ) 14 | 15 | var configFile string 16 | 17 | var rootCmd = &cobra.Command{ 18 | Short: "relayscan", 19 | Long: `https://github.com/flashbots/relayscan`, 20 | PersistentPreRunE: func(cmd *cobra.Command, args []string) error { 21 | if configFile == "" { 22 | configFile = "config-mainnet.yaml" 23 | } 24 | return vars.LoadConfig(configFile) 25 | }, 26 | Run: func(cmd *cobra.Command, args []string) { 27 | fmt.Printf("relayscan %s\n", vars.Version) 28 | _ = cmd.Help() 29 | }, 30 | } 31 | 32 | func init() { 33 | rootCmd.PersistentFlags().StringVar(&configFile, "config", os.Getenv("CONFIG_FILE"), "path to config file (default: config-mainnet.yaml)") 34 | } 35 | 36 | func Execute() { 37 | rootCmd.AddCommand(versionCmd) 38 | rootCmd.AddCommand(core.CoreCmd) 39 | rootCmd.AddCommand(util.UtilCmd) 40 | rootCmd.AddCommand(service.ServiceCmd) 41 | 42 | if err := rootCmd.Execute(); err != nil { 43 | fmt.Println(err) 44 | os.Exit(1) 45 | } 46 | } 47 | -------------------------------------------------------------------------------- /common/ultrasoundbid_test.go: -------------------------------------------------------------------------------- 1 | package common 2 | 3 | import ( 4 | "math/big" 5 | "testing" 6 | 7 | "github.com/ethereum/go-ethereum/common/hexutil" 8 | "github.com/stretchr/testify/require" 9 | ) 10 | 11 | func 
TestValueDecoding(t *testing.T) { 12 | expected := "55539751698389157" 13 | hex := "0xa558e5221c51c500000000000000000000000000000000000000000000000000" 14 | hexBytes := hexutil.MustDecode(hex) 15 | value := new(big.Int).SetBytes(ReverseBytes(hexBytes[:])).String() 16 | require.Equal(t, expected, value) 17 | } 18 | 19 | func TestUltrasoundBidSSZDecoding(t *testing.T) { 20 | hex := "0x704b87ce8f010000a94b8c0000000000b6043101000000002c02b28fd8fdb45fd6ac43dd04adad1449a35b64247b1ed23a723a1fcf6cac074d0668c9e0912134628c32a54854b952234ebb6c1fdd6b053566ac2d2a09498da03b00ddb78b2c111450a5417a8c368c40f1f140cdf97d95b7fa9565467e0bbbe27877d08e01c69b4e5b02b144e6a265df99a0839818b3f120ebac9b73f82b617dc6a5556c71794b1a9c5400000000000000000000000000000000000000000000000000" 21 | bytes := hexutil.MustDecode(hex) 22 | bid := new(UltrasoundStreamBid) 23 | err := bid.UnmarshalSSZ(bytes) 24 | require.NoError(t, err) 25 | 26 | require.Equal(t, uint64(1717156924272), bid.Timestamp) 27 | require.Equal(t, uint64(9194409), bid.Slot) 28 | require.Equal(t, uint64(19989686), bid.BlockNumber) 29 | require.Equal(t, "0x2c02b28fd8fdb45fd6ac43dd04adad1449a35b64247b1ed23a723a1fcf6cac07", hexutil.Encode(bid.BlockHash[:])) 30 | } 31 | -------------------------------------------------------------------------------- /services/bidcollect/website/templates/index_files.html: -------------------------------------------------------------------------------- 1 | {{ define "content" }} 2 | {{ $day:="" }} 3 | {{ $class:="even" }} 4 | {{ $change:="" }} 5 | 6 |
7 |
8 | {{ .CurrentNetwork }} 9 |

{{ .CurrentMonth }}

10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | {{ range .Files }} 19 | {{ $dayTmp:=.Filename|substr10 }} 20 | {{ if ne $day $dayTmp }} 21 | {{ $change = "1" }} 22 | {{ $day = $dayTmp }} 23 | {{ if ne $class "even" }} 24 | {{ $class = "even" }} 25 | {{ else }} 26 | {{ $class = "odd" }} 27 | {{ end }} 28 | {{ else }} 29 | {{ $change = "" }} 30 | {{ end }} 31 | 32 | 37 | 38 | 39 | {{ end }} 40 | 41 |
../
33 | {{ if eq $change "1" }}{{ end }} 34 | 35 | {{ .Filename }} 36 | {{ .Size | humanBytes }}
42 | 43 |
44 |
45 |

46 | The data is dedicated to the public domain under the CC-0 license. 47 |

48 | {{ end }} -------------------------------------------------------------------------------- /.github/workflows/checks.yml: -------------------------------------------------------------------------------- 1 | name: Checks 2 | 3 | on: 4 | push: 5 | branches: 6 | - main 7 | pull_request: 8 | 9 | jobs: 10 | test: 11 | name: Test 12 | runs-on: ubuntu-latest 13 | steps: 14 | - name: Set up Go 15 | uses: actions/setup-go@v6 16 | with: 17 | go-version: ^1.24 18 | 19 | - name: Check out code into the Go module directory 20 | uses: actions/checkout@v5 21 | 22 | - name: Run unit tests and generate the coverage report 23 | run: make test-race 24 | 25 | lint: 26 | name: Lint 27 | runs-on: ubuntu-latest 28 | steps: 29 | - name: Check out code into the Go module directory 30 | uses: actions/checkout@v5 31 | 32 | - name: Set up Go 33 | uses: actions/setup-go@v6 34 | with: 35 | go-version: ^1.24 36 | 37 | - name: Download dependencies 38 | run: go mod download 39 | 40 | - name: Install gofumpt 41 | run: go install mvdan.cc/gofumpt@v0.6.0 42 | 43 | - name: Install staticcheck 44 | run: go install honnef.co/go/tools/cmd/staticcheck@2025.1.1 45 | 46 | - name: Install golangci-lint 47 | run: go install github.com/golangci/golangci-lint/v2/cmd/golangci-lint@v2.1.2 48 | 49 | - name: Lint 50 | run: make lint 51 | 52 | - name: Ensure go mod tidy runs without changes 53 | run: | 54 | go mod tidy 55 | git diff-index HEAD 56 | git diff-index --quiet HEAD 57 | -------------------------------------------------------------------------------- /cmd/service/website.go: -------------------------------------------------------------------------------- 1 | package service 2 | 3 | import ( 4 | "os" 5 | 6 | relaycommon "github.com/flashbots/mev-boost-relay/common" 7 | "github.com/flashbots/relayscan/database" 8 | "github.com/flashbots/relayscan/services/website" 9 | "github.com/flashbots/relayscan/vars" 10 | "github.com/spf13/cobra" 11 | ) 12 | 13 | var ( 14 | websiteDefaultListenAddr = 
relaycommon.GetEnv("LISTEN_ADDR", "localhost:9060") 15 | websiteListenAddr string 16 | websiteDev = os.Getenv("DEV") == "1" 17 | ) 18 | 19 | func init() { 20 | // rootCmd.AddCommand(websiteCmd) 21 | websiteCmd.Flags().StringVar(&websiteListenAddr, "listen-addr", websiteDefaultListenAddr, "listen address for webserver") 22 | websiteCmd.Flags().BoolVar(&websiteDev, "dev", websiteDev, "development mode") 23 | } 24 | 25 | var websiteCmd = &cobra.Command{ 26 | Use: "website", 27 | Short: "Start the website server", 28 | Run: func(cmd *cobra.Command, args []string) { 29 | var err error 30 | 31 | // Connect to Postgres 32 | db := database.MustConnectPostgres(log, vars.DefaultPostgresDSN) 33 | 34 | // Create the website service 35 | opts := &website.WebserverOpts{ 36 | ListenAddress: websiteListenAddr, 37 | DB: db, 38 | Log: log, 39 | Dev: websiteDev, 40 | } 41 | 42 | srv, err := website.NewWebserver(opts) 43 | if err != nil { 44 | log.WithError(err).Fatal("failed to create service") 45 | } 46 | 47 | // Start the server 48 | log.Infof("Webserver starting on %s (%s) ...", websiteListenAddr, vars.Version) 49 | log.Fatal(srv.StartServer()) 50 | }, 51 | } 52 | -------------------------------------------------------------------------------- /services/bidcollect/webserver/handler.go: -------------------------------------------------------------------------------- 1 | package webserver 2 | 3 | import ( 4 | "fmt" 5 | "net/http" 6 | "strings" 7 | 8 | "github.com/flashbots/relayscan/services/bidcollect/types" 9 | "github.com/google/uuid" 10 | ) 11 | 12 | type SSESubscription struct { 13 | uid string 14 | msgC chan string 15 | } 16 | 17 | func (srv *Server) handleSSESubscription(w http.ResponseWriter, r *http.Request) { 18 | // SSE server for transactions 19 | srv.log.Info("SSE connection opened for transactions") 20 | 21 | // Set CORS headers to allow all origins. You may want to restrict this to specific origins in a production environment. 
22 | w.Header().Set("Access-Control-Allow-Origin", "*") 23 | w.Header().Set("Access-Control-Expose-Headers", "Content-Type") 24 | 25 | w.Header().Set("Content-Type", "text/event-stream") 26 | w.Header().Set("Cache-Control", "no-cache") 27 | w.Header().Set("Connection", "keep-alive") 28 | 29 | subscriber := SSESubscription{ 30 | uid: uuid.New().String(), 31 | msgC: make(chan string, 100), 32 | } 33 | srv.addSubscriber(&subscriber) 34 | 35 | // Send CSV header 36 | helloMsg := strings.Join(types.CommonBidCSVFields, ",") + "\n" 37 | fmt.Fprint(w, helloMsg) //nolint:errcheck 38 | w.(http.Flusher).Flush() //nolint:forcetypeassert 39 | 40 | // Wait for txs or end of request... 41 | for { 42 | select { 43 | case <-r.Context().Done(): 44 | srv.log.Info("SSE closed, removing subscriber") 45 | srv.removeSubscriber(&subscriber) 46 | return 47 | 48 | case msg := <-subscriber.msgC: 49 | fmt.Fprintf(w, "%s\n", msg) //nolint:errcheck 50 | w.(http.Flusher).Flush() //nolint:forcetypeassert 51 | } 52 | } 53 | } 54 | -------------------------------------------------------------------------------- /services/bidcollect/types/types_test.go: -------------------------------------------------------------------------------- 1 | package types 2 | 3 | import ( 4 | "strings" 5 | "testing" 6 | 7 | "github.com/stretchr/testify/require" 8 | ) 9 | 10 | func TestSourceTypes(t *testing.T) { 11 | require.Equal(t, 0, SourceTypeGetHeader) 12 | require.Equal(t, 1, SourceTypeDataAPI) 13 | require.Equal(t, 2, SourceTypeUltrasoundStream) 14 | } 15 | 16 | func TestCSVHasNotChanged(t *testing.T) { 17 | // The specific field ordering is used in many places throughout the ecosystem and must not be changed. 
18 | expectedResult := "source_type,received_at_ms,timestamp_ms,slot,slot_t_ms,value,block_hash,parent_hash,builder_pubkey,block_number,block_fee_recipient,relay,proposer_pubkey,proposer_fee_recipient,optimistic_submission" 19 | currentResult := strings.Join(CommonBidCSVFields, ",") 20 | require.Equal(t, expectedResult, currentResult) 21 | 22 | bid := CommonBid{ 23 | SourceType: SourceTypeGetHeader, 24 | ReceivedAtMs: 1, 25 | TimestampMs: 2, 26 | Slot: 3, 27 | BlockNumber: 4, 28 | BlockHash: "5", 29 | ParentHash: "6", 30 | BuilderPubkey: "7", 31 | Value: "8", 32 | BlockFeeRecipient: "9", 33 | Relay: "10", 34 | ProposerPubkey: "11", 35 | ProposerFeeRecipient: "12", 36 | OptimisticSubmission: true, 37 | } 38 | asCSV := bid.ToCSVLine(",") 39 | expected := "0,1,2,3,-1606824058998,8,5,6,7,4,9,10,11,12," 40 | require.Equal(t, expected, asCSV) 41 | 42 | // When source type is data-api, then optimistic field is included 43 | bid.SourceType = SourceTypeDataAPI 44 | asCSV = bid.ToCSVLine(",") 45 | expected = "1,1,2,3,-1606824058998,8,5,6,7,4,9,10,11,12,true" 46 | require.Equal(t, expected, asCSV) 47 | } 48 | -------------------------------------------------------------------------------- /scripts/website-healthcheck.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # Check health of relayscan.io and send notifications if state changes. 4 | # 5 | # https://www.relayscan.io/healthz 6 | # 7 | # This script is intended to be run as a cron job. 8 | # 9 | # It uses a temporary file to not send multiple notifications and store the error. 
10 | # 11 | set -o errexit 12 | set -o nounset 13 | set -o pipefail 14 | 15 | url="https://www.relayscan.io/healthz" 16 | # url="localhost:9060/healthz" 17 | check_fn="/tmp/relayscan-error.txt" 18 | check_cmd="curl -s $url" 19 | 20 | # load environment variables $PUSHOVER_APP_TOKEN and $PUSHOVER_APP_KEY 21 | source "$(dirname "$0")/../.env.prod" 22 | 23 | function send_notification() { 24 | curl -s \ 25 | --form-string "token=$PUSHOVER_APP_TOKEN" \ 26 | --form-string "user=$PUSHOVER_APP_KEY" \ 27 | --form-string "message=$1" \ 28 | https://api.pushover.net/1/messages.json 29 | } 30 | 31 | function error() { 32 | # don't run if notification was alreaty sent 33 | if [ -f $check_fn ]; then 34 | return 35 | fi 36 | 37 | echo "relayscan.io is unhealthy" 38 | send_notification "relayscan.io is unhealthy" 39 | curl -vvvv $url > $check_fn 2>&1 40 | } 41 | 42 | function reset() { 43 | # Don't run if there is no error 44 | if [ ! -f $check_fn ]; then 45 | return 46 | fi 47 | 48 | rm $check_fn 49 | echo "relayscan.io is healthy again" 50 | send_notification "relayscan.io is healthy again" 51 | } 52 | 53 | # Allow errors, to catch curl error exit code 54 | set +e 55 | # echo $check_cmd 56 | $check_cmd 57 | if [ $? -eq 0 ]; then 58 | echo "All good" 59 | reset 60 | else 61 | echo "curl error $?" 
62 | error 63 | fi 64 | -------------------------------------------------------------------------------- /config-hoodi.yaml: -------------------------------------------------------------------------------- 1 | # Hoodi beacon chain genesis timestamp (Mar/17, 2025, 12:10 UTC) - https://github.com/eth-clients/hoodi 2 | genesis: 1742213400 3 | 4 | relays: 5 | flashbots: "https://0xafa4c6985aa049fb79dd37010438cfebeb0f2bd42b115b89dd678dab0670c1de38da0c4e9138c9290a398ecd9a0b3110@boost-relay-hoodi.flashbots.net" 6 | ultrasound: "https://0xb1559beef7b5ba3127485bbbb090362d9f497ba64e177ee2c8e7db74746306efad687f2cf8574e38d70067d40ef136dc@relay-hoodi.ultrasound.money" 7 | all: 8 | - "https://0xafa4c6985aa049fb79dd37010438cfebeb0f2bd42b115b89dd678dab0670c1de38da0c4e9138c9290a398ecd9a0b3110@boost-relay-hoodi.flashbots.net" 9 | - "https://0xb1559beef7b5ba3127485bbbb090362d9f497ba64e177ee2c8e7db74746306efad687f2cf8574e38d70067d40ef136dc@relay-hoodi.ultrasound.money" 10 | - "https://0x821f2a65afb70e7f2e820a925a9b4c80a159620582c1766b1b09729fec178b11ea22abb3a51f07b288be815a1a2ff516@bloxroute.hoodi.blxrbdn.com" 11 | - "https://0xaa58208899c6105603b74396734a6263cc7d947f444f396a90f7b7d3e65d102aec7e5e5291b27e08d02c50a050825c2f@hoodi.titanrelay.xyz" 12 | - "https://0x98f0ef62f00780cf8eb06701a7d22725b9437d4768bb19b363e882ae87129945ec206ec2dc16933f31d983f8225772b6@hoodi.aestus.live" 13 | - "https://0xb20c3fe59db9c3655088839ef3d972878d182eb745afd8abb1dd2abf6c14f93cd5934ed4446a5fe1ba039e2bc0cf1011@hoodi-relay.ethgas.com" 14 | # haven't had success getting data from TOOL + interstate, but these endpoints are supposed to work... 
15 | # - "https://0x9110847c15a7f5c80a9fdd5db989a614cc01104e53bd8c252b6f46a4842c7fdef6b9593336035b5094878deff386804c@hoodi-builder-proxy-alpha.interstate.so:443" 16 | # - "https://0xa0f46566247ceb1f259a7189d5ac8bf2f0f07c135f081b0b5a9f226ef864bf6362c74306fcd02a87b7941f6feac57dc7@relay-hoodi.nuconstruct.xyz" 17 | 18 | builder_addresses: 19 | -------------------------------------------------------------------------------- /common/eth_node.go: -------------------------------------------------------------------------------- 1 | package common 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "math/big" 7 | 8 | ethcommon "github.com/ethereum/go-ethereum/common" 9 | "github.com/ethereum/go-ethereum/core/types" 10 | "github.com/ethereum/go-ethereum/ethclient" 11 | ) 12 | 13 | type EthNode struct { 14 | Clients []*ethclient.Client 15 | } 16 | 17 | func NewEthNode(uris ...string) (*EthNode, error) { 18 | if len(uris) == 0 { 19 | return nil, ErrURLEmpty 20 | } 21 | node := &EthNode{} //nolint:exhaustruct 22 | for _, uri := range uris { 23 | client, err := ethclient.Dial(uri) 24 | if err != nil { 25 | fmt.Println("Error connecting to eth node", uri, err) 26 | return nil, err 27 | } 28 | node.Clients = append(node.Clients, client) 29 | } 30 | return node, nil 31 | } 32 | 33 | func (n *EthNode) BlockByNumber(blockNumber int64) (block *types.Block, err error) { 34 | for _, client := range n.Clients { 35 | block, err = client.BlockByNumber(context.Background(), big.NewInt(blockNumber)) 36 | if err == nil { 37 | return block, nil 38 | } 39 | } 40 | return nil, err 41 | } 42 | 43 | func (n *EthNode) BlockByHash(blockHash string) (block *types.Block, err error) { 44 | for _, client := range n.Clients { 45 | block, err = client.BlockByHash(context.Background(), ethcommon.HexToHash(blockHash)) 46 | if err == nil { 47 | return block, nil 48 | } 49 | } 50 | return nil, err 51 | } 52 | 53 | func (n *EthNode) GetBalanceDiff(address string, blockNumber int64) (diff *big.Int, err error) { 54 | 
for _, client := range n.Clients { 55 | balanceBefore, err := client.BalanceAt(context.Background(), ethcommon.HexToAddress(address), big.NewInt(blockNumber-1)) 56 | if err != nil { 57 | continue 58 | } 59 | 60 | balanceAfter, err := client.BalanceAt(context.Background(), ethcommon.HexToAddress(address), big.NewInt(blockNumber)) 61 | if err != nil { 62 | continue 63 | } 64 | 65 | balanceDiff := new(big.Int).Sub(balanceAfter, balanceBefore) 66 | return balanceDiff, nil 67 | } 68 | return nil, err 69 | } 70 | -------------------------------------------------------------------------------- /vars/config.go: -------------------------------------------------------------------------------- 1 | package vars 2 | 3 | import ( 4 | "fmt" 5 | "os" 6 | 7 | "gopkg.in/yaml.v3" 8 | ) 9 | 10 | // Config holds the application configuration loaded from YAML 11 | type Config struct { 12 | Genesis int64 `yaml:"genesis"` 13 | Relays RelaysConfig `yaml:"relays"` 14 | BuilderAddresses map[string][]string `yaml:"builder_addresses"` 15 | } 16 | 17 | // RelaysConfig holds relay URL configuration 18 | type RelaysConfig struct { 19 | Flashbots string `yaml:"flashbots"` 20 | Ultrasound string `yaml:"ultrasound"` 21 | All []string `yaml:"all"` 22 | } 23 | 24 | var loadedConfig *Config 25 | 26 | // LoadConfig loads the configuration from a YAML file 27 | func LoadConfig(path string) error { 28 | data, err := os.ReadFile(path) 29 | if err != nil { 30 | return fmt.Errorf("failed to read config file %s: %w", path, err) 31 | } 32 | 33 | var cfg Config 34 | if err := yaml.Unmarshal(data, &cfg); err != nil { 35 | return fmt.Errorf("failed to parse config file %s: %w", path, err) 36 | } 37 | 38 | loadedConfig = &cfg 39 | 40 | // Populate package-level variables for backwards compatibility 41 | Genesis = int(cfg.Genesis) 42 | RelayFlashbots = cfg.Relays.Flashbots 43 | RelayUltrasound = cfg.Relays.Ultrasound 44 | RelayURLs = cfg.Relays.All 45 | BuilderAddresses = buildAddressMap(cfg.BuilderAddresses) 46 | 
47 | return nil 48 | } 49 | 50 | // MustLoadConfig loads the configuration or panics on error 51 | func MustLoadConfig(path string) { 52 | if err := LoadConfig(path); err != nil { 53 | panic(err) 54 | } 55 | } 56 | 57 | // GetConfig returns the loaded configuration 58 | func GetConfig() *Config { 59 | return loadedConfig 60 | } 61 | 62 | // buildAddressMap converts the config format to the expected map[coinbase]map[address]bool format 63 | func buildAddressMap(addresses map[string][]string) map[string]map[string]bool { 64 | result := make(map[string]map[string]bool) 65 | for coinbase, addrs := range addresses { 66 | result[coinbase] = make(map[string]bool) 67 | for _, addr := range addrs { 68 | result[coinbase][addr] = true 69 | } 70 | } 71 | return result 72 | } 73 | -------------------------------------------------------------------------------- /services/bidcollect/website/htmldata.go: -------------------------------------------------------------------------------- 1 | package website 2 | 3 | import ( 4 | "text/template" 5 | 6 | "github.com/flashbots/relayscan/common" 7 | ) 8 | 9 | type HTMLData struct { 10 | Title string 11 | Path string 12 | 13 | // Root page 14 | EthMainnetMonths []string 15 | 16 | // File-listing page 17 | CurrentNetwork string 18 | CurrentMonth string 19 | Files []FileEntry 20 | } 21 | 22 | type FileEntry struct { 23 | Filename string 24 | Size uint64 25 | Modified string 26 | } 27 | 28 | func prettyInt(i uint64) string { 29 | return printer.Sprintf("%d", i) 30 | } 31 | 32 | func caseIt(s string) string { 33 | return caser.String(s) 34 | } 35 | 36 | func percent(cnt, total uint64) string { 37 | p := float64(cnt) / float64(total) * 100 38 | return printer.Sprintf("%.2f", p) 39 | } 40 | 41 | func substr10(s string) string { 42 | return s[:10] 43 | } 44 | 45 | var DummyHTMLData = &HTMLData{ 46 | Title: "", 47 | Path: "", 48 | 49 | EthMainnetMonths: []string{ 50 | "2023-08", 51 | "2023-09", 52 | }, 53 | 54 | CurrentNetwork: "Ethereum Mainnet", 
55 | CurrentMonth: "2023-08", 56 | Files: []FileEntry{ 57 | {"2023-08-29_all.csv.zip", 97210118, "02:02:23 2023-09-02"}, 58 | {"2023-08-29_top.csv.zip", 7210118, "02:02:23 2023-09-02"}, 59 | 60 | {"2023-08-30_all.csv.zip", 97210118, "02:02:23 2023-09-02"}, 61 | {"2023-08-30_top.csv.zip", 7210118, "02:02:23 2023-09-02"}, 62 | 63 | {"2023-08-31_all.csv.zip", 97210118, "02:02:23 2023-09-02"}, 64 | {"2023-08-31_top.csv.zip", 7210118, "02:02:23 2023-09-02"}, 65 | }, 66 | } 67 | 68 | var funcMap = template.FuncMap{ 69 | "prettyInt": prettyInt, 70 | "caseIt": caseIt, 71 | "percent": percent, 72 | "humanBytes": common.HumanBytes, 73 | "substr10": substr10, 74 | } 75 | 76 | func ParseIndexTemplate() (*template.Template, error) { 77 | return template.New("index.html").Funcs(funcMap).ParseFiles("services/bidcollect/website/templates/index_root.html", "services/bidcollect/website/templates/base.html") 78 | } 79 | 80 | func ParseFilesTemplate() (*template.Template, error) { 81 | return template.New("index.html").Funcs(funcMap).ParseFiles("services/bidcollect/website/templates/index_files.html", "services/bidcollect/website/templates/base.html") 82 | } 83 | -------------------------------------------------------------------------------- /.github/workflows/release.yml: -------------------------------------------------------------------------------- 1 | name: Release 2 | 3 | on: 4 | push: 5 | tags: 6 | - 'v*' 7 | 8 | jobs: 9 | docker-image: 10 | name: Publish Docker Image 11 | runs-on: ubuntu-latest 12 | 13 | steps: 14 | - name: Checkout sources 15 | uses: actions/checkout@v2 16 | 17 | - name: Get tag version 18 | run: echo "RELEASE_VERSION=${GITHUB_REF#refs/*/}" >> $GITHUB_ENV 19 | 20 | - name: Print version 21 | run: | 22 | echo $RELEASE_VERSION 23 | echo ${{ env.RELEASE_VERSION }} 24 | 25 | - name: Set up QEMU 26 | uses: docker/setup-qemu-action@v2 27 | 28 | - name: Set up Docker Buildx 29 | uses: docker/setup-buildx-action@v2 30 | 31 | - name: Extract metadata (tags, labels) 
for Docker 32 | id: meta 33 | uses: docker/metadata-action@v4 34 | with: 35 | images: flashbots/relayscan 36 | tags: | 37 | type=sha 38 | type=pep440,pattern={{version}} 39 | type=pep440,pattern={{major}}.{{minor}} 40 | type=raw,value=latest,enable=${{ !contains(env.RELEASE_VERSION, '-') }} 41 | 42 | - name: Login to DockerHub 43 | uses: docker/login-action@v2 44 | with: 45 | username: ${{ secrets.DOCKERHUB_USERNAME }} 46 | password: ${{ secrets.DOCKERHUB_TOKEN }} 47 | 48 | - name: Build and push 49 | uses: docker/build-push-action@v3 50 | with: 51 | context: . 52 | push: true 53 | build-args: | 54 | VERSION=${{ env.RELEASE_VERSION }} 55 | platforms: linux/amd64,linux/arm64 56 | tags: ${{ steps.meta.outputs.tags }} 57 | labels: ${{ steps.meta.outputs.labels }} 58 | 59 | github-release: 60 | runs-on: ubuntu-latest 61 | steps: 62 | - name: Checkout sources 63 | uses: actions/checkout@v2 64 | 65 | - name: Create release 66 | id: create_release 67 | uses: actions/create-release@v1 68 | env: 69 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 70 | with: 71 | tag_name: ${{ github.ref }} 72 | release_name: ${{ github.ref }} 73 | draft: true 74 | prerelease: false 75 | -------------------------------------------------------------------------------- /common/request.go: -------------------------------------------------------------------------------- 1 | package common 2 | 3 | import ( 4 | "bytes" 5 | "context" 6 | "encoding/json" 7 | "errors" 8 | "fmt" 9 | "io" 10 | "net/http" 11 | ) 12 | 13 | var errHTTPErrorResponse = errors.New("HTTP error response") 14 | 15 | // type ErrorMessage struct { 16 | // code int 17 | // message string 18 | // } 19 | 20 | func SendHTTPRequest(ctx context.Context, client http.Client, method, url string, payload, dst any) (code int, err error) { 21 | var req *http.Request 22 | 23 | if payload == nil { 24 | req, err = http.NewRequestWithContext(ctx, method, url, nil) 25 | } else { 26 | payloadBytes, err2 := json.Marshal(payload) 27 | if err2 != nil { 28 
| return 0, fmt.Errorf("could not marshal request: %w", err2) 29 | } 30 | req, err = http.NewRequestWithContext(ctx, method, url, bytes.NewReader(payloadBytes)) 31 | 32 | // Set content-type 33 | req.Header.Add("Content-Type", "application/json") 34 | } 35 | if err != nil { 36 | return 0, fmt.Errorf("could not prepare request: %w", err) 37 | } 38 | 39 | // Execute request 40 | resp, err := client.Do(req) 41 | if err != nil { 42 | return 0, err 43 | } 44 | defer resp.Body.Close() //nolint:errcheck 45 | 46 | if resp.StatusCode == http.StatusNoContent { 47 | return resp.StatusCode, nil 48 | } 49 | 50 | if resp.StatusCode > 299 { 51 | bodyBytes, err := io.ReadAll(resp.Body) 52 | if err != nil { 53 | return resp.StatusCode, fmt.Errorf("could not read error response body for status code %d: %w", resp.StatusCode, err) 54 | } 55 | return resp.StatusCode, fmt.Errorf("%w: %d / %s", errHTTPErrorResponse, resp.StatusCode, string(bodyBytes)) 56 | } 57 | 58 | if dst == nil { 59 | // still read the body to reuse http connection (see also https://stackoverflow.com/a/17953506) 60 | _, err = io.Copy(io.Discard, resp.Body) 61 | if err != nil { 62 | return resp.StatusCode, fmt.Errorf("could not read response body: %w", err) 63 | } 64 | } else { 65 | bodyBytes, err := io.ReadAll(resp.Body) 66 | if err != nil { 67 | return resp.StatusCode, fmt.Errorf("could not read response body: %w", err) 68 | } 69 | 70 | if err := json.Unmarshal(bodyBytes, dst); err != nil { 71 | return resp.StatusCode, fmt.Errorf("could not unmarshal response %s: %w", string(bodyBytes), err) 72 | } 73 | } 74 | 75 | return resp.StatusCode, nil 76 | } 77 | -------------------------------------------------------------------------------- /services/website/html.go: -------------------------------------------------------------------------------- 1 | package website 2 | 3 | import ( 4 | _ "embed" 5 | "text/template" 6 | "time" 7 | 8 | "github.com/dustin/go-humanize" 9 | "github.com/flashbots/relayscan/database" 10 | ) 11 | 
12 | type Stats struct { 13 | Since time.Time 14 | Until time.Time 15 | 16 | TimeStr string // i.e. 24h, 12h, 1h or 7d 17 | 18 | TopRelays []*database.TopRelayEntry 19 | TopBuilders []*TopBuilderDisplayEntry 20 | BuilderProfits []*database.BuilderProfitEntry 21 | TopBuildersByRelay map[string][]*TopBuilderDisplayEntry 22 | } 23 | 24 | func NewStats() *Stats { 25 | return &Stats{ 26 | TopRelays: make([]*database.TopRelayEntry, 0), 27 | TopBuilders: make([]*TopBuilderDisplayEntry, 0), 28 | BuilderProfits: make([]*database.BuilderProfitEntry, 0), 29 | TopBuildersByRelay: make(map[string][]*TopBuilderDisplayEntry), 30 | } 31 | } 32 | 33 | type HTMLData struct { 34 | Title string 35 | TimeSpans []string 36 | TimeSpan string 37 | View string // overview or builder-profit 38 | 39 | Stats *Stats // stats for this view 40 | 41 | LastUpdateSlot uint64 42 | LastUpdateTime time.Time 43 | LastUpdateTimeStr string 44 | } 45 | 46 | type HTMLDataDailyStats struct { 47 | Title string 48 | 49 | Day string 50 | DayPrev string 51 | DayNext string 52 | TimeSince string 53 | TimeUntil string 54 | 55 | TopRelays []*database.TopRelayEntry 56 | TopBuildersBySummary []*TopBuilderDisplayEntry 57 | BuilderProfits []*database.BuilderProfitEntry 58 | } 59 | 60 | var funcMap = template.FuncMap{ 61 | "weiToEth": weiToEth, 62 | "prettyInt": prettyInt, 63 | "caseIt": caseIt, 64 | "percent": percent, 65 | "relayTable": relayTable, 66 | "builderTable": builderTable, 67 | "builderProfitTable": builderProfitTable, 68 | "humanTime": humanize.Time, 69 | "lowercaseNoWhitespace": lowercaseNoWhitespace, 70 | } 71 | 72 | func ParseIndexTemplate() (*template.Template, error) { 73 | return template.New("index.html").Funcs(funcMap).ParseFiles("services/website/templates/index.html", "services/website/templates/base.html") 74 | } 75 | 76 | func ParseDailyStatsTemplate() (*template.Template, error) { 77 | return 
template.New("daily-stats.html").Funcs(funcMap).ParseFiles("services/website/templates/daily-stats.html", "services/website/templates/base.html") 78 | } 79 | -------------------------------------------------------------------------------- /config-mainnet.yaml: -------------------------------------------------------------------------------- 1 | # Mainnet beacon chain genesis timestamp (Dec 1, 2020) 2 | genesis: 1606824023 3 | 4 | relays: 5 | flashbots: "https://0xac6e77dfe25ecd6110b8e780608cce0dab71fdd5ebea22a16c0205200f2f8e2e3ad3b71d3499c54ad14d6c21b41a37ae@boost-relay.flashbots.net" 6 | ultrasound: "https://0xa1559ace749633b997cb3fdacffb890aeebdb0f5a3b6aaa7eeeaf1a38af0a8fe88b9e4b1f61f236d2e64d95733327a62@relay.ultrasound.money" 7 | all: 8 | - "https://0xac6e77dfe25ecd6110b8e780608cce0dab71fdd5ebea22a16c0205200f2f8e2e3ad3b71d3499c54ad14d6c21b41a37ae@boost-relay.flashbots.net" 9 | - "https://0xa1559ace749633b997cb3fdacffb890aeebdb0f5a3b6aaa7eeeaf1a38af0a8fe88b9e4b1f61f236d2e64d95733327a62@relay.ultrasound.money" 10 | - "https://0x8b5d2e73e2a3a55c6c87b8b6eb92e0149a125c852751db1422fa951e42a09b82c142c3ea98d0d9930b056a3bc9896b8f@bloxroute.max-profit.blxrbdn.com" 11 | - "https://0xb0b07cd0abef743db4260b0ed50619cf6ad4d82064cb4fbec9d3ec530f7c5e6793d9f286c4e082c0244ffb9f2658fe88@bloxroute.regulated.blxrbdn.com" 12 | - "https://0xb3ee7afcf27f1f1259ac1787876318c6584ee353097a50ed84f51a1f21a323b3736f271a895c7ce918c038e4265918be@relay.edennetwork.io" 13 | - "https://0xa7ab7a996c8584251c8f925da3170bdfd6ebc75d50f5ddc4050a6fdc77f2a3b5fce2cc750d0865e05d7228af97d69561@agnostic-relay.net" 14 | - "https://0xa15b52576bcbf1072f4a011c0f99f9fb6c66f3e1ff321f11f461d15e31b1cb359caa092c71bbded0bae5b5ea401aab7e@aestus.live" 15 | - "https://0x8c4ed5e24fe5c6ae21018437bde147693f68cda427cd1122cf20819c30eda7ed74f72dece09bb313f2a1855595ab677d@titanrelay.xyz" 16 | - "https://0x88ef3061f598101ca713d556cf757763d9be93d33c3092d3ab6334a36855b6b4a4020528dd533a62d25ea6648251e62e@relay.ethgas.com" 17 | - 
"https://0xb66921e917a8f4cfc3c52e10c1e5c77b1255693d9e6ed6f5f444b71ca4bb610f2eff4fa98178efbf4dd43a30472c497e@relay.btcs.com" 18 | 19 | builder_addresses: 20 | # Coinbase addresses mapped to their owned addresses 21 | "0xdadb0d80178819f2319190d340ce9a924f783711": 22 | - "0x59cadf9199248b50d40a6891c9e329ea13a88d31" 23 | - "0x75cc09358f100583d66f5277138bfb476345dc1b" 24 | - "0x397b28d85d77fef1576e129bb35b322c2bee1ba1" 25 | "0x4838b106fce9647bdf1e7877bf73ce8b0bad5f97": 26 | - "0x9fc3da866e7df3a1c57ade1a97c9f00a70f010c8" 27 | - "0xb29b9fd58cdb2e3bb068bc8560d8c13b2454684d" 28 | "0x1f9090aae28b8a3dceadf281b0f12828e676c326": 29 | - "0x0affb0a96fbefaa97dce488dfd97512346cf3ab8" 30 | "0x95222290dd7278aa3ddd389cc1e1d165cc4bafe5": 31 | - "0xa83114a443da1cecefc50368531cace9f37fcccb" 32 | - "0x28c74c0f29b686f21ea731bd2a8b88b6954475ba" 33 | -------------------------------------------------------------------------------- /services/bidcollect/website/utils.go: -------------------------------------------------------------------------------- 1 | package website 2 | 3 | import ( 4 | "fmt" 5 | "net/http" 6 | "time" 7 | 8 | "go.uber.org/zap" 9 | "golang.org/x/text/cases" 10 | "golang.org/x/text/language" 11 | "golang.org/x/text/message" 12 | ) 13 | 14 | var ( 15 | // Printer for pretty printing numbers 16 | printer = message.NewPrinter(language.English) 17 | 18 | // Caser is used for casing strings 19 | caser = cases.Title(language.English) 20 | ) 21 | 22 | type HTTPErrorResp struct { 23 | Code int `json:"code"` 24 | Message string `json:"message"` 25 | } 26 | 27 | // responseWriter is a minimal wrapper for http.ResponseWriter that allows the 28 | // written HTTP status code to be captured for logging. 
29 | type responseWriter struct { 30 | http.ResponseWriter 31 | status int 32 | wroteHeader bool 33 | } 34 | 35 | func wrapResponseWriter(w http.ResponseWriter) *responseWriter { 36 | return &responseWriter{ResponseWriter: w} //nolint:exhaustruct 37 | } 38 | 39 | func (rw *responseWriter) Status() int { 40 | return rw.status 41 | } 42 | 43 | func (rw *responseWriter) WriteHeader(code int) { 44 | if rw.wroteHeader { 45 | return 46 | } 47 | 48 | rw.status = code 49 | rw.ResponseWriter.WriteHeader(code) 50 | rw.wroteHeader = true 51 | } 52 | 53 | // LoggingMiddlewareZap logs the incoming HTTP request & its duration. 54 | func LoggingMiddlewareZap(logger *zap.Logger, next http.Handler) http.Handler { 55 | return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { 56 | // Handle panics 57 | defer func() { 58 | if msg := recover(); msg != nil { 59 | w.WriteHeader(http.StatusInternalServerError) 60 | var method, url string 61 | if r != nil { 62 | method = r.Method 63 | url = r.URL.EscapedPath() 64 | } 65 | logger.Error("HTTP request handler panicked", 66 | zap.Any("error", msg), 67 | zap.String("method", method), 68 | zap.String("url", url), 69 | ) 70 | } 71 | }() 72 | 73 | start := time.Now() 74 | wrapped := wrapResponseWriter(w) 75 | next.ServeHTTP(wrapped, r) // must pass the wrapper (not w) so WriteHeader records the status for the access log below 76 | 77 | // Passing request stats both in-message (for the human reader) 78 | // as well as inside the structured log (for the machine parser) 79 | logger.Info(fmt.Sprintf("%s %s %d", r.Method, r.URL.EscapedPath(), wrapped.status), 80 | zap.Int("durationMs", int(time.Since(start).Milliseconds())), 81 | zap.Int("status", wrapped.status), 82 | zap.String("logType", "access"), 83 | zap.String("method", r.Method), 84 | zap.String("path", r.URL.EscapedPath()), 85 | ) 86 | }) 87 | } 88 | -------------------------------------------------------------------------------- /.golangci.yaml: -------------------------------------------------------------------------------- 1 | version: "2" 2 | linters: 3 | enable-all: true
4 | disable: 5 | - cyclop 6 | - forbidigo 7 | - funlen 8 | - gochecknoglobals 9 | - gochecknoinits 10 | - gocritic 11 | - godot 12 | - godox 13 | - lll 14 | - nestif 15 | - nilnil 16 | - nlreturn 17 | - noctx 18 | - nonamedreturns 19 | - paralleltest 20 | - revive 21 | - testpackage 22 | - unparam 23 | - varnamelen 24 | - wrapcheck 25 | - wsl 26 | - interfacebloat 27 | - dupword 28 | - mnd 29 | 30 | # 31 | # Disabled because of generics: 32 | # 33 | - contextcheck 34 | - rowserrcheck 35 | - sqlclosecheck 36 | - wastedassign 37 | 38 | # 39 | # Disabled because deprecated: 40 | # 41 | 42 | linters-settings: 43 | # 44 | # The G108 rule throws a false positive. We're not actually vulnerable. If 45 | # you're not careful the profiling endpoint is automatically exposed on 46 | # /debug/pprof if you import net/http/pprof. See this link: 47 | # 48 | # https://mmcloughlin.com/posts/your-pprof-is-showing 49 | # 50 | gosec: 51 | excludes: 52 | - G108 53 | 54 | gocognit: 55 | min-complexity: 100 # default: 30 56 | 57 | gocyclo: 58 | min-complexity: 33 # default: 30 59 | 60 | maintidx: 61 | under: 15 62 | 63 | tagliatelle: 64 | case: 65 | rules: 66 | json: snake 67 | 68 | gofumpt: 69 | extra-rules: true 70 | 71 | exhaustruct: 72 | exclude: 73 | # 74 | # Because it's easier to read without the other fields. 75 | # 76 | - 'GetPayloadsFilters' 77 | 78 | # 79 | # Structures outside our control that have a ton of settings. It doesn't 80 | # make sense to specify all of the fields. 81 | # 82 | - 'cobra.Command' 83 | - 'database.*Entry' 84 | - 'http.Server' 85 | - 'logrus.*Formatter' 86 | - 'Options' # redis 87 | 88 | # 89 | # Excluded because there are private fields (not capitalized) that are 90 | # not initialized. If possible, I think these should be altered. 
91 | # 92 | - 'Datastore' 93 | - 'Housekeeper' 94 | - 'MockBeaconClient' 95 | - 'RelayAPI' 96 | - 'Webserver' 97 | 98 | formatters: 99 | enable: 100 | - gci 101 | - gofmt 102 | - gofumpt 103 | - goimports 104 | settings: 105 | gofumpt: 106 | extra-rules: true 107 | exclusions: 108 | generated: lax 109 | paths: 110 | - third_party$ 111 | - builtin$ 112 | - examples$ 113 | -------------------------------------------------------------------------------- /scripts/bidcollect/bids-combine-and-upload.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # Combine bid CSVs (from bidcollect) into a single CSV, and upload to R2/S3 4 | # 5 | set -e 6 | 7 | # require directory as first argument 8 | if [ -z "$1" ]; then 9 | echo "Usage: $0 " 10 | exit 1 11 | fi 12 | 13 | cd $1 14 | date=$(basename $1) 15 | ym=${date:0:7} 16 | echo $date 17 | echo "" 18 | 19 | # ALL BIDS 20 | fn_out="${date}_all.csv" 21 | fn_out_zip="${fn_out}.zip" 22 | rm -f $fn_out $fn_out_zip 23 | 24 | echo "Combining all bids..." 25 | first="1" 26 | for fn in $(\ls all*); do 27 | echo "- ${fn}" 28 | if [ $first == "1" ]; then 29 | head -n 1 $fn > $fn_out 30 | first="0" 31 | fi 32 | tail -n +2 $fn >> $fn_out 33 | done 34 | 35 | echo "Lines (all bids):" 36 | wc -l $fn_out 37 | 38 | echo "Source types (all bids):" 39 | clickhouse local -q "SELECT source_type, COUNT(source_type) FROM '$fn_out' GROUP BY source_type ORDER BY source_type;" 40 | 41 | zip ${fn_out_zip} $fn_out 42 | echo "Wrote ${fn_out_zip}" 43 | rm -f $fn_out 44 | rm -f all*.csv 45 | 46 | # Upload 47 | if [[ "${UPLOAD}" != "0" ]]; then 48 | echo "Uploading to R2 and S3..." 
49 | aws --profile r2 s3 cp --no-progress "${fn_out_zip}" "s3://relayscan-bidarchive/ethereum/mainnet/${ym}/" --endpoint-url "https://${CLOUDFLARE_R2_ACCOUNT_ID}.r2.cloudflarestorage.com" 50 | aws --profile s3 s3 cp --no-progress "${fn_out_zip}" "s3://relayscan-bidarchive/ethereum/mainnet/${ym}/" 51 | fi 52 | 53 | if [[ "${DEL}" == "1" ]]; then 54 | rm -f all* 55 | fi 56 | 57 | echo "" 58 | 59 | # TOP BIDS 60 | echo "Combining top bids..." 61 | fn_out="${date}_top.csv" 62 | fn_out_zip="${fn_out}.zip" 63 | rm -f $fn_out $fn_out_zip 64 | 65 | first="1" 66 | for fn in $(\ls top*); do 67 | echo "- ${fn}" 68 | if [ $first == "1" ]; then 69 | head -n 1 $fn > $fn_out 70 | first="0" 71 | fi 72 | tail -n +2 $fn >> $fn_out 73 | done 74 | 75 | echo "Lines (top bids):" 76 | wc -l $fn_out 77 | 78 | echo "Source types (top bids):" 79 | clickhouse local -q "SELECT source_type, COUNT(source_type) FROM '$fn_out' GROUP BY source_type ORDER BY source_type;" 80 | 81 | zip ${fn_out_zip} $fn_out 82 | echo "Wrote ${fn_out_zip}" 83 | rm -f $fn_out 84 | rm -f top*.csv 85 | 86 | # Upload 87 | if [[ "${UPLOAD}" != "0" ]]; then 88 | echo "Uploading to R2 and S3..." 89 | aws --profile r2 s3 cp --no-progress "${fn_out_zip}" "s3://relayscan-bidarchive/ethereum/mainnet/${ym}/" --endpoint-url "https://${CLOUDFLARE_R2_ACCOUNT_ID}.r2.cloudflarestorage.com" 90 | aws --profile s3 s3 cp --no-progress "${fn_out_zip}" "s3://relayscan-bidarchive/ethereum/mainnet/${ym}/" 91 | fi 92 | 93 | if [[ "${DEL}" == "1" ]]; then 94 | rm -f top* 95 | fi 96 | -------------------------------------------------------------------------------- /common/relayentry.go: -------------------------------------------------------------------------------- 1 | package common 2 | 3 | import ( 4 | "net/url" 5 | "strings" 6 | 7 | "github.com/flashbots/go-boost-utils/types" 8 | "github.com/flashbots/relayscan/vars" 9 | ) 10 | 11 | // RelayEntry represents a relay that mev-boost connects to. 
12 | type RelayEntry struct { 13 | PublicKey types.PublicKey 14 | URL *url.URL 15 | } 16 | 17 | func (r *RelayEntry) String() string { 18 | return r.URL.String() 19 | } 20 | 21 | func (r *RelayEntry) Hostname() string { 22 | return r.URL.Hostname() 23 | } 24 | 25 | // GetURI returns the full request URI with scheme, host, path and args for the relay. 26 | func (r *RelayEntry) GetURI(path string) string { 27 | return GetURI(r.URL, path) 28 | } 29 | 30 | // NewRelayEntry creates a new instance based on an input string 31 | // relayURL can be IP@PORT, PUBKEY@IP:PORT, https://IP, etc. 32 | func NewRelayEntry(relayURL string, requireUser bool) (entry RelayEntry, err error) { 33 | // Add protocol scheme prefix if it does not exist. 34 | if !strings.HasPrefix(relayURL, "http") { 35 | relayURL = "https://" + relayURL 36 | } 37 | 38 | // Parse the provided relay's URL and save the parsed URL in the RelayEntry. 39 | entry.URL, err = url.ParseRequestURI(relayURL) 40 | if err != nil { 41 | return entry, err 42 | } 43 | 44 | // Extract the relay's public key from the parsed URL. 
45 | if requireUser && entry.URL.User.Username() == "" { 46 | return entry, ErrMissingRelayPubkey 47 | } 48 | 49 | if entry.URL.User.Username() != "" { 50 | err = entry.PublicKey.UnmarshalText([]byte(entry.URL.User.Username())) 51 | } 52 | return entry, err 53 | } 54 | 55 | func MustNewRelayEntry(relayURL string, requireUser bool) (entry RelayEntry) { 56 | entry, err := NewRelayEntry(relayURL, requireUser) 57 | Check(err) 58 | return entry 59 | } 60 | 61 | // RelayEntriesToStrings returns the string representation of a list of relay entries 62 | func RelayEntriesToStrings(relays []RelayEntry) []string { 63 | ret := make([]string, len(relays)) 64 | for i, entry := range relays { 65 | ret[i] = entry.String() 66 | } 67 | return ret 68 | } 69 | 70 | // RelayEntriesToHostnameStrings returns the hostnames of a list of relay entries 71 | func RelayEntriesToHostnameStrings(relays []RelayEntry) []string { 72 | ret := make([]string, len(relays)) 73 | for i, entry := range relays { 74 | ret[i] = entry.Hostname() 75 | } 76 | return ret 77 | } 78 | 79 | func GetRelays() ([]RelayEntry, error) { 80 | var err error 81 | relays := make([]RelayEntry, len(vars.RelayURLs)) 82 | for i, relayStr := range vars.RelayURLs { 83 | relays[i], err = NewRelayEntry(relayStr, true) 84 | if err != nil { 85 | return relays, err 86 | } 87 | } 88 | return relays, nil 89 | } 90 | 91 | func MustGetRelays() []RelayEntry { 92 | relays, err := GetRelays() 93 | Check(err) 94 | return relays 95 | } 96 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | # Heavily inspired by Lighthouse: https://github.com/sigp/lighthouse/blob/stable/Makefile 2 | # and Reth: https://github.com/paradigmxyz/reth/blob/main/Makefile 3 | .DEFAULT_GOAL := help 4 | 5 | VERSION := $(shell git describe --tags --always --dirty="-dev") 6 | 7 | ##@ Help 8 | 9 | help: ## Display this help 10 | @awk 'BEGIN {FS = 
":.*##"; printf "Usage:\n make \033[36m\033[0m\n"} /^[a-zA-Z_0-9-]+:.*?##/ { printf " \033[36m%-15s\033[0m %s\n", $$1, $$2 } /^##@/ { printf "\n\033[1m%s\033[0m\n", substr($$0, 5) } ' $(MAKEFILE_LIST) 11 | 12 | v: ## Show the current version 13 | @echo "Version: ${VERSION}" 14 | 15 | ##@ Building 16 | 17 | .PHONY: clean 18 | clean: ## Remove build artifacts 19 | rm -rf relayscan build/ 20 | 21 | .PHONY: build 22 | build: ## Build the relayscan binary 23 | go build -trimpath -ldflags "-s -X cmd.Version=${VERSION} -X main.Version=${VERSION}" -v -o relayscan . 24 | 25 | .PHONY: docker-image 26 | docker-image: ## Build the relayscan docker image 27 | DOCKER_BUILDKIT=1 docker build --platform linux/amd64 --build-arg VERSION=${VERSION} . -t relayscan 28 | 29 | .PHONY: generate-ssz 30 | generate-ssz: ## Generate SSZ serialization code 31 | rm -f common/ultrasoundbid_encoding.go 32 | sszgen --path common --objs UltrasoundStreamBid 33 | 34 | ##@ Production tasks 35 | 36 | .PHONY: update-bids-website 37 | update-bids-website: ## Update the bid archive website 38 | go run . service bidcollect --build-website --build-website-upload 39 | 40 | ##@ Linting and Testing 41 | 42 | lint: ## Lint the code 43 | gofmt -d -s . 44 | gofumpt -d -extra . 45 | go vet ./... 46 | staticcheck ./... 47 | golangci-lint run 48 | 49 | test: ## Run tests 50 | go test ./... 51 | 52 | test-race: ## Run tests with -race fla 53 | go test -race ./... 54 | 55 | lt: lint test ## Run lint and tests 56 | 57 | gofumpt: ## Run gofumpt on the code 58 | gofumpt -l -w -extra . 59 | 60 | fmt: ## Format the code with gofmt and gofumpt and gc 61 | gofmt -s -w . 62 | gofumpt -extra -w . 63 | gci write . 64 | go mod tidy 65 | 66 | cover: ## Run tests with coverage 67 | go test -coverprofile=/tmp/go-sim-lb.cover.tmp ./... 
68 | go tool cover -func /tmp/go-sim-lb.cover.tmp 69 | unlink /tmp/go-sim-lb.cover.tmp 70 | 71 | cover-html: ## Run tests with coverage and output the HTML report 72 | go test -coverprofile=/tmp/go-sim-lb.cover.tmp ./... 73 | go tool cover -html=/tmp/go-sim-lb.cover.tmp 74 | unlink /tmp/go-sim-lb.cover.tmp 75 | 76 | ##@ Development 77 | 78 | dev-website: ## Run the relayscan website service in development mode 79 | DB_DONT_APPLY_SCHEMA=1 go run . service website --dev 80 | 81 | dev-bids-website: ## Run the bidcollect website in development mode 82 | go run . service bidcollect --devserver 83 | 84 | dev-postgres-start: ## Start a Postgres container for development 85 | docker run -d --name relayscan-postgres -p 5432:5432 -e POSTGRES_USER=postgres -e POSTGRES_PASSWORD=postgres -e POSTGRES_DB=postgres postgres 86 | 87 | dev-postgres-stop: ## Stop the Postgres container 88 | docker rm -f relayscan-postgres 89 | 90 | dev-postgres-wipe: dev-postgres-stop dev-postgres-start ## Restart the Postgres container (wipes the database) 91 | -------------------------------------------------------------------------------- /services/website/templates/base.html: -------------------------------------------------------------------------------- 1 | {{ define "base" }} 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | {{ .Title }} | relayscan.io 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | 34 | 35 | 36 | 37 | 38 | 39 | 40 | 41 | 42 |
43 |
44 | relayscan.io 45 | 46 | 51 |
52 |
53 | 54 | {{ template "content" . }} 55 | 56 | 57 | 58 | {{ end }} -------------------------------------------------------------------------------- /services/bidcollect/bidcollector.go: -------------------------------------------------------------------------------- 1 | // Package bidcollect contains code for bid collection from various sources. 2 | package bidcollect 3 | 4 | import ( 5 | "github.com/flashbots/relayscan/common" 6 | "github.com/flashbots/relayscan/services/bidcollect/types" 7 | "github.com/sirupsen/logrus" 8 | ) 9 | 10 | type BidCollectorOpts struct { 11 | Log *logrus.Entry 12 | UID string 13 | 14 | CollectUltrasoundStream bool 15 | CollectGetHeader bool 16 | CollectDataAPI bool 17 | 18 | Relays []common.RelayEntry 19 | BeaconNodeURI string // for getHeader 20 | 21 | OutDir string 22 | OutputTSV bool 23 | 24 | RedisAddr string 25 | UseRedis bool 26 | } 27 | 28 | type BidCollector struct { 29 | opts *BidCollectorOpts 30 | log *logrus.Entry 31 | 32 | ultrasoundBidC chan UltrasoundStreamBidsMsg 33 | dataAPIBidC chan DataAPIPollerBidsMsg 34 | getHeaderBidC chan GetHeaderPollerBidsMsg 35 | 36 | processor *BidProcessor 37 | } 38 | 39 | func NewBidCollector(opts *BidCollectorOpts) (c *BidCollector, err error) { 40 | c = &BidCollector{ 41 | log: opts.Log, 42 | opts: opts, 43 | } 44 | 45 | if c.opts.OutDir == "" { 46 | opts.Log.Fatal("outDir is required") 47 | } 48 | 49 | // inputs 50 | c.dataAPIBidC = make(chan DataAPIPollerBidsMsg, types.BidCollectorInputChannelSize) 51 | c.ultrasoundBidC = make(chan UltrasoundStreamBidsMsg, types.BidCollectorInputChannelSize) 52 | c.getHeaderBidC = make(chan GetHeaderPollerBidsMsg, types.BidCollectorInputChannelSize) 53 | 54 | // output 55 | c.processor, err = NewBidProcessor(&BidProcessorOpts{ 56 | Log: opts.Log, 57 | UID: opts.UID, 58 | OutDir: opts.OutDir, 59 | OutputTSV: opts.OutputTSV, 60 | RedisAddr: opts.RedisAddr, 61 | UseRedis: opts.UseRedis, 62 | }) 63 | return c, err 64 | } 65 | 66 | func (c *BidCollector) 
MustStart() { 67 | go c.processor.Start() 68 | 69 | if c.opts.CollectGetHeader { 70 | poller := NewGetHeaderPoller(&GetHeaderPollerOpts{ 71 | Log: c.log, 72 | BidC: c.getHeaderBidC, 73 | BeaconURI: c.opts.BeaconNodeURI, 74 | Relays: c.opts.Relays, 75 | }) 76 | go poller.Start() 77 | } 78 | 79 | if c.opts.CollectDataAPI { 80 | poller := NewDataAPIPoller(&DataAPIPollerOpts{ 81 | Log: c.log, 82 | BidC: c.dataAPIBidC, 83 | Relays: c.opts.Relays, 84 | }) 85 | go poller.Start() 86 | } 87 | 88 | if c.opts.CollectUltrasoundStream { 89 | ultrasoundStream := NewUltrasoundStreamConnection(UltrasoundStreamOpts{ 90 | Log: c.log, 91 | BidC: c.ultrasoundBidC, 92 | }) 93 | go ultrasoundStream.Start() 94 | } 95 | 96 | for { 97 | select { 98 | case bid := <-c.ultrasoundBidC: 99 | commonBid := UltrasoundStreamToCommonBid(&bid) 100 | c.processor.processBids([]*types.CommonBid{commonBid}) 101 | case bids := <-c.dataAPIBidC: 102 | commonBids := DataAPIToCommonBids(bids) 103 | c.processor.processBids(commonBids) 104 | case bid := <-c.getHeaderBidC: 105 | commonBid := GetHeaderToCommonBid(bid) 106 | c.processor.processBids([]*types.CommonBid{commonBid}) 107 | } 108 | } 109 | } 110 | -------------------------------------------------------------------------------- /common/ultrasoundbid_encoding.go: -------------------------------------------------------------------------------- 1 | package common 2 | 3 | import ( 4 | ssz "github.com/ferranbt/fastssz" 5 | ) 6 | 7 | // MarshalSSZ ssz marshals the UltrasoundStreamBid object 8 | func (u *UltrasoundStreamBid) MarshalSSZ() ([]byte, error) { 9 | return ssz.MarshalSSZ(u) 10 | } 11 | 12 | // MarshalSSZTo ssz marshals the UltrasoundStreamBid object to a target array 13 | func (u *UltrasoundStreamBid) MarshalSSZTo(buf []byte) (dst []byte, err error) { 14 | dst = buf 15 | 16 | // Field (0) 'Timestamp' 17 | dst = ssz.MarshalUint64(dst, u.Timestamp) 18 | 19 | // Field (1) 'Slot' 20 | dst = ssz.MarshalUint64(dst, u.Slot) 21 | 22 | // Field (2) 
'BlockNumber' 23 | dst = ssz.MarshalUint64(dst, u.BlockNumber) 24 | 25 | // Field (3) 'BlockHash' 26 | dst = append(dst, u.BlockHash[:]...) 27 | 28 | // Field (4) 'ParentHash' 29 | dst = append(dst, u.ParentHash[:]...) 30 | 31 | // Field (5) 'BuilderPubkey' 32 | dst = append(dst, u.BuilderPubkey[:]...) 33 | 34 | // Field (6) 'FeeRecipient' 35 | dst = append(dst, u.FeeRecipient[:]...) 36 | 37 | // Field (7) 'Value' 38 | dst = append(dst, u.Value[:]...) 39 | 40 | return 41 | } 42 | 43 | // UnmarshalSSZ ssz unmarshals the UltrasoundStreamBid object 44 | func (u *UltrasoundStreamBid) UnmarshalSSZ(buf []byte) error { 45 | var err error 46 | size := uint64(len(buf)) 47 | if size != 188 { 48 | return ssz.ErrSize 49 | } 50 | 51 | // Field (0) 'Timestamp' 52 | u.Timestamp = ssz.UnmarshallUint64(buf[0:8]) 53 | 54 | // Field (1) 'Slot' 55 | u.Slot = ssz.UnmarshallUint64(buf[8:16]) 56 | 57 | // Field (2) 'BlockNumber' 58 | u.BlockNumber = ssz.UnmarshallUint64(buf[16:24]) 59 | 60 | // Field (3) 'BlockHash' 61 | copy(u.BlockHash[:], buf[24:56]) 62 | 63 | // Field (4) 'ParentHash' 64 | copy(u.ParentHash[:], buf[56:88]) 65 | 66 | // Field (5) 'BuilderPubkey' 67 | copy(u.BuilderPubkey[:], buf[88:136]) 68 | 69 | // Field (6) 'FeeRecipient' 70 | copy(u.FeeRecipient[:], buf[136:156]) 71 | 72 | // Field (7) 'Value' 73 | copy(u.Value[:], buf[156:188]) 74 | 75 | return err 76 | } 77 | 78 | // SizeSSZ returns the ssz encoded size in bytes for the UltrasoundStreamBid object 79 | func (u *UltrasoundStreamBid) SizeSSZ() (size int) { 80 | size = 188 81 | return 82 | } 83 | 84 | // HashTreeRoot ssz hashes the UltrasoundStreamBid object 85 | func (u *UltrasoundStreamBid) HashTreeRoot() ([32]byte, error) { 86 | return ssz.HashWithDefaultHasher(u) 87 | } 88 | 89 | // HashTreeRootWith ssz hashes the UltrasoundStreamBid object with a hasher 90 | func (u *UltrasoundStreamBid) HashTreeRootWith(hh ssz.HashWalker) (err error) { 91 | indx := hh.Index() 92 | 93 | // Field (0) 'Timestamp' 94 | 
hh.PutUint64(u.Timestamp) 95 | 96 | // Field (1) 'Slot' 97 | hh.PutUint64(u.Slot) 98 | 99 | // Field (2) 'BlockNumber' 100 | hh.PutUint64(u.BlockNumber) 101 | 102 | // Field (3) 'BlockHash' 103 | hh.PutBytes(u.BlockHash[:]) 104 | 105 | // Field (4) 'ParentHash' 106 | hh.PutBytes(u.ParentHash[:]) 107 | 108 | // Field (5) 'BuilderPubkey' 109 | hh.PutBytes(u.BuilderPubkey[:]) 110 | 111 | // Field (6) 'FeeRecipient' 112 | hh.PutBytes(u.FeeRecipient[:]) 113 | 114 | // Field (7) 'Value' 115 | hh.PutBytes(u.Value[:]) 116 | 117 | hh.Merkleize(indx) 118 | return 119 | } 120 | 121 | // GetTree ssz hashes the UltrasoundStreamBid object 122 | func (u *UltrasoundStreamBid) GetTree() (*ssz.Node, error) { 123 | return ssz.ProofTree(u) 124 | } 125 | -------------------------------------------------------------------------------- /services/bidcollect/webserver/webserver.go: -------------------------------------------------------------------------------- 1 | // Package webserver provides a SSE stream of new bids (via Redis subscription) 2 | package webserver 3 | 4 | import ( 5 | "context" 6 | "errors" 7 | "net/http" 8 | "sync" 9 | "time" 10 | 11 | "github.com/flashbots/relayscan/services/bidcollect/types" 12 | "github.com/go-chi/chi/v5" 13 | "github.com/redis/go-redis/v9" 14 | "github.com/sirupsen/logrus" 15 | ) 16 | 17 | type HTTPServerConfig struct { 18 | ListenAddr string 19 | RedisAddr string 20 | Log *logrus.Entry 21 | } 22 | 23 | type Server struct { 24 | cfg *HTTPServerConfig 25 | log *logrus.Entry 26 | srv *http.Server 27 | 28 | redisClient *redis.Client 29 | 30 | sseConnectionMap map[string]*SSESubscription 31 | sseConnectionLock sync.RWMutex 32 | } 33 | 34 | func New(cfg *HTTPServerConfig) (srv *Server) { 35 | srv = &Server{ 36 | cfg: cfg, 37 | log: cfg.Log, 38 | srv: nil, 39 | sseConnectionMap: make(map[string]*SSESubscription), 40 | } 41 | 42 | router := chi.NewRouter() 43 | router.Get("/v1/sse/bids", srv.handleSSESubscription) 44 | 45 | srv.srv = &http.Server{ 46 | 
Addr: cfg.ListenAddr, 47 | Handler: router, 48 | ReadHeaderTimeout: 1 * time.Second, 49 | } 50 | return srv 51 | } 52 | 53 | func (srv *Server) MustStart() { 54 | go srv.MustSubscribeToRedis() 55 | 56 | srv.log.WithField("listenAddress", srv.cfg.ListenAddr).Info("Starting HTTP server") 57 | if err := srv.srv.ListenAndServe(); err != nil && !errors.Is(err, http.ErrServerClosed) { 58 | srv.log.WithField("err", err).Error("HTTP server failed") 59 | } 60 | } 61 | 62 | func (srv *Server) MustSubscribeToRedis() { 63 | if srv.cfg.RedisAddr == "" { 64 | srv.log.Fatal("Redis address is required") 65 | } 66 | 67 | srv.log.Info("Subscribing to Redis...") 68 | srv.redisClient = redis.NewClient(&redis.Options{ 69 | Addr: srv.cfg.RedisAddr, 70 | Password: "", // no password set 71 | DB: 0, // use default DB 72 | }) 73 | 74 | // Make sure we can connect to redis to connect to redis 75 | if _, err := srv.redisClient.Ping(context.Background()).Result(); err != nil { 76 | srv.log.WithError(err).Fatal("failed to ping redis") 77 | } 78 | 79 | pubsub := srv.redisClient.Subscribe(context.Background(), types.RedisChannel) 80 | ch := pubsub.Channel() 81 | srv.log.Info("Subscribed to Redis") 82 | 83 | for msg := range ch { 84 | srv.SendToSubscribers(msg.Payload) 85 | } 86 | } 87 | 88 | func (srv *Server) addSubscriber(sub *SSESubscription) { 89 | srv.sseConnectionLock.Lock() 90 | defer srv.sseConnectionLock.Unlock() 91 | srv.sseConnectionMap[sub.uid] = sub 92 | srv.log.WithField("subscribers", len(srv.sseConnectionMap)).Info("subscriber added") 93 | } 94 | 95 | func (srv *Server) removeSubscriber(sub *SSESubscription) { 96 | srv.sseConnectionLock.Lock() 97 | defer srv.sseConnectionLock.Unlock() 98 | delete(srv.sseConnectionMap, sub.uid) 99 | srv.log.WithField("subscribers", len(srv.sseConnectionMap)).Info("subscriber removed") 100 | } 101 | 102 | func (srv *Server) SendToSubscribers(msg string) { 103 | srv.sseConnectionLock.RLock() 104 | defer srv.sseConnectionLock.RUnlock() 105 | if 
len(srv.sseConnectionMap) == 0 { 106 | return 107 | } 108 | 109 | // Send tx to all subscribers (only if channel is not full) 110 | for _, sub := range srv.sseConnectionMap { 111 | select { 112 | case sub.msgC <- msg: 113 | default: 114 | } 115 | } 116 | } 117 | -------------------------------------------------------------------------------- /services/bidcollect/types/types.go: -------------------------------------------------------------------------------- 1 | // Package types contains various types, consts and vars for bidcollect 2 | package types 3 | 4 | import ( 5 | "fmt" 6 | "math/big" 7 | "strings" 8 | 9 | "github.com/flashbots/relayscan/common" 10 | ) 11 | 12 | var CommonBidCSVFields = []string{ 13 | "source_type", 14 | "received_at_ms", 15 | 16 | "timestamp_ms", 17 | "slot", 18 | "slot_t_ms", 19 | "value", 20 | 21 | "block_hash", 22 | "parent_hash", 23 | "builder_pubkey", 24 | "block_number", 25 | 26 | "block_fee_recipient", 27 | "relay", 28 | "proposer_pubkey", 29 | "proposer_fee_recipient", 30 | "optimistic_submission", 31 | } 32 | 33 | type CommonBid struct { 34 | // Collector-internal fields 35 | SourceType int `json:"source_type"` 36 | ReceivedAtMs int64 `json:"received_at"` 37 | 38 | // Common fields 39 | Slot uint64 `json:"slot"` 40 | BlockNumber uint64 `json:"block_number"` 41 | BlockHash string `json:"block_hash"` 42 | ParentHash string `json:"parent_hash"` 43 | BuilderPubkey string `json:"builder_pubkey"` 44 | Value string `json:"value"` 45 | 46 | // Ultrasound top-bid stream - https://github.com/ultrasoundmoney/docs/blob/main/top-bid-websocket.md 47 | BlockFeeRecipient string `json:"block_fee_recipient"` 48 | 49 | // Data API 50 | // - Ultrasound: https://relay-analytics.ultrasound.money/relay/v1/data/bidtraces/builder_blocks_received?slot=9194844 51 | // - Flashbots: https://boost-relay.flashbots.net/relay/v1/data/bidtraces/builder_blocks_received?slot=8969837 52 | Relay string `json:"relay"` 53 | TimestampMs int64 `json:"timestamp_ms"` 54 | 
ProposerPubkey string `json:"proposer_pubkey"` 55 | ProposerFeeRecipient string `json:"proposer_fee_recipient"` 56 | OptimisticSubmission bool `json:"optimistic_submission"` 57 | } 58 | 59 | func (bid *CommonBid) UniqueKey() string { 60 | return fmt.Sprintf("%d-%s-%s-%s-%s", bid.Slot, bid.BlockHash, bid.ParentHash, bid.BuilderPubkey, bid.Value) 61 | } 62 | 63 | func (bid *CommonBid) ValueAsBigInt() *big.Int { 64 | value := new(big.Int) 65 | value.SetString(bid.Value, 10) 66 | return value 67 | } 68 | 69 | func (bid *CommonBid) ToCSVFields() []string { 70 | bidTimestampMsString := "" 71 | bidIntoSlotTmsString := "" 72 | 73 | // If we have a timestamp, can caculate how 74 | if bid.TimestampMs > 0 { 75 | bidTimestampMsString = fmt.Sprint(bid.TimestampMs) 76 | 77 | // calculate the bid time into the slot 78 | bitIntoSlotTms := bid.TimestampMs - common.SlotToTime(bid.Slot).UnixMilli() 79 | bidIntoSlotTmsString = fmt.Sprint(bitIntoSlotTms) 80 | } 81 | 82 | // Optimistic string can be (1) empty, (2) `true` or (3) `false` 83 | bidIsOptimisticString := "" 84 | if bid.SourceType == SourceTypeDataAPI { 85 | bidIsOptimisticString = boolToString(bid.OptimisticSubmission) 86 | } 87 | 88 | return []string{ 89 | // Collector-internal fields 90 | fmt.Sprint(bid.SourceType), 91 | fmt.Sprint(bid.ReceivedAtMs), 92 | 93 | // Common fields 94 | bidTimestampMsString, 95 | fmt.Sprint(bid.Slot), 96 | bidIntoSlotTmsString, 97 | bid.Value, 98 | 99 | bid.BlockHash, 100 | bid.ParentHash, 101 | bid.BuilderPubkey, 102 | fmt.Sprint(bid.BlockNumber), 103 | 104 | // Ultrasound top-bid stream 105 | bid.BlockFeeRecipient, 106 | 107 | // Relay is common too 108 | bid.Relay, 109 | 110 | // Data API 111 | bid.ProposerPubkey, 112 | bid.ProposerFeeRecipient, 113 | bidIsOptimisticString, 114 | } 115 | } 116 | 117 | func (bid *CommonBid) ToCSVLine(separator string) string { 118 | return strings.Join(bid.ToCSVFields(), separator) 119 | } 120 | 121 | func boolToString(b bool) string { 122 | if b { 123 | 
return "true" 124 | } 125 | return "false" 126 | } 127 | -------------------------------------------------------------------------------- /database/typesconv.go: -------------------------------------------------------------------------------- 1 | package database 2 | 3 | import ( 4 | "math/big" 5 | "time" 6 | "unicode/utf8" 7 | 8 | "github.com/flashbots/go-boost-utils/types" 9 | relaycommon "github.com/flashbots/mev-boost-relay/common" 10 | "github.com/flashbots/relayscan/common" 11 | ) 12 | 13 | func BidTraceV2JSONToPayloadDeliveredEntry(relay string, entry relaycommon.BidTraceV2JSON) DataAPIPayloadDeliveredEntry { 14 | wei, ok := new(big.Int).SetString(entry.Value, 10) 15 | if !ok { 16 | wei = big.NewInt(0) 17 | } 18 | eth := common.WeiToEth(wei) 19 | ret := DataAPIPayloadDeliveredEntry{ 20 | Relay: relay, 21 | Epoch: entry.Slot / 32, 22 | Slot: entry.Slot, 23 | ParentHash: entry.ParentHash, 24 | BlockHash: entry.BlockHash, 25 | BuilderPubkey: entry.BuilderPubkey, 26 | ProposerPubkey: entry.ProposerPubkey, 27 | ProposerFeeRecipient: entry.ProposerFeeRecipient, 28 | GasLimit: entry.GasLimit, 29 | GasUsed: entry.GasUsed, 30 | ValueClaimedWei: entry.Value, 31 | ValueClaimedEth: eth.String(), 32 | } 33 | 34 | if entry.NumTx > 0 { 35 | ret.NumTx = NewNullInt64(int64(entry.NumTx)) //nolint:gosec 36 | } 37 | 38 | if entry.BlockNumber > 0 { 39 | ret.BlockNumber = NewNullInt64(int64(entry.BlockNumber)) //nolint:gosec 40 | } 41 | return ret 42 | } 43 | 44 | func BidTraceV2WithTimestampJSONToBuilderBidEntry(relay string, entry relaycommon.BidTraceV2WithTimestampJSON) DataAPIBuilderBidEntry { 45 | ret := DataAPIBuilderBidEntry{ 46 | Relay: relay, 47 | Epoch: entry.Slot / 32, 48 | Slot: entry.Slot, 49 | ParentHash: entry.ParentHash, 50 | BlockHash: entry.BlockHash, 51 | BuilderPubkey: entry.BuilderPubkey, 52 | ProposerPubkey: entry.ProposerPubkey, 53 | ProposerFeeRecipient: entry.ProposerFeeRecipient, 54 | GasLimit: entry.GasLimit, 55 | GasUsed: entry.GasUsed, 56 | Value: 
entry.Value, 57 | Timestamp: time.Unix(entry.Timestamp, 0).UTC(), 58 | } 59 | 60 | if entry.NumTx > 0 { 61 | ret.NumTx = NewNullInt64(int64(entry.NumTx)) //nolint:gosec 62 | } 63 | 64 | if entry.BlockNumber > 0 { 65 | ret.BlockNumber = NewNullInt64(int64(entry.BlockNumber)) //nolint:gosec 66 | } 67 | return ret 68 | } 69 | 70 | func ExtraDataToUtf8Str(extraData types.ExtraData) string { 71 | // replace non-ascii bytes 72 | for i, b := range extraData { 73 | if b < 32 || b > 126 { 74 | extraData[i] = 32 75 | } 76 | } 77 | 78 | // convert to str 79 | if !utf8.Valid(extraData) { 80 | return "" 81 | } 82 | 83 | return string(extraData) 84 | } 85 | 86 | func SignedBuilderBidToEntry(relay string, slot uint64, parentHash, proposerPubkey string, timeRequestStart, timeRequestEnd time.Time, bid *types.SignedBuilderBid) SignedBuilderBidEntry { 87 | return SignedBuilderBidEntry{ 88 | Relay: relay, 89 | RequestedAt: timeRequestStart, 90 | ReceivedAt: timeRequestEnd, 91 | LatencyMS: timeRequestEnd.Sub(timeRequestStart).Milliseconds(), 92 | 93 | Slot: slot, 94 | ParentHash: parentHash, 95 | ProposerPubkey: proposerPubkey, 96 | 97 | Pubkey: bid.Message.Pubkey.String(), 98 | Signature: bid.Signature.String(), 99 | 100 | Value: bid.Message.Value.String(), 101 | FeeRecipient: bid.Message.Header.FeeRecipient.String(), 102 | BlockHash: bid.Message.Header.BlockHash.String(), 103 | BlockNumber: bid.Message.Header.BlockNumber, 104 | GasLimit: bid.Message.Header.GasLimit, 105 | GasUsed: bid.Message.Header.GasUsed, 106 | ExtraData: ExtraDataToUtf8Str(bid.Message.Header.ExtraData), 107 | Epoch: slot / 32, 108 | Timestamp: bid.Message.Header.Timestamp, 109 | PrevRandao: bid.Message.Header.Random.String(), 110 | } 111 | } 112 | -------------------------------------------------------------------------------- /services/website/utils_test.go: -------------------------------------------------------------------------------- 1 | package website 2 | 3 | import ( 4 | "testing" 5 | 6 | 
"github.com/flashbots/relayscan/database" 7 | "github.com/stretchr/testify/require" 8 | ) 9 | 10 | func TestConsolidateBuilderEntries(t *testing.T) { 11 | in := []*database.TopBuilderEntry{ 12 | { 13 | ExtraData: "made by builder0x69", 14 | NumBlocks: 1, 15 | Aliases: []string{"builder0x69"}, 16 | }, 17 | { 18 | ExtraData: "builder0x69", 19 | NumBlocks: 1, 20 | Aliases: []string{"builder0x69"}, 21 | }, 22 | { 23 | ExtraData: "foo-builder", 24 | NumBlocks: 1, 25 | }, 26 | } 27 | expected := []*TopBuilderDisplayEntry{ 28 | { 29 | Info: &database.TopBuilderEntry{ 30 | ExtraData: "builder0x69", 31 | NumBlocks: 2, 32 | Percent: "66.67", 33 | }, 34 | Children: []*database.TopBuilderEntry{ 35 | { 36 | ExtraData: "made by builder0x69", 37 | NumBlocks: 1, 38 | Percent: "33.33", 39 | Aliases: []string{"builder0x69"}, 40 | }, 41 | { 42 | ExtraData: "builder0x69", 43 | NumBlocks: 1, 44 | Percent: "33.33", 45 | Aliases: []string{"builder0x69"}, 46 | }, 47 | }, 48 | }, 49 | { 50 | Info: &database.TopBuilderEntry{ 51 | ExtraData: "foo-builder", 52 | NumBlocks: 1, 53 | Percent: "33.33", 54 | }, 55 | Children: []*database.TopBuilderEntry{}, 56 | }, 57 | } 58 | 59 | out := consolidateBuilderEntries(in) 60 | for i, o := range out { 61 | require.Equal(t, expected[i], o) 62 | } 63 | } 64 | 65 | func TestConsolidateBuilderProfitEntries(t *testing.T) { 66 | in := []*database.BuilderProfitEntry{ 67 | { 68 | ExtraData: "made by builder0x69", 69 | NumBlocks: 1, 70 | NumBlocksProfit: 1, 71 | ProfitTotal: "1", 72 | Aliases: []string{"builder0x69"}, 73 | }, 74 | { 75 | ExtraData: "builder0x69", 76 | NumBlocks: 1, 77 | NumBlocksProfit: 1, 78 | ProfitTotal: "1", 79 | Aliases: []string{"builder0x69"}, 80 | }, 81 | { 82 | ExtraData: "s3e6f", 83 | NumBlocks: 1, 84 | NumBlocksSubsidised: 1, 85 | SubsidiesTotal: "1", 86 | Aliases: []string{"bob the builder"}, 87 | }, 88 | { 89 | ExtraData: "s0e3f", 90 | NumBlocks: 1, 91 | NumBlocksSubsidised: 1, 92 | SubsidiesTotal: "1", 93 | Aliases: []string{"bob 
the builder"}, 94 | }, 95 | { 96 | ExtraData: "s12e14t", 97 | NumBlocks: 1, 98 | NumBlocksSubsidised: 1, 99 | SubsidiesTotal: "1", 100 | Aliases: []string{"bob the builder"}, 101 | }, 102 | { 103 | ExtraData: "s0e2ts10e11t", 104 | NumBlocks: 1, 105 | NumBlocksSubsidised: 1, 106 | SubsidiesTotal: "1", 107 | Aliases: []string{"bob the builder"}, 108 | }, 109 | { 110 | ExtraData: "manta-builder", 111 | NumBlocks: 1, 112 | NumBlocksProfit: 1, 113 | ProfitTotal: "3", 114 | }, 115 | } 116 | expected := []*database.BuilderProfitEntry{ 117 | { 118 | ExtraData: "manta-builder", 119 | NumBlocks: 1, 120 | NumBlocksProfit: 1, 121 | ProfitTotal: "3", 122 | }, 123 | { 124 | ExtraData: "builder0x69", 125 | NumBlocks: 2, 126 | NumBlocksProfit: 2, 127 | ProfitTotal: "2.0000", 128 | ProfitPerBlockAvg: "1.0000", 129 | SubsidiesTotal: "0.0000", 130 | Aliases: []string{"made by builder0x69", "builder0x69"}, 131 | }, 132 | { 133 | ExtraData: "bob the builder", 134 | NumBlocks: 4, 135 | NumBlocksSubsidised: 4, 136 | ProfitTotal: "0.0000", 137 | ProfitPerBlockAvg: "0.0000", 138 | SubsidiesTotal: "4.0000", 139 | Aliases: []string{"s3e6f", "s0e3f", "s12e14t", "s0e2ts10e11t"}, 140 | }, 141 | } 142 | 143 | out := consolidateBuilderProfitEntries(in) 144 | for i, o := range out { 145 | require.Equal(t, expected[i], o) 146 | } 147 | } 148 | 149 | func TestLowercaseNoWhitespace(t *testing.T) { 150 | c1 := lowercaseNoWhitespace("abCD 123!@#") 151 | require.Equal(t, "abcd123!@#", c1) 152 | } 153 | -------------------------------------------------------------------------------- /services/bidcollect/ultrasound-stream.go: -------------------------------------------------------------------------------- 1 | package bidcollect 2 | 3 | import ( 4 | "strings" 5 | "time" 6 | 7 | "github.com/ethereum/go-ethereum/common/hexutil" 8 | "github.com/flashbots/relayscan/common" 9 | "github.com/flashbots/relayscan/services/bidcollect/types" 10 | "github.com/gorilla/websocket" 11 | "github.com/sirupsen/logrus" 12 | ) 
13 | 14 | type UltrasoundStreamBidsMsg struct { 15 | Bid common.UltrasoundStreamBid 16 | Relay string 17 | ReceivedAt time.Time 18 | } 19 | 20 | type UltrasoundStreamOpts struct { 21 | Log *logrus.Entry 22 | BidC chan UltrasoundStreamBidsMsg 23 | } 24 | 25 | type UltrasoundStreamConnection struct { 26 | log *logrus.Entry 27 | url string 28 | bidC chan UltrasoundStreamBidsMsg 29 | backoffSec int 30 | } 31 | 32 | func NewUltrasoundStreamConnection(opts UltrasoundStreamOpts) *UltrasoundStreamConnection { 33 | return &UltrasoundStreamConnection{ 34 | log: opts.Log, 35 | url: types.UltrasoundStreamDefaultURL, 36 | bidC: opts.BidC, 37 | backoffSec: types.InitialBackoffSec, 38 | } 39 | } 40 | 41 | func (ustream *UltrasoundStreamConnection) Start() { 42 | ustream.connect() 43 | } 44 | 45 | func (ustream *UltrasoundStreamConnection) reconnect() { 46 | backoffDuration := time.Duration(ustream.backoffSec) * time.Second 47 | ustream.log.Infof("[ultrasounds-stream] reconnecting to ultrasound stream in %s sec ...", backoffDuration.String()) 48 | time.Sleep(backoffDuration) 49 | 50 | // increase backoff timeout for next try 51 | ustream.backoffSec *= 2 52 | if ustream.backoffSec > types.MaxBackoffSec { 53 | ustream.backoffSec = types.MaxBackoffSec 54 | } 55 | 56 | ustream.connect() 57 | } 58 | 59 | func (ustream *UltrasoundStreamConnection) connect() { 60 | ustream.log.WithField("uri", ustream.url).Info("[ultrasounds-stream] Starting bid stream...") 61 | 62 | dialer := websocket.DefaultDialer 63 | wsSubscriber, resp, err := dialer.Dial(ustream.url, nil) 64 | if err != nil { 65 | ustream.log.WithError(err).Error("[ultrasounds-stream] failed to connect to bloxroute, reconnecting in a bit...") 66 | go ustream.reconnect() 67 | return 68 | } 69 | defer wsSubscriber.Close() //nolint:errcheck 70 | defer resp.Body.Close() //nolint:errcheck 71 | 72 | ustream.log.Info("[ultrasounds-stream] stream connection successful") 73 | ustream.backoffSec = types.InitialBackoffSec // reset backoff 
timeout 74 | 75 | bid := new(common.UltrasoundStreamBid) 76 | 77 | for { 78 | _, nextNotification, err := wsSubscriber.ReadMessage() 79 | if err != nil { 80 | // Handle websocket errors, by closing and reconnecting. Errors seen previously: 81 | ustream.log.WithError(err).Error("ultrasound stream websocket error") 82 | go ustream.reconnect() 83 | return 84 | } 85 | 86 | // nc.log.WithField("msg", hexutil.Encode(nextNotification)).Info("got message from ultrasound stream") 87 | 88 | // Unmarshal SSZ 89 | err = bid.UnmarshalSSZ(nextNotification) 90 | if err != nil { 91 | ustream.log.WithError(err).WithField("msg", hexutil.Encode(nextNotification)).Error("[ultrasounds-stream] failed to unmarshal ultrasound stream message") 92 | continue 93 | } 94 | 95 | ustream.bidC <- UltrasoundStreamBidsMsg{ 96 | Bid: *bid, 97 | Relay: "relay.ultrasound.money", 98 | ReceivedAt: time.Now().UTC(), 99 | } 100 | } 101 | } 102 | 103 | func UltrasoundStreamToCommonBid(bid *UltrasoundStreamBidsMsg) *types.CommonBid { 104 | blockHash := hexutil.Encode(bid.Bid.BlockHash[:]) 105 | parentHash := hexutil.Encode(bid.Bid.ParentHash[:]) 106 | builderPubkey := hexutil.Encode(bid.Bid.BuilderPubkey[:]) 107 | blockFeeRecipient := hexutil.Encode(bid.Bid.FeeRecipient[:]) 108 | 109 | return &types.CommonBid{ 110 | SourceType: types.SourceTypeUltrasoundStream, 111 | ReceivedAtMs: bid.ReceivedAt.UnixMilli(), 112 | 113 | TimestampMs: int64(bid.Bid.Timestamp), //nolint:gosec 114 | Slot: bid.Bid.Slot, 115 | BlockNumber: bid.Bid.BlockNumber, 116 | BlockHash: strings.ToLower(blockHash), 117 | ParentHash: strings.ToLower(parentHash), 118 | BuilderPubkey: strings.ToLower(builderPubkey), 119 | Value: bid.Bid.Value.String(), 120 | BlockFeeRecipient: strings.ToLower(blockFeeRecipient), 121 | Relay: bid.Relay, 122 | } 123 | } 124 | -------------------------------------------------------------------------------- /cmd/util/update-extradata.go: 
-------------------------------------------------------------------------------- 1 | package util 2 | 3 | import ( 4 | "sync" 5 | 6 | "github.com/ethereum/go-ethereum/common/hexutil" 7 | "github.com/flashbots/relayscan/database" 8 | dbvars "github.com/flashbots/relayscan/database/vars" 9 | "github.com/flashbots/relayscan/vars" 10 | "github.com/metachris/flashbotsrpc" 11 | "github.com/sirupsen/logrus" 12 | "github.com/spf13/cobra" 13 | ) 14 | 15 | var slot uint64 16 | 17 | func init() { 18 | // rootCmd.AddCommand(backfillExtradataCmd) 19 | backfillExtradataCmd.Flags().StringVar(ðNodeURI, "eth-node", vars.DefaultEthNodeURI, "eth node URI (i.e. Infura)") 20 | backfillExtradataCmd.Flags().StringVar(ðNodeBackupURI, "eth-node-backup", vars.DefaultEthBackupNodeURI, "eth node backup URI (i.e. Infura)") 21 | } 22 | 23 | var backfillExtradataCmd = &cobra.Command{ 24 | Use: "backfill-extradata", 25 | Short: "Backfill extra_data", 26 | Run: func(cmd *cobra.Command, args []string) { 27 | var err error 28 | 29 | log.Infof("Using eth node: %s", ethNodeURI) 30 | client := flashbotsrpc.New(ethNodeURI) 31 | var client2 *flashbotsrpc.FlashbotsRPC 32 | if ethNodeBackupURI != "" { 33 | log.Infof("Using eth backup node: %s", ethNodeBackupURI) 34 | client2 = flashbotsrpc.New(ethNodeBackupURI) 35 | } 36 | _, _ = client, client2 37 | 38 | // Connect to Postgres 39 | db := database.MustConnectPostgres(log, vars.DefaultPostgresDSN) 40 | 41 | entries := []database.DataAPIPayloadDeliveredEntry{} 42 | query := `SELECT id, inserted_at, relay, epoch, slot, parent_hash, block_hash, builder_pubkey, proposer_pubkey, proposer_fee_recipient, gas_limit, gas_used, value_claimed_wei, value_claimed_eth, num_tx, block_number FROM ` + dbvars.TableDataAPIPayloadDelivered + ` WHERE slot < 4823872 AND extra_data = ''` 43 | // query += ` LIMIT 1000` 44 | err = db.DB.Select(&entries, query) 45 | if err != nil { 46 | log.WithError(err).Fatalf("couldn't get entries") 47 | } 48 | 49 | log.Infof("got %d entries", 
len(entries)) 50 | if len(entries) == 0 { 51 | return 52 | } 53 | 54 | wg := new(sync.WaitGroup) 55 | entryC := make(chan database.DataAPIPayloadDeliveredEntry) 56 | if slot != 0 { 57 | numThreads = 1 58 | } 59 | for i := 0; i < int(numThreads); i++ { //nolint:intrange 60 | log.Infof("starting worker %d", i+1) 61 | wg.Add(1) 62 | go startBackfillWorker(wg, db, client, client2, entryC) 63 | } 64 | 65 | for _, entry := range entries { 66 | entryC <- entry 67 | } 68 | close(entryC) 69 | wg.Wait() 70 | }, 71 | } 72 | 73 | func startBackfillWorker(wg *sync.WaitGroup, db *database.DatabaseService, client, client2 *flashbotsrpc.FlashbotsRPC, entryC chan database.DataAPIPayloadDeliveredEntry) { 74 | defer wg.Done() 75 | 76 | getBlockByHash := func(blockHash string, withTransactions bool) (*flashbotsrpc.Block, error) { 77 | block, err := client.EthGetBlockByHash(blockHash, withTransactions) 78 | if err != nil || block == nil { 79 | block, err = client2.EthGetBlockByHash(blockHash, withTransactions) 80 | } 81 | return block, err 82 | } 83 | 84 | var err error 85 | var block *flashbotsrpc.Block 86 | for entry := range entryC { 87 | _log := log.WithFields(logrus.Fields{ 88 | "slot": entry.Slot, 89 | "blockNumber": entry.BlockNumber.Int64, 90 | "blockHash": entry.BlockHash, 91 | "relay": entry.Relay, 92 | }) 93 | _log.Infof("checking slot...") 94 | 95 | block, err = getBlockByHash(entry.BlockHash, true) 96 | if err != nil { 97 | _log.WithError(err).Fatalf("couldn't get block %s", entry.BlockHash) 98 | } else if block == nil { 99 | _log.WithError(err).Warnf("block not found: %s", entry.BlockHash) 100 | continue 101 | } 102 | 103 | extraDataBytes, err := hexutil.Decode(block.ExtraData) 104 | if err != nil { 105 | log.WithError(err).Errorf("failed to decode extradata %s", block.ExtraData) 106 | } else { 107 | entry.ExtraData = database.ExtraDataToUtf8Str(extraDataBytes) 108 | _log.Infof("id: %d, extradata: %s", entry.ID, entry.ExtraData) 109 | if entry.ExtraData == "" { 110 | 
continue 111 | } 112 | 113 | query := `UPDATE ` + dbvars.TableDataAPIPayloadDelivered + ` SET extra_data=$1 WHERE id=$2` 114 | _, err := db.DB.Exec(query, entry.ExtraData, entry.ID) 115 | if err != nil { 116 | _log.WithError(err).Fatalf("failed to save entry") 117 | } 118 | } 119 | } 120 | } 121 | -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module github.com/flashbots/relayscan 2 | 3 | go 1.24.0 4 | 5 | toolchain go1.24.1 6 | 7 | require ( 8 | github.com/NYTimes/gziphandler v1.1.1 9 | github.com/dustin/go-humanize v1.0.1 10 | github.com/ethereum/go-ethereum v1.16.7 11 | github.com/ferranbt/fastssz v0.1.4 12 | github.com/flashbots/go-boost-utils v1.6.0 13 | github.com/flashbots/go-utils v0.4.9 14 | github.com/flashbots/mev-boost-relay v1.0.0-alpha4.0.20230519091033-0453fc247553 15 | github.com/go-chi/chi/v5 v5.1.0 16 | github.com/google/uuid v1.3.0 17 | github.com/gorilla/mux v1.8.0 18 | github.com/gorilla/websocket v1.5.1 19 | github.com/jmoiron/sqlx v1.3.5 20 | github.com/lib/pq v1.10.9 21 | github.com/lithammer/shortuuid v3.0.0+incompatible 22 | github.com/metachris/flashbotsrpc v0.5.0 23 | github.com/olekukonko/tablewriter v0.0.5 24 | github.com/redis/go-redis/v9 v9.6.1 25 | github.com/rubenv/sql-migrate v1.7.0 26 | github.com/sirupsen/logrus v1.9.3 27 | github.com/spf13/cobra v1.8.1 28 | github.com/stretchr/testify v1.10.0 29 | github.com/tdewolff/minify v2.3.6+incompatible 30 | go.uber.org/atomic v1.11.0 31 | go.uber.org/zap v1.24.0 32 | golang.org/x/text v0.31.0 33 | gopkg.in/yaml.v3 v3.0.1 34 | ) 35 | 36 | require ( 37 | github.com/DataDog/zstd v1.5.5 // indirect 38 | github.com/Microsoft/go-winio v0.6.2 // indirect 39 | github.com/ProjectZKM/Ziren/crates/go-runtime/zkvm_runtime v0.0.0-20251119083800-2aa1d4cc79d7 // indirect 40 | github.com/attestantio/go-builder-client v0.3.0 // indirect 41 | 
github.com/attestantio/go-eth2-client v0.16.3 // indirect 42 | github.com/bits-and-blooms/bitset v1.24.4 // indirect 43 | github.com/cespare/xxhash/v2 v2.3.0 // indirect 44 | github.com/consensys/gnark-crypto v0.19.2 // indirect 45 | github.com/crate-crypto/go-eth-kzg v1.4.0 // indirect 46 | github.com/crate-crypto/go-ipa v0.0.0-20240724233137-53bbb0ceb27a // indirect 47 | github.com/davecgh/go-spew v1.1.1 // indirect 48 | github.com/deckarep/golang-set/v2 v2.6.0 // indirect 49 | github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0 // indirect 50 | github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect 51 | github.com/emicklei/dot v1.6.2 // indirect 52 | github.com/ethereum/c-kzg-4844/v2 v2.1.5 // indirect 53 | github.com/ethereum/go-verkle v0.2.2 // indirect 54 | github.com/fatih/color v1.16.0 // indirect 55 | github.com/go-gorp/gorp/v3 v3.1.0 // indirect 56 | github.com/go-ole/go-ole v1.3.0 // indirect 57 | github.com/go-playground/validator/v10 v10.11.1 // indirect 58 | github.com/goccy/go-yaml v1.11.0 // indirect 59 | github.com/gofrs/flock v0.12.1 // indirect 60 | github.com/golang/snappy v1.0.0 // indirect 61 | github.com/google/go-cmp v0.6.0 // indirect 62 | github.com/holiman/uint256 v1.3.2 // indirect 63 | github.com/inconshreveable/mousetrap v1.1.0 // indirect 64 | github.com/jarcoal/httpmock v1.2.0 // indirect 65 | github.com/klauspost/compress v1.16.5 // indirect 66 | github.com/klauspost/cpuid/v2 v2.2.4 // indirect 67 | github.com/mattn/go-colorable v0.1.13 // indirect 68 | github.com/mattn/go-isatty v0.0.20 // indirect 69 | github.com/mattn/go-runewidth v0.0.14 // indirect 70 | github.com/minio/sha256-simd v1.0.0 // indirect 71 | github.com/mitchellh/mapstructure v1.5.0 // indirect 72 | github.com/pkg/errors v0.9.1 // indirect 73 | github.com/pmezard/go-difflib v1.0.0 // indirect 74 | github.com/prometheus/client_golang v1.15.1 // indirect 75 | github.com/prometheus/client_model v0.4.0 // indirect 76 | github.com/prometheus/common 
v0.43.0 // indirect 77 | github.com/prysmaticlabs/go-bitfield v0.0.0-20210809151128-385d8c5e3fb7 // indirect 78 | github.com/r3labs/sse/v2 v2.10.0 // indirect 79 | github.com/rivo/uniseg v0.4.4 // indirect 80 | github.com/shirou/gopsutil v3.21.11+incompatible // indirect 81 | github.com/spf13/pflag v1.0.6 // indirect 82 | github.com/supranational/blst v0.3.16 // indirect 83 | github.com/tdewolff/parse v2.3.4+incompatible // indirect 84 | github.com/tidwall/gjson v1.14.2 // indirect 85 | github.com/tklauser/go-sysconf v0.3.12 // indirect 86 | github.com/tklauser/numcpus v0.6.1 // indirect 87 | github.com/yusufpapurcu/wmi v1.2.3 // indirect 88 | go.uber.org/multierr v1.11.0 // indirect 89 | golang.org/x/crypto v0.45.0 // indirect 90 | golang.org/x/exp v0.0.0-20231110203233-9a3e6036ecaa // indirect 91 | golang.org/x/net v0.47.0 // indirect 92 | golang.org/x/sync v0.18.0 // indirect 93 | golang.org/x/sys v0.38.0 // indirect 94 | golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect 95 | gopkg.in/cenkalti/backoff.v1 v1.1.0 // indirect 96 | gopkg.in/yaml.v2 v2.4.0 // indirect 97 | ) 98 | -------------------------------------------------------------------------------- /services/bidcollect/website/devserver.go: -------------------------------------------------------------------------------- 1 | // Package website contains the service delivering the website 2 | package website 3 | 4 | // 5 | // DevServer is a simple webserver for development purposes that simulates the file listing HTMLs 6 | // 7 | 8 | import ( 9 | "encoding/json" 10 | "errors" 11 | "fmt" 12 | "net/http" 13 | _ "net/http/pprof" 14 | "time" 15 | 16 | "github.com/gorilla/mux" 17 | "github.com/sirupsen/logrus" 18 | "github.com/tdewolff/minify" 19 | "github.com/tdewolff/minify/html" 20 | uberatomic "go.uber.org/atomic" 21 | ) 22 | 23 | var ErrServerAlreadyStarted = errors.New("server was already started") 24 | 25 | type DevWebserverOpts struct { 26 | ListenAddress string 27 | Log *logrus.Entry 28 
| } 29 | 30 | type DevWebserver struct { 31 | opts *DevWebserverOpts 32 | log *logrus.Entry 33 | 34 | srv *http.Server 35 | srvStarted uberatomic.Bool 36 | minifier *minify.M 37 | } 38 | 39 | func NewDevWebserver(opts *DevWebserverOpts) (server *DevWebserver, err error) { 40 | minifier := minify.New() 41 | minifier.AddFunc("text/css", html.Minify) 42 | minifier.AddFunc("text/html", html.Minify) 43 | minifier.AddFunc("application/javascript", html.Minify) 44 | 45 | server = &DevWebserver{ //nolint:exhaustruct 46 | opts: opts, 47 | log: opts.Log, 48 | minifier: minifier, 49 | } 50 | 51 | return server, nil 52 | } 53 | 54 | func (srv *DevWebserver) StartServer() (err error) { 55 | if srv.srvStarted.Swap(true) { 56 | return ErrServerAlreadyStarted 57 | } 58 | 59 | srv.srv = &http.Server{ //nolint:exhaustruct 60 | Addr: srv.opts.ListenAddress, 61 | Handler: srv.getRouter(), 62 | 63 | ReadTimeout: 600 * time.Millisecond, 64 | ReadHeaderTimeout: 400 * time.Millisecond, 65 | WriteTimeout: 3 * time.Second, 66 | IdleTimeout: 3 * time.Second, 67 | } 68 | 69 | err = srv.srv.ListenAndServe() 70 | if errors.Is(err, http.ErrServerClosed) { 71 | return nil 72 | } 73 | return err 74 | } 75 | 76 | func (srv *DevWebserver) getRouter() http.Handler { 77 | r := mux.NewRouter() 78 | r.PathPrefix("/static/").Handler(http.StripPrefix("/static/", http.FileServer(http.Dir("./website/static")))) 79 | 80 | r.HandleFunc("/", srv.handleRoot).Methods(http.MethodGet) 81 | r.HandleFunc("/index.html", srv.handleRoot).Methods(http.MethodGet) 82 | r.HandleFunc("/ethereum/mainnet/{month}/index.html", srv.handleMonth).Methods(http.MethodGet) 83 | 84 | return r 85 | } 86 | 87 | func (srv *DevWebserver) RespondError(w http.ResponseWriter, code int, message string) { 88 | w.Header().Set("Content-Type", "application/json") 89 | w.WriteHeader(code) 90 | resp := HTTPErrorResp{code, message} 91 | if err := json.NewEncoder(w).Encode(resp); err != nil { 92 | srv.log.WithError(err).Error("Couldn't write error 
response") 93 | http.Error(w, "", http.StatusInternalServerError) 94 | } 95 | } 96 | 97 | func (srv *DevWebserver) RespondOK(w http.ResponseWriter, response any) { 98 | w.Header().Set("Content-Type", "application/json") 99 | w.WriteHeader(http.StatusOK) 100 | if err := json.NewEncoder(w).Encode(response); err != nil { 101 | srv.log.WithError(err).Error("Couldn't write OK response") 102 | http.Error(w, "", http.StatusInternalServerError) 103 | } 104 | } 105 | 106 | func (srv *DevWebserver) handleRoot(w http.ResponseWriter, req *http.Request) { 107 | tpl, err := ParseIndexTemplate() 108 | if err != nil { 109 | srv.log.WithError(err).Error("wroot: error parsing template") 110 | return 111 | } 112 | w.WriteHeader(http.StatusOK) 113 | 114 | data := *DummyHTMLData 115 | data.Path = "/" 116 | err = tpl.ExecuteTemplate(w, "base", data) 117 | if err != nil { 118 | srv.log.WithError(err).Error("wroot: error executing template") 119 | return 120 | } 121 | } 122 | 123 | func (srv *DevWebserver) handleMonth(w http.ResponseWriter, req *http.Request) { 124 | vars := mux.Vars(req) 125 | 126 | layout := "2006-01" 127 | _, err := time.Parse(layout, vars["month"]) 128 | if err != nil { 129 | srv.RespondError(w, http.StatusBadRequest, "invalid date") 130 | return 131 | } 132 | 133 | tpl, err := ParseFilesTemplate() 134 | if err != nil { 135 | srv.log.WithError(err).Error("wroot: error parsing template") 136 | return 137 | } 138 | w.WriteHeader(http.StatusOK) 139 | 140 | data := *DummyHTMLData 141 | data.Title = vars["month"] 142 | data.Path = fmt.Sprintf("ethereum/mainnet/%s/index.html", vars["month"]) 143 | 144 | err = tpl.ExecuteTemplate(w, "base", &data) 145 | if err != nil { 146 | srv.log.WithError(err).Error("wroot: error executing template") 147 | return 148 | } 149 | } 150 | -------------------------------------------------------------------------------- /common/utils.go: -------------------------------------------------------------------------------- 1 | // Package common 
includes common utilities
package common

import (
	"encoding/json"
	"math/big"
	"net/http"
	"net/url"
	"runtime"
	"strings"
	"time"

	"github.com/dustin/go-humanize"
	"github.com/ethereum/go-ethereum/params"
	"github.com/flashbots/mev-boost-relay/beaconclient"
	"github.com/flashbots/relayscan/vars"
	"github.com/sirupsen/logrus"
)

// Check panics if err is non-nil; used for fatal, unrecoverable setup errors.
func Check(err error) {
	if err != nil {
		panic(err)
	}
}

// GetURI returns the full request URI with scheme, host, path and args.
// Userinfo is stripped and the path is replaced with the given one; query
// args already present on the URL are kept as-is (see GetURIWithQuery to add more).
func GetURI(url *url.URL, path string) string {
	u2 := *url
	u2.User = nil
	u2.Path = path
	return u2.String()
}

// GetURIWithQuery is like GetURI but additionally appends the given query arguments.
func GetURIWithQuery(url *url.URL, path string, queryArgs map[string]string) string {
	u2 := *url
	u2.User = nil
	u2.Path = path
	q := u2.Query()
	for key, value := range queryArgs {
		q.Add(key, value)
	}
	u2.RawQuery = q.Encode()
	return u2.String()
}

// EthToWei divides the given integer by params.Ether (1e18).
// NOTE(review): despite the name, this converts wei -> ETH (input is divided
// by 1e18, not multiplied) — confirm intent at call sites before relying on the name.
func EthToWei(eth *big.Int) *big.Float {
	if eth == nil {
		return big.NewFloat(0)
	}
	return new(big.Float).Quo(new(big.Float).SetInt(eth), new(big.Float).SetInt(big.NewInt(params.Ether)))
}

// PercentDiff returns (y/x)-1, i.e. the relative difference of y versus x.
func PercentDiff(x, y *big.Int) *big.Float {
	fx := new(big.Float).SetInt(x)
	fy := new(big.Float).SetInt(y)
	r := new(big.Float).Quo(fy, fx)
	return new(big.Float).Sub(r, big.NewFloat(1))
}

// WeiToEth converts a wei amount into an ETH-denominated float.
func WeiToEth(wei *big.Int) (ethValue *big.Float) {
	// wei / 10^18
	fbalance := new(big.Float)
	fbalance.SetString(wei.String())
	ethValue = new(big.Float).Quo(fbalance, big.NewFloat(1e18))
	return
}

// WeiStrToEthStr converts a decimal wei string to an ETH string with the given
// number of decimal places.
func WeiStrToEthStr(wei string, decimals int) string {
	weiBigInt := new(big.Int)
	weiBigInt.SetString(wei, 10)
	ethValue := WeiToEth(weiBigInt)
	return ethValue.Text('f', decimals)
}

// WeiToEthStr converts a wei amount to an ETH string with 6 decimal places.
func WeiToEthStr(wei *big.Int) string {
	return WeiToEth(wei).Text('f', 6)
}

// StrToBigInt parses a base-10 integer string into a big.Int.
func StrToBigInt(s string) *big.Int {
	i := new(big.Int)
	i.SetString(s, 10)
	return i
}

// StringSliceContains reports whether needle occurs in haystack.
func StringSliceContains(haystack []string, needle string) bool {
	for _, entry := range haystack {
		if entry == needle {
			return true
		}
	}
	return false
}

// TimeToSlot converts a wall-clock time into a beacon-chain slot number
// (12-second slots since vars.Genesis).
func TimeToSlot(t time.Time) uint64 {
	return uint64((t.Unix() - int64(vars.Genesis)) / 12) //nolint:gosec
}

// SlotToTime returns the UTC start time of the given beacon-chain slot.
func SlotToTime(slot uint64) time.Time {
	timestamp := (slot * 12) + uint64(vars.Genesis) //nolint:gosec
	return time.Unix(int64(timestamp), 0).UTC() //nolint:gosec
}

// MustParseDateTimeStr parses s as "2006-01-02" or "2006-01-02 15:04",
// panicking if neither layout matches.
func MustParseDateTimeStr(s string) time.Time {
	layout1 := "2006-01-02"
	layout2 := "2006-01-02 15:04"
	t, err := time.Parse(layout1, s)
	if err != nil {
		t, err = time.Parse(layout2, s)
		Check(err)
	}
	return t
}

// BeginningOfDay truncates t to midnight in t's own location.
func BeginningOfDay(t time.Time) time.Time {
	year, month, day := t.Date()
	return time.Date(year, month, day, 0, 0, 0, 0, t.Location())
}

// MustConnectBeaconNode connects to the given beacon node and returns it along
// with the current head slot. Panics if the node is unreachable, or if it is
// still syncing and allowSyncing is false.
func MustConnectBeaconNode(log *logrus.Entry, beaconNodeURI string, allowSyncing bool) (bn *beaconclient.ProdBeaconInstance, headSlot uint64) {
	bn = beaconclient.NewProdBeaconInstance(log, beaconNodeURI)
	syncStatus, err := bn.SyncStatus()
	Check(err)
	if syncStatus.IsSyncing && !allowSyncing {
		panic("beacon node is syncing")
	}
	return bn, syncStatus.HeadSlot
}

// ReverseBytes returns a new slice with the bytes of src in reverse order
// (src itself is not modified).
func ReverseBytes(src []byte) []byte {
	dst := make([]byte, len(src))
	copy(dst, src)
	for i := len(dst)/2 - 1; i >= 0; i-- {
		opp := len(dst) - 1 - i
		dst[i], dst[opp] = dst[opp], dst[i]
	}
	return dst
}

// GetMemMB returns the currently heap-allocated memory (runtime.MemStats.Alloc) in MiB.
func GetMemMB() uint64 {
	var m runtime.MemStats
	runtime.ReadMemStats(&m)
	return m.Alloc / 1024 / 1024
}

// HumanBytes returns size in the same format as AWS S3
func HumanBytes(n uint64) string {
147 | s := humanize.IBytes(n) 148 | s = strings.Replace(s, "KiB", "KB", 1) 149 | s = strings.Replace(s, "MiB", "MB", 1) 150 | s = strings.Replace(s, "GiB", "GB", 1) 151 | return s 152 | } 153 | 154 | func getJSON(url string, target interface{}) error { 155 | r, err := http.DefaultClient.Get(url) 156 | if err != nil { 157 | return err 158 | } 159 | defer r.Body.Close() //nolint:errcheck 160 | return json.NewDecoder(r.Body).Decode(target) 161 | } 162 | 163 | func MustGetLatestSlot() uint64 { 164 | url := "https://beaconcha.in/api/v1/slot/latest" 165 | var c struct { 166 | Data struct { 167 | Slot uint64 `json:"slot"` 168 | } 169 | } 170 | err := getJSON(url, &c) 171 | Check(err) 172 | return c.Data.Slot 173 | } 174 | -------------------------------------------------------------------------------- /services/bidcollect/website/templates/base.html: -------------------------------------------------------------------------------- 1 | {{ define "base" }} 2 | 3 | {{ $title:="Relayscan.io Bid Archive 📚" }} 4 | 5 | {{ if ne .Title "" }} 6 | {{ $title = (printf "%v | %v" .Title $title) }} 7 | {{ end }} 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | {{ $title }} 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | 34 | 35 | 36 | 37 | 38 | 39 | 40 | 41 | 42 | 43 | 101 | 106 | 107 | 108 | 109 |
110 | 111 | 112 | 113 |

{{ $title }}

114 |

115 | Docs · github.com/flashbots/relayscan 116 |

117 |

Illuminate, Democratize, Distribute

118 |
119 |
120 | {{ template "content" . }} 121 |
122 | 123 | 124 | 125 | {{ end }} -------------------------------------------------------------------------------- /services/bidcollect/website/generator.go: -------------------------------------------------------------------------------- 1 | package website 2 | 3 | // 4 | // Quick and dirty website generator 5 | // 6 | 7 | import ( 8 | "bytes" 9 | "fmt" 10 | "os" 11 | "os/exec" 12 | "path/filepath" 13 | "regexp" 14 | "strconv" 15 | "strings" 16 | 17 | "github.com/sirupsen/logrus" 18 | "github.com/tdewolff/minify" 19 | "github.com/tdewolff/minify/css" 20 | "github.com/tdewolff/minify/html" 21 | ) 22 | 23 | func BuildProdWebsite(log *logrus.Entry, outDir string, upload bool) { 24 | log.Infof("Creating build server in %s", outDir) 25 | err := os.MkdirAll(outDir, os.ModePerm) 26 | if err != nil { 27 | log.Fatal(err) 28 | } 29 | 30 | dir := "ethereum/mainnet/" 31 | 32 | // Setup minifier 33 | minifier := minify.New() 34 | minifier.AddFunc("text/html", html.Minify) 35 | minifier.AddFunc("text/css", css.Minify) 36 | 37 | // Load month folders from S3 38 | log.Infof("Getting folders from S3 for %s ...", dir) 39 | months, err := getFoldersFromS3(dir) 40 | if err != nil { 41 | log.Fatal(err) 42 | } 43 | fmt.Println("Months:", months) 44 | 45 | // build root page 46 | log.Infof("Building root page ...") 47 | rootPageData := HTMLData{ //nolint:exhaustruct 48 | Title: "", 49 | Path: "/index.html", 50 | EthMainnetMonths: months, 51 | } 52 | 53 | tpl, err := ParseIndexTemplate() 54 | if err != nil { 55 | log.Fatal(err) 56 | } 57 | 58 | buf := new(bytes.Buffer) 59 | err = tpl.ExecuteTemplate(buf, "base", rootPageData) 60 | if err != nil { 61 | log.Fatal(err) 62 | } 63 | 64 | // minify 65 | mBytes, err := minifier.Bytes("text/html", buf.Bytes()) 66 | if err != nil { 67 | log.Fatal(err) 68 | } 69 | 70 | // write to file 71 | fn := filepath.Join(outDir, "index.html") 72 | log.Infof("Writing to %s ...", fn) 73 | err = os.WriteFile(fn, mBytes, 0o0600) 74 | if err != nil { 75 | 
log.Fatal(err) 76 | } 77 | 78 | toUpload := []struct{ from, to string }{ 79 | {fn, "/"}, 80 | } 81 | 82 | // build files pages 83 | for _, month := range months { 84 | dir := "ethereum/mainnet/" + month + "/" 85 | log.Infof("Getting files from S3 for %s ...", dir) 86 | files, err := getFilesFromS3(dir) 87 | if err != nil { 88 | log.Fatal(err) 89 | } 90 | 91 | rootPageData := HTMLData{ //nolint:exhaustruct 92 | Title: month, 93 | Path: fmt.Sprintf("ethereum/mainnet/%s/index.html", month), 94 | 95 | CurrentNetwork: "Ethereum Mainnet", 96 | CurrentMonth: month, 97 | Files: files, 98 | } 99 | 100 | tpl, err := ParseFilesTemplate() 101 | if err != nil { 102 | log.Fatal(err) 103 | } 104 | 105 | buf := new(bytes.Buffer) 106 | err = tpl.ExecuteTemplate(buf, "base", rootPageData) 107 | if err != nil { 108 | log.Fatal(err) 109 | } 110 | 111 | // minify 112 | mBytes, err := minifier.Bytes("text/html", buf.Bytes()) 113 | if err != nil { 114 | log.Fatal(err) 115 | } 116 | 117 | // write to file 118 | _outDir := filepath.Join(outDir, dir) 119 | err = os.MkdirAll(_outDir, os.ModePerm) 120 | if err != nil { 121 | log.Fatal(err) 122 | } 123 | 124 | fn := filepath.Join(_outDir, "index.html") 125 | log.Infof("Writing to %s ...", fn) 126 | err = os.WriteFile(fn, mBytes, 0o0600) 127 | if err != nil { 128 | log.Fatal(err) 129 | } 130 | 131 | toUpload = append(toUpload, struct{ from, to string }{fn, "/" + dir}) 132 | } 133 | 134 | if upload { 135 | log.Info("Uploading to S3 ...") 136 | // for _, file := range toUpload { 137 | // fmt.Printf("- %s -> %s\n", file.from, file.to) 138 | // } 139 | 140 | for _, file := range toUpload { 141 | app := "./scripts/bidcollect/s3/upload-file-to-r2.sh" 142 | cmd := exec.Command(app, file.from, file.to) //nolint:gosec 143 | stdout, err := cmd.Output() 144 | if err != nil { 145 | log.Fatal(err) 146 | } 147 | fmt.Println(string(stdout)) 148 | } 149 | } 150 | } 151 | 152 | func getFoldersFromS3(dir string) ([]string, error) { 153 | folders := []string{} 
154 | 155 | app := "./scripts/bidcollect/s3/get-folders.sh" 156 | cmd := exec.Command(app, dir) 157 | stdout, err := cmd.Output() 158 | if err != nil { 159 | return folders, err 160 | } 161 | 162 | // Print the output 163 | lines := strings.Split(string(stdout), "\n") 164 | for _, line := range lines { 165 | if line != "" && strings.HasPrefix(line, "20") { 166 | folders = append(folders, strings.TrimSuffix(line, "/")) 167 | } 168 | } 169 | return folders, nil 170 | } 171 | 172 | func getFilesFromS3(month string) ([]FileEntry, error) { 173 | files := []FileEntry{} 174 | 175 | app := "./scripts/bidcollect/s3/get-files.sh" 176 | cmd := exec.Command(app, month) 177 | stdout, err := cmd.Output() 178 | if err != nil { 179 | return files, err 180 | } 181 | 182 | space := regexp.MustCompile(`\s+`) 183 | lines := strings.Split(string(stdout), "\n") 184 | for _, line := range lines { 185 | if line != "" { 186 | line = space.ReplaceAllString(line, " ") 187 | parts := strings.Split(line, " ") 188 | 189 | // parts[2] is the size 190 | size, err := strconv.ParseUint(parts[2], 10, 64) 191 | if err != nil { 192 | return files, err 193 | } 194 | 195 | filename := parts[3] 196 | 197 | if filename == "index.html" { 198 | continue 199 | } else if strings.HasSuffix(filename, ".csv.gz") { 200 | continue 201 | } 202 | 203 | files = append(files, FileEntry{ 204 | Filename: filename, 205 | Size: size, 206 | Modified: parts[1] + " " + parts[0], 207 | }) 208 | } 209 | } 210 | return files, nil 211 | } 212 | -------------------------------------------------------------------------------- /cmd/service/bidcollect.go: -------------------------------------------------------------------------------- 1 | package service 2 | 3 | /** 4 | * https://github.com/ultrasoundmoney/docs/blob/main/top-bid-websocket.md 5 | */ 6 | 7 | import ( 8 | "github.com/flashbots/relayscan/common" 9 | "github.com/flashbots/relayscan/services/bidcollect" 10 | "github.com/flashbots/relayscan/services/bidcollect/webserver" 
// Command-line flag variables for the bidcollect command.
var (
	// Bid source toggles
	collectUltrasoundStream bool
	collectGetHeader        bool
	collectDataAPI          bool
	useAllRelays            bool

	outDir    string
	outputTSV bool   // by default: CSV, but can be changed to TSV with this setting
	uid       string // used in output filenames, to avoid collisions between multiple collector instances

	useRedis  bool
	redisAddr string

	runDevServerOnly    bool // used to play with file listing website
	devServerListenAddr string

	buildWebsite       bool
	buildWebsiteUpload bool
	buildWebsiteOutDir string

	runWebserverOnly    bool // provides a SSE stream of new bids
	WebserverListenAddr string
)

// init registers all bidcollect command-line flags on bidCollectCmd.
func init() {
	// Bid sources
	bidCollectCmd.Flags().BoolVar(&collectUltrasoundStream, "ultrasound-stream", false, "use ultrasound top-bid stream")
	bidCollectCmd.Flags().BoolVar(&collectGetHeader, "get-header", false, "use getHeader API")
	bidCollectCmd.Flags().BoolVar(&collectDataAPI, "data-api", false, "use data API")
	bidCollectCmd.Flags().BoolVar(&useAllRelays, "all-relays", false, "use all relays")

	// for getHeader
	bidCollectCmd.Flags().StringVar(&beaconNodeURI, "beacon-uri", vars.DefaultBeaconURI, "beacon endpoint")

	// for saving to file
	bidCollectCmd.Flags().StringVar(&outDir, "out", "csv", "output directory for CSV/TSV")
	bidCollectCmd.Flags().BoolVar(&outputTSV, "out-tsv", false, "output as TSV (instead of CSV)")

	// utils
	bidCollectCmd.Flags().StringVar(&uid, "uid", "", "unique identifier for output files (to avoid collisions)")

	// Redis for pushing bids to
	bidCollectCmd.Flags().BoolVar(&useRedis, "redis", false, "Publish bids to Redis")
	bidCollectCmd.Flags().StringVar(&redisAddr, "redis-addr", "localhost:6379", "Redis address for publishing bids (optional)")

	// Webserver mode
	bidCollectCmd.Flags().BoolVar(&runWebserverOnly, "webserver", false, "only run webserver for SSE stream")
	bidCollectCmd.Flags().StringVar(&WebserverListenAddr, "webserver-addr", "localhost:8080", "listen address for webserver")

	// devserver provides the file listing for playing with file HTML
	bidCollectCmd.Flags().BoolVar(&runDevServerOnly, "devserver", false, "only run devserver to play with file listing website")
	bidCollectCmd.Flags().StringVar(&devServerListenAddr, "devserver-addr", "localhost:8095", "listen address for devserver")

	// building the S3 website
	bidCollectCmd.Flags().BoolVar(&buildWebsite, "build-website", false, "build file listing website")
	bidCollectCmd.Flags().BoolVar(&buildWebsiteUpload, "build-website-upload", false, "upload after building")
	bidCollectCmd.Flags().StringVar(&buildWebsiteOutDir, "build-website-out", "build", "output directory for website")
}
common.MustNewRelayEntry(vars.RelayFlashbots, false), 109 | common.MustNewRelayEntry(vars.RelayUltrasound, false), 110 | } 111 | if useAllRelays { 112 | relays = common.MustGetRelays() 113 | } 114 | 115 | log.Infof("Using %d relays", len(relays)) 116 | for index, relay := range relays { 117 | log.Infof("- relay #%d: %s", index+1, relay.Hostname()) 118 | } 119 | 120 | opts := bidcollect.BidCollectorOpts{ 121 | Log: log, 122 | UID: uid, 123 | Relays: relays, 124 | CollectUltrasoundStream: collectUltrasoundStream, 125 | CollectGetHeader: collectGetHeader, 126 | CollectDataAPI: collectDataAPI, 127 | BeaconNodeURI: beaconNodeURI, 128 | OutDir: outDir, 129 | OutputTSV: outputTSV, 130 | RedisAddr: redisAddr, 131 | UseRedis: useRedis, 132 | } 133 | 134 | bidCollector, err := bidcollect.NewBidCollector(&opts) 135 | if err != nil { 136 | log.WithError(err).Fatal("failed to create bid collector") 137 | } 138 | bidCollector.MustStart() 139 | }, 140 | } 141 | 142 | func fileListingDevServer() { 143 | webserver, err := website.NewDevWebserver(&website.DevWebserverOpts{ //nolint:exhaustruct 144 | ListenAddress: devServerListenAddr, 145 | Log: log, 146 | }) 147 | if err != nil { 148 | log.Fatal(err) 149 | } 150 | err = webserver.StartServer() 151 | if err != nil { 152 | log.Fatal(err) 153 | } 154 | } 155 | -------------------------------------------------------------------------------- /cmd/service/backfill_runner.go: -------------------------------------------------------------------------------- 1 | package service 2 | 3 | import ( 4 | "os" 5 | "os/signal" 6 | "syscall" 7 | "time" 8 | 9 | "github.com/ethereum/go-ethereum/ethclient" 10 | "github.com/flashbots/relayscan/cmd/core" 11 | "github.com/flashbots/relayscan/common" 12 | "github.com/flashbots/relayscan/database" 13 | "github.com/flashbots/relayscan/vars" 14 | "github.com/spf13/cobra" 15 | ) 16 | 17 | var ( 18 | runnerInterval time.Duration 19 | runnerEthNodeURI string 20 | runnerEthBackupURI string 21 | runnerLimit uint64 
22 | runnerNumThreads uint64 23 | runnerRunOnce bool 24 | runnerSkipBackfill bool 25 | runnerSkipCheckValue bool 26 | runnerRelay string 27 | runnerMinSlot int64 28 | ) 29 | 30 | func init() { 31 | backfillRunnerCmd.Flags().DurationVar(&runnerInterval, "interval", time.Duration(vars.DefaultBackfillRunnerInterval)*time.Minute, "interval between runs") 32 | backfillRunnerCmd.Flags().StringVar(&runnerEthNodeURI, "eth-node", vars.DefaultEthNodeURI, "eth node URI") 33 | backfillRunnerCmd.Flags().StringVar(&runnerEthBackupURI, "eth-node-backup", vars.DefaultEthBackupNodeURI, "eth backup node URI") 34 | backfillRunnerCmd.Flags().Uint64Var(&runnerLimit, "limit", 1000, "limit for check-payload-value") 35 | backfillRunnerCmd.Flags().Uint64Var(&runnerNumThreads, "threads", uint64(vars.DefaultBackfillRunnerNumThreads), "number of threads for check-payload-value") 36 | backfillRunnerCmd.Flags().BoolVar(&runnerRunOnce, "once", false, "run once and exit") 37 | backfillRunnerCmd.Flags().BoolVar(&runnerSkipBackfill, "skip-backfill", false, "skip data-api-backfill step") 38 | backfillRunnerCmd.Flags().BoolVar(&runnerSkipCheckValue, "skip-check-value", false, "skip check-payload-value step") 39 | backfillRunnerCmd.Flags().StringVar(&runnerRelay, "relay", "", "specific relay only (e.g. 
// backfillRunnerCmd continuously runs data-api-backfill followed by
// check-payload-value on an interval, until interrupted (or just once with
// --once). Cycle errors are logged but do not stop the runner.
var backfillRunnerCmd = &cobra.Command{
	Use:   "backfill-runner",
	Short: "Continuously run data-api-backfill and check-payload-value",
	Run: func(cmd *cobra.Command, args []string) {
		var err error
		var relays []common.RelayEntry

		log.Infof("Relayscan backfill-runner %s starting...", vars.Version)
		log.Infof("Interval: %s", runnerInterval)

		// Get relays: either a single one ("fb"/"us" shorthand or a full URL
		// via --relay), or the full configured relay list.
		if runnerRelay != "" {
			var relayEntry common.RelayEntry
			switch runnerRelay {
			case "fb":
				relayEntry, err = common.NewRelayEntry(vars.RelayURLs[0], false)
			case "us":
				relayEntry, err = common.NewRelayEntry(vars.RelayURLs[1], false)
			default:
				relayEntry, err = common.NewRelayEntry(runnerRelay, false)
			}
			if err != nil {
				log.WithField("relay", runnerRelay).WithError(err).Fatal("failed to decode relay")
			}
			relays = []common.RelayEntry{relayEntry}
		} else {
			relays, err = common.GetRelays()
			if err != nil {
				log.WithError(err).Fatal("failed to get relays")
			}
		}
		log.Infof("Using %d relays", len(relays))
		for i, relay := range relays {
			log.Infof("- relay #%d: %s", i+1, relay.Hostname())
		}

		if runnerMinSlot != 0 {
			log.Infof("Using min-slot: %d", runnerMinSlot)
		}

		// Connect to Postgres
		db := database.MustConnectPostgres(log, vars.DefaultPostgresDSN)

		// Connect to eth nodes (only needed for the check-payload-value step)
		var ethClient, ethClient2 *ethclient.Client
		if !runnerSkipCheckValue {
			if runnerEthNodeURI == "" {
				log.Fatal("eth-node is required for check-payload-value")
			}
			ethClient, err = ethclient.Dial(runnerEthNodeURI)
			if err != nil {
				log.WithError(err).Fatalf("failed to connect to eth node: %s", runnerEthNodeURI)
			}
			log.Infof("Connected to eth node: %s", runnerEthNodeURI)

			// Backup client defaults to the primary client if no backup URI is set.
			ethClient2 = ethClient
			if runnerEthBackupURI != "" {
				ethClient2, err = ethclient.Dial(runnerEthBackupURI)
				if err != nil {
					log.WithError(err).Fatalf("failed to connect to backup eth node: %s", runnerEthBackupURI)
				}
				log.Infof("Connected to backup eth node: %s", runnerEthBackupURI)
			}
		}

		// Prepare check-payload-value options
		checkOpts := core.CheckPayloadValueOpts{
			Limit:      runnerLimit,
			NumThreads: runnerNumThreads,
		}

		// Run function: one full cycle. Step errors are logged but not fatal,
		// so the runner can retry on the next tick.
		runBackfillCycle := func() {
			log.Info("Starting backfill cycle...")

			// Step 1: data-api-backfill
			if !runnerSkipBackfill {
				log.Info("Running data-api-backfill...")
				err := core.RunBackfill(db, relays, 0, runnerMinSlot)
				if err != nil {
					log.WithError(err).Error("data-api-backfill failed")
				}
			}

			// Step 2: check-payload-value
			if !runnerSkipCheckValue {
				log.Info("Running check-payload-value...")
				err := core.RunCheckPayloadValue(db, ethClient, ethClient2, checkOpts)
				if err != nil {
					log.WithError(err).Error("check-payload-value failed")
				}
			}

			log.Info("Backfill cycle complete")
		}

		// Run once immediately
		runBackfillCycle()

		if runnerRunOnce {
			log.Info("Run once mode, exiting")
			return
		}

		// Set up signal handling (SIGINT/SIGTERM -> clean shutdown)
		sigC := make(chan os.Signal, 1)
		signal.Notify(sigC, syscall.SIGINT, syscall.SIGTERM)

		// Run on interval
		ticker := time.NewTicker(runnerInterval)
		defer ticker.Stop()

		log.Infof("Waiting for next run in %s...", runnerInterval)

		for {
			select {
			case <-ticker.C:
				runBackfillCycle()
				log.Infof("Waiting for next run in %s...", runnerInterval)
			case sig := <-sigC:
				log.Infof("Received signal %s, shutting down...", sig)
				return
			}
		}
	},
}
poller.Log.Infof("[data-api poller] scheduling polling for upcoming slot: %d (%s - in %s)", nextSlot, tNextSlot.String(), untilNextSlot.String()) 65 | 66 | // Schedule polling at t-4, t-2, t=0, t=2, t=20 67 | go poller.pollRelaysForBids(nextSlot, -4*time.Second) 68 | go poller.pollRelaysForBids(nextSlot, -2*time.Second) 69 | go poller.pollRelaysForBids(nextSlot, -500*time.Millisecond) 70 | go poller.pollRelaysForBids(nextSlot, 500*time.Millisecond) 71 | go poller.pollRelaysForBids(nextSlot, 2*time.Second) 72 | go poller.pollRelaysForBids(nextSlot, 20*time.Second) 73 | 74 | // wait until next slot 75 | time.Sleep(untilNextSlot) 76 | } 77 | } 78 | 79 | // pollRelaysForBids will poll data api for given slot with t seconds offset 80 | func (poller *DataAPIPoller) pollRelaysForBids(slot uint64, tOffset time.Duration) { 81 | tSlotStart := common.SlotToTime(slot) 82 | tStart := tSlotStart.Add(tOffset) 83 | waitTime := tStart.Sub(time.Now().UTC()) 84 | 85 | // poller.Log.Debugf("[data-api poller] - prepare polling for slot %d t %d (tSlotStart: %s, tStart: %s, waitTime: %s)", slot, t, tSlotStart.String(), tStart.String(), waitTime.String()) 86 | if waitTime < 0 { 87 | poller.Log.Debugf("[data-api poller] waitTime is negative: %s", waitTime.String()) 88 | return 89 | } 90 | 91 | // Wait until expected time 92 | time.Sleep(waitTime) 93 | 94 | // Poll for bids now 95 | untilSlot := tSlotStart.Sub(time.Now().UTC()) 96 | poller.Log.Debugf("[data-api poller] polling for slot %d at t=%s (tNow=%s)", slot, tOffset.String(), (untilSlot * -1).String()) 97 | 98 | for _, relay := range poller.Relays { 99 | go poller._pollRelayForBids(slot, relay, tOffset) 100 | } 101 | } 102 | 103 | func (poller *DataAPIPoller) _pollRelayForBids(slot uint64, relay common.RelayEntry, t time.Duration) { 104 | // log := poller.Log.WithField("relay", relay.Hostname()).WithField("slot", slot) 105 | log := poller.Log.WithFields(logrus.Fields{ 106 | "relay": relay.Hostname(), 107 | "slot": slot, 108 | "t": 
t.String(), 109 | }) 110 | // log.Debugf("[data-api poller] polling relay %s for slot %d", relay.Hostname(), slot) 111 | 112 | // build query URL 113 | path := "/relay/v1/data/bidtraces/builder_blocks_received" 114 | url := common.GetURIWithQuery(relay.URL, path, map[string]string{"slot": fmt.Sprintf("%d", slot)}) 115 | // log.Debugf("[data-api poller] Querying %s", url) 116 | 117 | // start query 118 | var data []relaycommon.BidTraceV2WithTimestampJSON 119 | timeRequestStart := time.Now().UTC() 120 | code, err := common.SendHTTPRequest(context.Background(), *http.DefaultClient, http.MethodGet, url, nil, &data) 121 | timeRequestEnd := time.Now().UTC() 122 | if err != nil { 123 | log.WithError(err).Error("[data-api poller] failed to get data") 124 | return 125 | } 126 | log = log.WithFields(logrus.Fields{"code": code, "entries": len(data), "durationMs": timeRequestEnd.Sub(timeRequestStart).Milliseconds()}) 127 | log.Debug("[data-api poller] request complete") 128 | 129 | // send data to channel 130 | poller.BidC <- DataAPIPollerBidsMsg{Bids: data, Relay: relay, ReceivedAt: time.Now().UTC()} 131 | } 132 | 133 | func DataAPIToCommonBids(bids DataAPIPollerBidsMsg) []*types.CommonBid { 134 | commonBids := make([]*types.CommonBid, 0, len(bids.Bids)) 135 | for _, bid := range bids.Bids { 136 | // ensure it works even if some relays don't provide the timestamp in ms by converting regular timestamp to ms 137 | bidTimestampMs := bid.TimestampMs 138 | if bidTimestampMs == 0 && bid.Timestamp > 0 { 139 | bidTimestampMs = bid.Timestamp * 1000 140 | } 141 | 142 | commonBids = append(commonBids, &types.CommonBid{ 143 | SourceType: types.SourceTypeDataAPI, 144 | ReceivedAtMs: bids.ReceivedAt.UnixMilli(), 145 | 146 | TimestampMs: bidTimestampMs, 147 | Slot: bid.Slot, 148 | BlockNumber: bid.BlockNumber, 149 | BlockHash: strings.ToLower(bid.BlockHash), 150 | ParentHash: strings.ToLower(bid.ParentHash), 151 | BuilderPubkey: strings.ToLower(bid.BuilderPubkey), 152 | Value: bid.Value, 
153 | Relay: bids.Relay.Hostname(), 154 | ProposerPubkey: strings.ToLower(bid.ProposerPubkey), 155 | ProposerFeeRecipient: strings.ToLower(bid.ProposerFeeRecipient), 156 | OptimisticSubmission: bid.OptimisticSubmission, 157 | }) 158 | } 159 | return commonBids 160 | } 161 | -------------------------------------------------------------------------------- /database/types.go: -------------------------------------------------------------------------------- 1 | package database 2 | 3 | import ( 4 | "database/sql" 5 | "time" 6 | ) 7 | 8 | var ( 9 | BuilderStatsEntryTypeExtraData = "extra_data" 10 | BuilderStatsEntryTypeBuilderPubkey = "builder_pubkey" 11 | ) 12 | 13 | func NewNullBool(b bool) sql.NullBool { 14 | return sql.NullBool{ 15 | Bool: b, 16 | Valid: true, 17 | } 18 | } 19 | 20 | func NewNullInt64(i int64) sql.NullInt64 { 21 | return sql.NullInt64{ 22 | Int64: i, 23 | Valid: true, 24 | } 25 | } 26 | 27 | func NewNullString(s string) sql.NullString { 28 | return sql.NullString{ 29 | String: s, 30 | Valid: true, 31 | } 32 | } 33 | 34 | func NewNullTime(t time.Time) sql.NullTime { 35 | return sql.NullTime{ 36 | Time: t, 37 | Valid: true, 38 | } 39 | } 40 | 41 | type DataAPIPayloadDeliveredEntry struct { 42 | ID int64 `db:"id"` 43 | InsertedAt time.Time `db:"inserted_at"` 44 | Relay string `db:"relay"` 45 | 46 | Epoch uint64 `db:"epoch"` 47 | Slot uint64 `db:"slot"` 48 | 49 | ParentHash string `db:"parent_hash"` 50 | BlockHash string `db:"block_hash"` 51 | BuilderPubkey string `db:"builder_pubkey"` 52 | ProposerPubkey string `db:"proposer_pubkey"` 53 | ProposerFeeRecipient string `db:"proposer_fee_recipient"` 54 | GasLimit uint64 `db:"gas_limit"` 55 | GasUsed uint64 `db:"gas_used"` 56 | ValueClaimedWei string `db:"value_claimed_wei"` 57 | ValueClaimedEth string `db:"value_claimed_eth"` 58 | NumTx sql.NullInt64 `db:"num_tx"` 59 | BlockNumber sql.NullInt64 `db:"block_number"` 60 | ExtraData string `db:"extra_data"` 61 | 62 | FoundOnChain sql.NullBool 
// DataAPIBuilderBidEntry is a database row for a builder bid observed via the
// relay data API (`data_api_builder_bid` style tables; see `db` tags).
type DataAPIBuilderBidEntry struct {
	ID         int64     `db:"id"`
	InsertedAt time.Time `db:"inserted_at"`
	Relay      string    `db:"relay"`

	Epoch uint64 `db:"epoch"`
	Slot  uint64 `db:"slot"`

	ParentHash           string        `db:"parent_hash"`
	BlockHash            string        `db:"block_hash"`
	BuilderPubkey        string        `db:"builder_pubkey"`
	ProposerPubkey       string        `db:"proposer_pubkey"`
	ProposerFeeRecipient string        `db:"proposer_fee_recipient"`
	GasLimit             uint64        `db:"gas_limit"`
	GasUsed              uint64        `db:"gas_used"`
	Value                string        `db:"value"`
	NumTx                sql.NullInt64 `db:"num_tx"`
	BlockNumber          sql.NullInt64 `db:"block_number"`
	Timestamp            time.Time     `db:"timestamp"`
}

// SignedBuilderBidEntry is a database row for a signed builder bid received
// from a relay getHeader response, including request timing information.
type SignedBuilderBidEntry struct {
	ID         int64     `db:"id"`
	InsertedAt time.Time `db:"inserted_at"`

	Relay       string    `db:"relay"`
	RequestedAt time.Time `db:"requested_at"`
	ReceivedAt  time.Time `db:"received_at"`
	LatencyMS   int64     `db:"duration_ms"` // request round-trip time in milliseconds

	Slot           uint64 `db:"slot"`
	ParentHash     string `db:"parent_hash"`
	ProposerPubkey string `db:"proposer_pubkey"`

	Pubkey    string `db:"pubkey"`
	Signature string `db:"signature"`

	Value        string `db:"value"`
	FeeRecipient string `db:"fee_recipient"`
	BlockHash    string `db:"block_hash"`
	BlockNumber  uint64 `db:"block_number"`
	GasLimit     uint64 `db:"gas_limit"`
	GasUsed      uint64 `db:"gas_used"`
	ExtraData    string `db:"extra_data"`
	PrevRandao   string `db:"prev_randao"`
	Timestamp    uint64 `db:"timestamp"`
	Epoch        uint64 `db:"epoch"`
}

// BlockBuilderEntry is a database row describing a known block builder pubkey.
type BlockBuilderEntry struct {
	ID            int64     `db:"id"`
	InsertedAt    time.Time `db:"inserted_at"`
	BuilderPubkey string    `db:"builder_pubkey"`
	Description   string    `db:"description"`
}

// TopRelayEntry is an aggregated stats row: payloads delivered per relay.
// Percent is computed in application code (no `db` tag).
type TopRelayEntry struct {
	Relay       string `db:"relay" json:"relay"`
	NumPayloads uint64 `db:"payloads" json:"num_payloads"`
	Percent     string `json:"percent"`
}

// TopBuilderEntry is an aggregated stats row: blocks produced per builder
// (grouped by extra_data). Percent and Aliases are filled in by application code.
type TopBuilderEntry struct {
	ExtraData string   `db:"extra_data" json:"extra_data"`
	NumBlocks uint64   `db:"blocks" json:"num_blocks"`
	Percent   string   `json:"percent"`
	Aliases   []string `json:"aliases,omitempty"`
}

// type RelayProfitability struct {
// 	Relay       string `db:"relay" json:"relay"`
// 	TimeSince   time.Time
// 	TimeUntil   time.Time
// 	NumPayloads uint64 `db:"payloads" json:"num_payloads"`
// }

// BuilderProfitEntry is an aggregated stats row for builder profitability
// (grouped by extra_data); values are stored as strings.
type BuilderProfitEntry struct {
	ExtraData string   `db:"extra_data" json:"extra_data"`
	Aliases   []string `json:"aliases,omitempty"`

	NumBlocks           uint64 `db:"blocks" json:"num_blocks"`
	NumBlocksProfit     uint64 `db:"blocks_profit" json:"num_blocks_profit"`
	NumBlocksSubsidised uint64 `db:"blocks_sub" json:"num_blocks_sub"`

	ProfitPerBlockAvg    string `db:"avg_profit_per_block" json:"avg_profit_per_block"`
	ProfitPerBlockMedian string `db:"median_profit_per_block" json:"median_profit_per_block"`

	ProfitTotal    string `db:"total_profit" json:"profit_total"`
	SubsidiesTotal string `db:"total_subsidies" json:"subsidies_total"`
}

// BuilderStatsEntry is a database row for precomputed builder statistics over
// a time window. Type is one of the BuilderStatsEntryType* values declared above.
type BuilderStatsEntry struct {
	ID         int64     `db:"id"`
	InsertedAt time.Time `db:"inserted_at"`

	Type  string `db:"type" json:"type"`
	Hours int    `db:"hours" json:"hours"` // window length in hours

	TimeStart   time.Time `db:"time_start" json:"time_start"`
	TimeEnd     time.Time `db:"time_end" json:"time_end"`
	BuilderName string    `db:"builder_name" json:"builder_name"`

	ExtraData      string `db:"extra_data" json:"extra_data"`
	BuilderPubkeys string `db:"builder_pubkeys" json:"builder_pubkeys"`
	BlocksIncluded int    `db:"blocks_included" json:"blocks_included"`
}

// TmpPayloadsForExtraDataEntry is a temporary/intermediate row used when
// updating payload extra_data (see cmd/util/update-extradata.go).
type TmpPayloadsForExtraDataEntry struct {
	Slot           uint64       `db:"slot"`
	ExtraData      string       `db:"extra_data"`
	InsertedAt     time.Time    `db:"inserted_at"`
	BlockTimestamp sql.NullTime `db:"block_timestamp"`
}
Top bids only 14 | 15 | ### Bids source types 16 | 17 | - `0`: [GetHeader polling](https://ethereum.github.io/builder-specs/#/Builder/getHeader) 18 | - `1`: [Data API polling](https://flashbots.github.io/relay-specs/#/Data/getReceivedBids) 19 | - `2`: [Ultrasound top-bid websocket stream](https://github.com/ultrasoundmoney/docs/blob/main/top-bid-websocket.md) 20 | 21 | ### Collected fields 22 | 23 | | Field | Description | Source Types | 24 | | ------------------------ | ---------------------------------------------------------- | ------------ | 25 | | `source_type` | 0: GetHeader, 1: Data API, 2: Ultrasound stream | all | 26 | | `received_at_ms` | When the bid was first received by the relayscan collector | all | 27 | | `timestamp_ms` | When the bid was received by the relay | 1 + 2 | 28 | | `slot` | Slot the bid was submitted for | all | 29 | | `slot_t_ms` | How late into the slot the bid was received by the relay | 1 + 2 | 30 | | `value` | Bid value in wei | all | 31 | | `block_hash` | Block hash | all | 32 | | `parent_hash` | Parent hash | all | 33 | | `builder_pubkey` | Builder pubkey | 1 + 2 | 34 | | `block_number` | Block number | all | 35 | | `block_fee_recipient` | Block fee recipient | 2 | 36 | | `relay` | Relay name | all | 37 | | `proposer_pubkey` | Proposer pubkey | 1 | 38 | | `proposer_fee_recipient` | Proposer fee recipient | 1 | 39 | | `optimistic_submission` | Optimistic submission flag | 1 | 40 | 41 | ### See also 42 | 43 | - Live data: https://bidarchive.relayscan.io 44 | - [Pull request #37](https://github.com/flashbots/relayscan/pull/37) 45 | - [Example output](https://gist.github.com/metachris/061c0443afb8b8d07eed477a848fa395) 46 | 47 | --- 48 | 49 | ## Notes on data sources 50 | 51 | Source types: 52 | - `0`: `GetHeader` polling 53 | - `1`: Data API polling 54 | - `2`: Ultrasound top-bid Websockets stream 55 | 56 | Different data sources have different limitations: 57 | 58 | - `GetHeader` polling 
- Data API polling ([code](/services/bidcollect/data-api-poller.go)):
  - Has all the necessary information
  - Due to rate limits, we only poll at specific times
  - Polling at t-4, t-2, t-0.5, t+0.5, t+2 and t+20 (see also [`/services/bidcollect/data-api-poller.go`](/services/bidcollect/data-api-poller.go#64-69))
- Ultrasound websocket stream ([code](/services/bidcollect/ultrasound-stream.go)):
  - doesn't expose optimistic, thus that field is always `false`
# Start the collector with the `--redis` flag:
quantile(0.5)(value) as p50, quantile(0.75)(value) as p75, quantile(0.9)(value) as p90, max(value) FROM '$fn' WHERE value > 1000000000000000000 AND builder_pubkey IN ('0x...', '0x...', '0x...') FORMAT TabSeparatedWithNames;" 142 | ``` 143 | 144 | --- 145 | 146 | ## Architecture 147 | 148 | ![Architecture](./img/bidcollect-overview.png) 149 | 150 | 151 | --- 152 | 153 | ## TODO 154 | 155 | - Dockerization -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # relayscan 2 | 3 | [![Goreport status](https://goreportcard.com/badge/github.com/flashbots/relayscan)](https://goreportcard.com/report/github.com/flashbots/relayscan) 4 | [![Test status](https://github.com/flashbots/relayscan/actions/workflows/checks.yml/badge.svg?branch=main)](https://github.com/flashbots/relayscan/actions?query=workflow%3A%22Checks%22) 5 | [![Docker hub](https://badgen.net/docker/size/flashbots/relayscan?icon=docker&label=image)](https://hub.docker.com/r/flashbots/relayscan/tags) 6 | 7 | Monitoring, analytics & data for Ethereum MEV-Boost builders and relays 8 | 9 | **Running on https://relayscan.io** 10 | 11 | Additional URLs: 12 | 13 | - Builder profit: 14 | - Last 24h: https://www.relayscan.io/builder-profit?t=24h 15 | - Last 7d: https://www.relayscan.io/builder-profit?t=7d 16 | - Stats in markdown: 17 | - https://www.relayscan.io/overview/md 18 | - https://www.relayscan.io/builder-profit/md 19 | - Stats in JSON: 20 | - https://www.relayscan.io/overview/json 21 | - https://www.relayscan.io/overview/json?t=7d 22 | - https://www.relayscan.io/builder-profit/json 23 | - https://www.relayscan.io/builder-profit/json?t=7d 24 | - Daily stats: 25 | - https://www.relayscan.io/stats/day/2023-06-20 26 | - https://www.relayscan.io/stats/day/2023-06-20/json 27 | 28 | **Bid Archive** 29 | 30 | https://bidarchive.relayscan.io 31 | 32 | ## Notes 33 | 34 | - Work in progress 35 | - At 
its core, a set of tools to fill and show a postgres database 36 | - Multiple relays can serve a payload for the same slot (if the winning builder sent the best bid to multiple relays, and the proposer asks for a payload from all of them) 37 | - Comments and feature requests: [@relayscan_io](https://twitter.com/relayscan_io) 38 | - License: AGPL 39 | - Maintainer: [@metachris](https://twitter.com/metachris) 40 | 41 | --- 42 | 43 | ## Overview 44 | 45 | * Uses PostgreSQL as data store 46 | * Configuration: 47 | * Relays and builder addresses in [`config-mainnet.yaml`](/config-mainnet.yaml) 48 | * Builder aliases in [`/vars/builder_aliases.go`](/vars/builder_aliases.go) 49 | * Version and common env vars in [`/vars/vars.go`](/vars/vars.go) 50 | * Some environment variables are required, see [`.env.example`](/.env.example) 51 | 52 | ### Config file 53 | 54 | Relay URLs and builder addresses are configured via a YAML config file. By default, `config-mainnet.yaml` is used. 55 | 56 | ```bash 57 | # Use default config (config-mainnet.yaml) 58 | ./relayscan 59 | 60 | # Use a custom config file 61 | ./relayscan --config config-hoodi.yaml 62 | 63 | # Or via environment variable 64 | CONFIG_FILE=config-hoodi.yaml ./relayscan 65 | ``` 66 | * Saving and checking payloads is split into phases/commands: 67 | * [`data-api-backfill`](/cmd/core/data-api-backfill.go) -- queries the data API of all relays and puts that data into the database 68 | * [`check-payload-value`](/cmd/core/check-payload-value.go) -- checks all new database entries for payment validity 69 | * [`update-builder-stats`](/cmd/core/update-builder-stats.go) -- create daily builder stats and save to database 70 | 71 | 72 | ## Getting started 73 | 74 | ### Run 75 | 76 | You can either build relayscan from the repository, or use the Docker image: 77 | 78 | ```bash 79 | # Build & run 80 | make build 81 | ./relayscan help 82 | ./relayscan version 83 | 84 | # Run with Docker 85 | docker run flashbots/relayscan 86 | 
docker run flashbots/relayscan /app/relayscan version 87 | ``` 88 | 89 | More example commands: 90 | 91 | ```bash 92 | # Grab delivered payloads from relays data API, and fill up database 93 | ./relayscan core data-api-backfill # for all slots since the merge 94 | ./relayscan core data-api-backfill --min-slot 9590900 # since a given slot (good for dev/testing) 95 | 96 | # Double-check new entries for valid payments (and other) 97 | ./relayscan core check-payload-value 98 | 99 | # Update daily builder inclusion stats 100 | ./relayscan core update-builder-stats --start 2023-06-04 --end 2023-06-06 # update daily stats for 2023-06-04 and 2023-06-05 101 | ./relayscan core update-builder-stats --start 2023-06-04 # update daily stats for 2023-06-04 until today 102 | ./relayscan core update-builder-stats --backfill # update daily stats since last entry, until today 103 | 104 | # Start the website (--dev reloads the template on every page load, for easier iteration) 105 | ./relayscan service website --dev 106 | 107 | # 108 | # backfill-runner: Backfill + Check Service 109 | # - a single service to continuously run these 110 | # - default interval: 5 minutes 111 | # 112 | # Test with just one relay (flashbots) 113 | ./relayscan service backfill-runner --relay fb 114 | 115 | # Test with ultrasound relay and limited slots (last 50 slots) 116 | ./relayscan service backfill-runner --relay us --min-slot -50 117 | 118 | # Combine flags for quick testing 119 | ./relayscan service backfill-runner --relay fb --min-slot -50 --skip-check-value 120 | 121 | # Custom interval 122 | ./relayscan service backfill-runner --interval 10m 123 | 124 | # Run once and exit (useful for testing) 125 | ./relayscan service backfill-runner --once 126 | 127 | # Skip one of the steps 128 | ./relayscan service backfill-runner --skip-backfill 129 | ./relayscan service backfill-runner --skip-check-value 130 | ``` 131 | 132 | ### Test & development 133 | 134 | Start by filling the DB with relay data 
(delivered payloads), and checking it: 135 | 136 | ```bash 137 | # Copy .env.example to .env.local, update ETH_NODE_URI and source it 138 | source .env.local 139 | 140 | # Start Postgres Docker container 141 | make dev-postgres-start 142 | 143 | # Query only a single relay, and for the shortest time possible 144 | go run . core data-api-backfill --relay us --min-slot -2000 145 | 146 | # Now the DB has data, check it (and update in DB) 147 | go run . core check-payload-value 148 | 149 | # Can also check a single slot only: 150 | go run . core check-payload-value --slot 151 | 152 | # Run the website 153 | go run . service website --dev 154 | 155 | # Simplify working with read-only DB or large amount of data: 156 | DB_DONT_APPLY_SCHEMA=1 SKIP_7D_STATS=1 go run . service website --dev 157 | 158 | # Now you can open http://localhost:9060 in your browser and see the data 159 | open http://localhost:9060 160 | 161 | # You can also reset the database: 162 | make dev-postgres-wipe 163 | 164 | # See the Makefile for more commands 165 | make help 166 | ``` 167 | 168 | For linting and testing: 169 | 170 | ```bash 171 | # Install dependencies 172 | go install mvdan.cc/gofumpt@latest 173 | go install honnef.co/go/tools/cmd/staticcheck@v0.4.3 174 | go install github.com/golangci/golangci-lint/cmd/golangci-lint@v1.51.2 175 | 176 | # Lint and test 177 | make lint 178 | make test 179 | make test-race 180 | 181 | # Format the code 182 | make fmt 183 | ``` 184 | 185 | 186 | ### Updating relayscan 187 | 188 | Notes for updating relayscan: 189 | 190 | - Relay payloads are selected by `inserted_at`. When adding a new relay, you probably want to manually subtract a day from `inserted_at` so they don't show up all for today (`UPDATE mainnet_data_api_payload_delivered SET inserted_at = inserted_at - INTERVAL '1 DAY' WHERE relay='newrelay.xyz';`). 
See also https://github.com/flashbots/relayscan/issues/28 191 | -------------------------------------------------------------------------------- /services/website/webserver_data.go: -------------------------------------------------------------------------------- 1 | package website 2 | 3 | import ( 4 | "bytes" 5 | "database/sql" 6 | "errors" 7 | "fmt" 8 | "time" 9 | 10 | "github.com/flashbots/relayscan/common" 11 | "github.com/flashbots/relayscan/database" 12 | "github.com/sirupsen/logrus" 13 | ) 14 | 15 | func (srv *Webserver) startRootHTMLUpdateLoops() { 16 | // kick off latest slot update 17 | go srv.latestSlotUpdateLoop() 18 | 19 | // kick off 24h update 20 | go srv.rootDataUpdateLoop(24) 21 | 22 | // kick off 12h update 23 | go srv.rootDataUpdateLoop(12) 24 | 25 | // kick off 1h update 26 | go srv.rootDataUpdateLoop(1) 27 | 28 | // kick off 7d update 29 | if envSkip7dStats { 30 | srv.log.Info("skipping 7d stats") 31 | } else { 32 | go srv.rootDataUpdateLoop(7 * 24) 33 | } 34 | } 35 | 36 | func (srv *Webserver) latestSlotUpdateLoop() { 37 | for { 38 | lastPayload, err := srv.db.GetLatestDeliveredPayload() 39 | if errors.Is(err, sql.ErrNoRows) { 40 | srv.log.Info("No last delivered payload entry found") 41 | time.Sleep(1 * time.Minute) 42 | continue 43 | } else if err != nil { 44 | srv.log.WithError(err).Error("Failed to get last delivered payload entry") 45 | time.Sleep(1 * time.Minute) 46 | continue 47 | } 48 | 49 | // Process the latest entry 50 | srv.latestSlot.Store(lastPayload.Slot) 51 | srv.log.WithFields(logrus.Fields{ 52 | "slot": lastPayload.Slot, 53 | }).Infof("Latest database entry found for slot %d", lastPayload.Slot) 54 | 55 | // Wait a bit before checking again 56 | time.Sleep(1 * time.Minute) 57 | } 58 | } 59 | 60 | func (srv *Webserver) rootDataUpdateLoop(hours int) { 61 | for { 62 | startTime := time.Now() 63 | srv.log.Infof("updating %dh stats...", hours) 64 | 65 | // Get data from database 66 | stats, err := 
srv.getStatsForHours(time.Duration(hours) * time.Hour) 67 | if err != nil { 68 | srv.log.WithError(err).Errorf("Failed to get stats for %dh", hours) 69 | continue 70 | } 71 | 72 | srv.log.WithField("duration", time.Since(startTime).String()).Infof("updated %dh stats", hours) 73 | 74 | // Generate HTML 75 | overviewBytes, profitBytes, err := srv._renderRootHTML(stats) 76 | if err != nil { 77 | srv.log.WithError(err).Error("Failed to render root HTML") 78 | continue 79 | } 80 | 81 | // Save the HTML 82 | htmlKeyOverview := fmt.Sprintf("%s-overview", stats.TimeStr) 83 | htmlKeyProfit := fmt.Sprintf("%s-builder-profit", stats.TimeStr) 84 | 85 | srv.dataLock.Lock() 86 | srv.stats[stats.TimeStr] = stats 87 | srv.html[htmlKeyOverview] = &overviewBytes 88 | srv.html[htmlKeyProfit] = &profitBytes 89 | srv.dataLock.Unlock() 90 | 91 | // Wait a bit and then continue 92 | time.Sleep(1 * time.Minute) 93 | } 94 | } 95 | 96 | func (srv *Webserver) getStatsForHours(duration time.Duration) (stats *Stats, err error) { 97 | hours := int(duration.Hours()) 98 | 99 | timeStr := fmt.Sprintf("%dh", hours) 100 | if hours == 168 { 101 | timeStr = "7d" 102 | } 103 | 104 | until := time.Now().UTC() 105 | since := until.Add(-1 * duration.Abs()) 106 | log := srv.log.WithFields(logrus.Fields{ 107 | "since": since, 108 | "until": until, 109 | "hours": hours, 110 | }) 111 | 112 | log.Debug("- loading top relays...") 113 | startTime := time.Now() 114 | topRelays, err := srv.db.GetTopRelays(since, until) 115 | if err != nil { 116 | return nil, err 117 | } 118 | log.WithField("duration", time.Since(startTime).String()).Debug("- got top relays") 119 | 120 | log.Debug("- loading top builders...") 121 | startTime = time.Now() 122 | topBuilders, err := srv.db.GetTopBuilders(since, until, "") 123 | if err != nil { 124 | return nil, err 125 | } 126 | log.WithField("duration", time.Since(startTime).String()).Debug("- got top builders") 127 | 128 | log.Debug("- loading builder profits...") 129 | startTime = 
time.Now() 130 | builderProfits, err := srv.db.GetBuilderProfits(since, until) 131 | if err != nil { 132 | return nil, err 133 | } 134 | log.WithField("duration", time.Since(startTime).String()).Debug("- got builder profits") 135 | 136 | stats = &Stats{ 137 | Since: since, 138 | Until: until, 139 | TimeStr: timeStr, 140 | 141 | TopRelays: prepareRelaysEntries(topRelays), 142 | TopBuilders: consolidateBuilderEntries(topBuilders), 143 | BuilderProfits: consolidateBuilderProfitEntries(builderProfits), 144 | TopBuildersByRelay: make(map[string][]*TopBuilderDisplayEntry), 145 | } 146 | 147 | // Query builders for each relay 148 | log.Debug("- loading builders per relay...") 149 | startTime = time.Now() 150 | for _, relay := range topRelays { 151 | topBuildersForRelay, err := srv.db.GetTopBuilders(since, until, relay.Relay) 152 | if err != nil { 153 | return nil, err 154 | } 155 | stats.TopBuildersByRelay[relay.Relay] = consolidateBuilderEntries(topBuildersForRelay) 156 | } 157 | log.WithField("duration", time.Since(startTime).String()).Debug("- got builders per relay") 158 | return stats, nil 159 | } 160 | 161 | func (srv *Webserver) _renderRootHTML(stats *Stats) (overviewBytes, profitBytes []byte, err error) { 162 | latestSlotInDB := srv.latestSlot.Load() 163 | latestSlotInDBTime := common.SlotToTime(latestSlotInDB) 164 | 165 | // Render the HTML for overview 166 | htmlBuf := bytes.Buffer{} 167 | htmlData := &HTMLData{ 168 | Title: "MEV-Boost Relay & Builder Stats", 169 | View: "overview", 170 | TimeSpans: timespans, 171 | TimeSpan: stats.TimeStr, 172 | Stats: stats, 173 | LastUpdateSlot: latestSlotInDB, 174 | LastUpdateTime: latestSlotInDBTime, 175 | LastUpdateTimeStr: latestSlotInDBTime.Format("2006-01-02 15:04"), 176 | } 177 | 178 | // Render the template & minify 179 | if err := srv.templateIndex.ExecuteTemplate(&htmlBuf, "base", htmlData); err != nil { 180 | srv.log.WithError(err).Error("error rendering template") 181 | return nil, nil, err 182 | } 183 | 
overviewBytes, err = srv.minifier.Bytes("text/html", htmlBuf.Bytes()) 184 | if err != nil { 185 | srv.log.WithError(err).Error("error minifying html") 186 | return nil, nil, err 187 | } 188 | 189 | // Render HTML for builder profit 190 | htmlBuf = bytes.Buffer{} 191 | htmlData.Title = "MEV-Boost Builder Profitability" 192 | htmlData.View = "builder-profit" 193 | if err := srv.templateIndex.ExecuteTemplate(&htmlBuf, "base", htmlData); err != nil { 194 | srv.log.WithError(err).Error("error rendering template") 195 | return nil, nil, err 196 | } 197 | profitBytes, err = srv.minifier.Bytes("text/html", htmlBuf.Bytes()) 198 | if err != nil { 199 | srv.log.WithError(err).Error("error minifying html") 200 | return nil, nil, err 201 | } 202 | 203 | return overviewBytes, profitBytes, nil 204 | } 205 | 206 | func (srv *Webserver) _getDailyStats(t time.Time) (since, until, minDate time.Time, relays []*database.TopRelayEntry, builders []*database.TopBuilderEntry, builderProfits []*database.BuilderProfitEntry, err error) { 207 | now := time.Now().UTC() 208 | minDate = time.Date(now.Year(), now.Month(), now.Day(), 0, 0, 0, 0, time.UTC).Add(-24 * time.Hour).UTC() 209 | if t.UTC().After(minDate.UTC()) { 210 | return now, now, minDate, nil, nil, nil, fmt.Errorf("date is too recent") //nolint:goerr113 211 | } 212 | 213 | since = time.Date(t.Year(), t.Month(), t.Day(), 0, 0, 0, 0, time.UTC) 214 | until = time.Date(t.Year(), t.Month(), t.Day(), 23, 59, 59, 0, time.UTC) 215 | relays, builders, builderProfits, err = srv.db.GetStatsForTimerange(since, until, "") 216 | return since, until, minDate, relays, builders, builderProfits, err 217 | } 218 | -------------------------------------------------------------------------------- /cmd/core/data-api-backfill.go: -------------------------------------------------------------------------------- 1 | package core 2 | 3 | import ( 4 | "context" 5 | "database/sql" 6 | "errors" 7 | "fmt" 8 | "net/http" 9 | "time" 10 | 11 | relaycommon 
"github.com/flashbots/mev-boost-relay/common" 12 | "github.com/flashbots/relayscan/common" 13 | "github.com/flashbots/relayscan/database" 14 | "github.com/flashbots/relayscan/vars" 15 | "github.com/sirupsen/logrus" 16 | "github.com/spf13/cobra" 17 | ) 18 | 19 | var ( 20 | cliRelay string 21 | minSlot int64 22 | initCursor uint64 23 | pageLimit = 100 // 100 is max on bloxroute 24 | ) 25 | 26 | func init() { 27 | backfillDataAPICmd.Flags().StringVar(&cliRelay, "relay", "", "specific relay only") 28 | backfillDataAPICmd.Flags().Uint64Var(&initCursor, "cursor", 0, "initial cursor") 29 | backfillDataAPICmd.Flags().Int64Var(&minSlot, "min-slot", 0, "minimum slot (if unset, backfill until the merge, negative number for that number of slots before latest)") 30 | } 31 | 32 | var backfillDataAPICmd = &cobra.Command{ 33 | Use: "data-api-backfill", 34 | Short: "Backfill all relays data API", 35 | Run: func(cmd *cobra.Command, args []string) { 36 | var err error 37 | var relays []common.RelayEntry 38 | 39 | if cliRelay != "" { 40 | var relayEntry common.RelayEntry 41 | switch cliRelay { 42 | case "fb": 43 | relayEntry, err = common.NewRelayEntry(vars.RelayURLs[0], false) 44 | case "us": 45 | relayEntry, err = common.NewRelayEntry(vars.RelayURLs[1], false) 46 | default: 47 | relayEntry, err = common.NewRelayEntry(cliRelay, false) 48 | } 49 | if err != nil { 50 | log.WithField("relay", cliRelay).WithError(err).Fatal("failed to decode relay") 51 | } 52 | relays = []common.RelayEntry{relayEntry} 53 | } else { 54 | relays, err = common.GetRelays() 55 | if err != nil { 56 | log.WithError(err).Fatal("failed to get relays") 57 | } 58 | } 59 | 60 | log.Infof("Relayscan %s", vars.Version) 61 | 62 | // Connect to Postgres 63 | db := database.MustConnectPostgres(log, vars.DefaultPostgresDSN) 64 | 65 | // Run backfill 66 | err = RunBackfill(db, relays, initCursor, minSlot) 67 | if err != nil { 68 | log.WithError(err).Fatal("backfill failed") 69 | } 70 | }, 71 | } 72 | 73 | // RunBackfill 
runs the data API backfill for all given relays 74 | func RunBackfill(db *database.DatabaseService, relays []common.RelayEntry, initCursor uint64, minSlot int64) error { 75 | startTime := time.Now().UTC() 76 | 77 | log.Infof("Using %d relays", len(relays)) 78 | for index, relay := range relays { 79 | log.Infof("- relay #%d: %s", index+1, relay.Hostname()) 80 | } 81 | 82 | // If needed, get latest slot (i.e. if min-slot is negative) 83 | if minSlot < 0 { 84 | log.Infof("Getting latest slot from beaconcha.in for offset %d", minSlot) 85 | latestSlotOnBeaconChain := common.MustGetLatestSlot() 86 | log.Infof("Latest slot from beaconcha.in: %d", latestSlotOnBeaconChain) 87 | minSlot = int64(latestSlotOnBeaconChain) + minSlot 88 | } 89 | 90 | if minSlot != 0 { 91 | log.Infof("Using min slot: %d", minSlot) 92 | } 93 | 94 | for _, relay := range relays { 95 | log.Infof("Starting backfilling for relay %s ...", relay.Hostname()) 96 | backfiller := newBackfiller(db, relay, initCursor, uint64(minSlot)) 97 | err := backfiller.backfillPayloadsDelivered() 98 | if err != nil { 99 | log.WithError(err).WithField("relay", relay).Error("backfill failed") 100 | } 101 | } 102 | 103 | timeNeeded := time.Since(startTime) 104 | log.WithField("timeNeeded", timeNeeded).Info("Backfill done!") 105 | return nil 106 | } 107 | 108 | type backfiller struct { 109 | relay common.RelayEntry 110 | db *database.DatabaseService 111 | cursorSlot uint64 112 | minSlot uint64 113 | } 114 | 115 | func newBackfiller(db *database.DatabaseService, relay common.RelayEntry, cursorSlot, minSlot uint64) *backfiller { 116 | return &backfiller{ 117 | relay: relay, 118 | db: db, 119 | cursorSlot: cursorSlot, 120 | minSlot: minSlot, 121 | } 122 | } 123 | 124 | func (bf *backfiller) backfillPayloadsDelivered() error { 125 | _log := log.WithField("relay", bf.relay.Hostname()) 126 | // _log.Info("backfilling payloads from relay data-api ...") 127 | 128 | // 1. 
get latest entry from DB 129 | latestEntry, err := bf.db.GetDataAPILatestPayloadDelivered(bf.relay.Hostname()) 130 | latestSlotInDB := uint64(0) 131 | if err != nil { 132 | if !errors.Is(err, sql.ErrNoRows) { 133 | // A real DB error: return it instead of calling Fatal (Fatal would os.Exit and make the return unreachable). 134 | _log.WithError(err).Error("failed to get latest entry") 135 | return err 136 | } 137 | // sql.ErrNoRows: no entries yet for this relay; latestEntry must not be dereferenced, keep latestSlotInDB at 0. 138 | } else { 139 | latestSlotInDB = latestEntry.Slot 140 | } 141 | _log.Infof("Latest payload in DB for slot: %d", latestSlotInDB) 142 | 143 | // 2. backfill until latest DB entry is reached 144 | baseURL := bf.relay.GetURI("/relay/v1/data/bidtraces/proposer_payload_delivered") 145 | cursorSlot := bf.cursorSlot 146 | slotsReceived := make(map[uint64]bool) 147 | builders := make(map[string]bool) 148 | 149 | for { 150 | payloadsNew := 0 151 | url := fmt.Sprintf("%s?limit=%d", baseURL, pageLimit) 152 | if cursorSlot > 0 { 153 | url = fmt.Sprintf("%s&cursor=%d", url, cursorSlot) 154 | } 155 | _log.WithField("url", url).Info("Fetching payloads...") 156 | var data []relaycommon.BidTraceV2JSON 157 | _, err = common.SendHTTPRequest(context.Background(), *http.DefaultClient, http.MethodGet, url, nil, &data) 158 | if err != nil { 159 | return err 160 | } 161 | 162 | _log.Infof("Response contains %d delivered payloads", len(data)) 163 | 164 | // build a list of entries for batch DB update 165 | entries := make([]*database.DataAPIPayloadDeliveredEntry, len(data)) 166 | slotFirst := uint64(0) 167 | slotLast := uint64(0) 168 | for index, payload := range data { 169 | _log.Debugf("saving entry for slot %d", payload.Slot) 170 | dbEntry := database.BidTraceV2JSONToPayloadDeliveredEntry(bf.relay.Hostname(), payload) 171 | entries[index] = &dbEntry 172 | 173 | // Set first and last slot 174 | if slotFirst == 0 || payload.Slot < slotFirst { 175 | slotFirst = payload.Slot 176 | } 177 | if slotLast == 0 || payload.Slot > slotLast { 178 | slotLast = payload.Slot 179 | } 180 | 181 | // Count number of slots with payloads 182 | if !slotsReceived[payload.Slot] { 183 | slotsReceived[payload.Slot] = true 184 | 
payloadsNew += 1 181 | } 182 | 183 | // Set cursor for next request 184 | if cursorSlot == 0 || cursorSlot > payload.Slot { 185 | cursorSlot = payload.Slot 186 | } 187 | 188 | // Remember the builder 189 | builders[payload.BuilderPubkey] = true 190 | } 191 | 192 | // Save entries 193 | newEntries, err := bf.db.SaveDataAPIPayloadDeliveredBatch(entries) 194 | if err != nil { 195 | _log.WithError(err).Fatal("failed to save entries") 196 | return err 197 | } 198 | 199 | _log.WithFields(logrus.Fields{ 200 | "newEntries": newEntries, 201 | "slotFirst": slotFirst, 202 | "slotLast": slotLast, 203 | }).Info("Batch of payloads saved to database") 204 | 205 | // Save builders 206 | for builderPubkey := range builders { 207 | err = bf.db.SaveBuilder(&database.BlockBuilderEntry{BuilderPubkey: builderPubkey}) 208 | if err != nil { 209 | _log.WithError(err).Error("failed to save builder") 210 | } 211 | } 212 | 213 | // Stop as soon as no new payloads are received 214 | if payloadsNew == 0 { 215 | _log.Infof("No new payloads, all done. 
Earliest payload for slot: %d", cursorSlot) 216 | return nil 217 | } 218 | 219 | // Stop if at the latest slot in DB 220 | if cursorSlot < latestSlotInDB { 221 | _log.Infof("Payloads backfilled until latest slot in DB: %d", latestSlotInDB) 222 | return nil 223 | } 224 | 225 | // Stop if at min slot 226 | if cursorSlot < bf.minSlot { 227 | _log.Infof("Payloads backfilled until min slot: %d", bf.minSlot) 228 | return nil 229 | } 230 | } 231 | } 232 | -------------------------------------------------------------------------------- /services/bidcollect/bid-processor.go: -------------------------------------------------------------------------------- 1 | package bidcollect 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "os" 7 | "path/filepath" 8 | "strings" 9 | "sync" 10 | "time" 11 | 12 | "github.com/flashbots/relayscan/common" 13 | "github.com/flashbots/relayscan/services/bidcollect/types" 14 | "github.com/redis/go-redis/v9" 15 | "github.com/sirupsen/logrus" 16 | ) 17 | 18 | // Goals: 19 | // 1. Dedup bids 20 | // 2. 
Save bids to CSV 21 | // - One CSV for all bids 22 | // - One CSV for top bids only 23 | 24 | type BidProcessorOpts struct { 25 | Log *logrus.Entry 26 | UID string 27 | OutDir string 28 | OutputTSV bool 29 | RedisAddr string 30 | UseRedis bool 31 | } 32 | 33 | type OutFiles struct { 34 | FAll *os.File 35 | FTop *os.File 36 | } 37 | 38 | type BidProcessor struct { 39 | opts *BidProcessorOpts 40 | log *logrus.Entry 41 | 42 | outFiles map[int64]*OutFiles // map[slot][bidUniqueKey]Bid 43 | outFilesLock sync.RWMutex 44 | 45 | bidCache map[uint64]map[string]*types.CommonBid // map[slot][bidUniqueKey]Bid 46 | topBidCache map[uint64]*types.CommonBid // map[slot]Bid 47 | bidCacheLock sync.RWMutex 48 | 49 | csvSeparator string 50 | csvFileEnding string 51 | 52 | redisClient *redis.Client 53 | } 54 | 55 | func NewBidProcessor(opts *BidProcessorOpts) (*BidProcessor, error) { 56 | c := &BidProcessor{ 57 | log: opts.Log, 58 | opts: opts, 59 | outFiles: make(map[int64]*OutFiles), 60 | bidCache: make(map[uint64]map[string]*types.CommonBid), 61 | topBidCache: make(map[uint64]*types.CommonBid), 62 | } 63 | 64 | if opts.OutputTSV { 65 | c.csvSeparator = "\t" 66 | c.csvFileEnding = "tsv" 67 | } else { 68 | c.csvSeparator = "," 69 | c.csvFileEnding = "csv" 70 | } 71 | 72 | if opts.UseRedis && opts.RedisAddr != "" { 73 | c.redisClient = redis.NewClient(&redis.Options{ 74 | Addr: opts.RedisAddr, 75 | Password: "", // no password set 76 | DB: 0, // use default DB 77 | }) 78 | 79 | // Make sure we can connect to redis to connect to redis 80 | if _, err := c.redisClient.Ping(context.Background()).Result(); err != nil { 81 | return nil, err 82 | } 83 | } 84 | return c, nil 85 | } 86 | 87 | func (c *BidProcessor) Start() { 88 | for { 89 | time.Sleep(30 * time.Second) 90 | c.housekeeping() 91 | } 92 | } 93 | 94 | func (c *BidProcessor) processBids(bids []*types.CommonBid) { 95 | c.bidCacheLock.Lock() 96 | defer c.bidCacheLock.Unlock() 97 | 98 | var isTopBid, isNewBid bool 99 | for _, bid := 
range bids { 100 | isNewBid, isTopBid = false, false 101 | if _, ok := c.bidCache[bid.Slot]; !ok { 102 | c.bidCache[bid.Slot] = make(map[string]*types.CommonBid) 103 | } 104 | 105 | // Check if bid is new top bid 106 | if topBid, ok := c.topBidCache[bid.Slot]; !ok { 107 | c.topBidCache[bid.Slot] = bid // first one for the slot 108 | isTopBid = true 109 | } else { 110 | // if current bid has higher value, use it as new top bid 111 | if bid.ValueAsBigInt().Cmp(topBid.ValueAsBigInt()) == 1 { 112 | c.topBidCache[bid.Slot] = bid 113 | isTopBid = true 114 | } 115 | } 116 | 117 | // process regular bids only once per unique key (slot+blockhash+parenthash+builderpubkey+value) 118 | if _, ok := c.bidCache[bid.Slot][bid.UniqueKey()]; !ok { 119 | // yet unknown bid, save it 120 | c.bidCache[bid.Slot][bid.UniqueKey()] = bid 121 | isNewBid = true 122 | } 123 | 124 | // Send to Redis 125 | if c.redisClient != nil { 126 | err := c.redisClient.Publish(context.Background(), types.RedisChannel, bid.ToCSVLine(",")).Err() 127 | if err != nil { 128 | c.log.WithError(err).Error("failed to publish bid to redis") 129 | } 130 | } 131 | 132 | // Write to CSV 133 | c.writeBidToFile(bid, isNewBid, isTopBid) 134 | } 135 | } 136 | 137 | func (c *BidProcessor) writeBidToFile(bid *types.CommonBid, isNewBid, isTopBid bool) { 138 | fAll, fTop, err := c.getFiles(bid) 139 | if err != nil { 140 | c.log.WithError(err).Error("get get output file") 141 | return 142 | } 143 | if isNewBid { 144 | _, err = fmt.Fprint(fAll, bid.ToCSVLine(c.csvSeparator)+"\n") 145 | if err != nil { 146 | c.log.WithError(err).Error("couldn't write bid to file") 147 | return 148 | } 149 | } 150 | if isTopBid { 151 | _, err = fmt.Fprint(fTop, bid.ToCSVLine(c.csvSeparator)+"\n") 152 | if err != nil { 153 | c.log.WithError(err).Error("couldn't write bid to file") 154 | return 155 | } 156 | } 157 | } 158 | 159 | func (c *BidProcessor) getFiles(bid *types.CommonBid) (fAll, fTop *os.File, err error) { 160 | // hourlybucket 161 | sec 
:= int64(types.BucketMinutes * 60) 162 | bucketTS := bid.ReceivedAtMs / 1000 / sec * sec // timestamp down-round to start of bucket 163 | t := time.Unix(bucketTS, 0).UTC() 164 | 165 | // files may already be opened 166 | c.outFilesLock.RLock() 167 | outFiles, outFilesOk := c.outFiles[bucketTS] 168 | c.outFilesLock.RUnlock() 169 | 170 | if outFilesOk { 171 | return outFiles.FAll, outFiles.FTop, nil 172 | } 173 | 174 | // Create output directory 175 | dir := filepath.Join(c.opts.OutDir, t.Format(time.DateOnly)) 176 | err = os.MkdirAll(dir, os.ModePerm) 177 | if err != nil { 178 | return nil, nil, err 179 | } 180 | 181 | // Open ALL BIDS CSV 182 | fnAll := filepath.Join(dir, c.getFilename("all", bucketTS)) 183 | fAll, err = os.OpenFile(fnAll, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0o600) 184 | if err != nil { 185 | return nil, nil, err 186 | } 187 | fi, err := fAll.Stat() 188 | if err != nil { 189 | c.log.WithError(err).Fatal("failed stat on output file") 190 | } 191 | if fi.Size() == 0 { 192 | _, err = fmt.Fprint(fAll, strings.Join(types.CommonBidCSVFields, c.csvSeparator)+"\n") 193 | if err != nil { 194 | c.log.WithError(err).Fatal("failed to write header to output file") 195 | } 196 | } 197 | 198 | // Open TOP BIDS CSV 199 | fnTop := filepath.Join(dir, c.getFilename("top", bucketTS)) 200 | fTop, err = os.OpenFile(fnTop, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0o600) 201 | if err != nil { 202 | return nil, nil, err 203 | } 204 | fi, err = fTop.Stat() 205 | if err != nil { 206 | c.log.WithError(err).Fatal("failed stat on output file") 207 | } 208 | if fi.Size() == 0 { 209 | _, err = fmt.Fprint(fTop, strings.Join(types.CommonBidCSVFields, c.csvSeparator)+"\n") 210 | if err != nil { 211 | c.log.WithError(err).Fatal("failed to write header to output file") 212 | } 213 | } 214 | 215 | outFiles = &OutFiles{ 216 | FAll: fAll, 217 | FTop: fTop, 218 | } 219 | c.outFilesLock.Lock() 220 | c.outFiles[bucketTS] = outFiles 221 | c.outFilesLock.Unlock() 222 | 223 | 
c.log.Infof("[bid-processor] created output file: %s", fnAll) 224 | c.log.Infof("[bid-processor] created output file: %s", fnTop) 225 | return fAll, fTop, nil 226 | } 227 | 228 | func (c *BidProcessor) getFilename(prefix string, timestamp int64) string { 229 | t := time.Unix(timestamp, 0).UTC() 230 | if prefix != "" { 231 | prefix += "_" 232 | } 233 | return fmt.Sprintf("%s%s_%s.%s", prefix, t.Format("2006-01-02_15-04"), c.opts.UID, c.csvFileEnding) 234 | } 235 | 236 | func (c *BidProcessor) housekeeping() { 237 | currentSlot := common.TimeToSlot(time.Now().UTC()) 238 | maxSlotInCache := currentSlot - 3 239 | 240 | nDeleted := 0 241 | nBids := 0 242 | 243 | c.bidCacheLock.Lock() 244 | defer c.bidCacheLock.Unlock() 245 | for slot := range c.bidCache { 246 | if slot < maxSlotInCache { 247 | delete(c.bidCache, slot) 248 | nDeleted += 1 249 | } else { 250 | nBids += len(c.bidCache[slot]) 251 | } 252 | } 253 | 254 | // Close and remove old files 255 | now := time.Now().UTC().Unix() 256 | filesBefore := len(c.outFiles) 257 | c.outFilesLock.Lock() 258 | for timestamp, outFiles := range c.outFiles { 259 | usageSec := types.BucketMinutes * 60 * 2 260 | if now-timestamp > int64(usageSec) { // remove all handles from 2x usage seconds ago 261 | c.log.Info("closing output files", timestamp) 262 | delete(c.outFiles, timestamp) 263 | _ = outFiles.FAll.Close() 264 | _ = outFiles.FTop.Close() 265 | } 266 | } 267 | nFiles := len(c.outFiles) 268 | filesClosed := filesBefore - len(c.outFiles) // handles closed this pass (count before minus count after deletion) 269 | c.outFilesLock.Unlock() 270 | 271 | c.log.Infof("[bid-processor] cleanupBids - deleted slots: %d / total slots: %d / total bids: %d / files closed: %d, current: %d / memUsedMB: %d", nDeleted, len(c.bidCache), nBids, filesClosed, nFiles, common.GetMemMB()) 272 | } 273 | -------------------------------------------------------------------------------- /services/bidcollect/getheader-poller.go: -------------------------------------------------------------------------------- 1 | package
bidcollect 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "net/http" 7 | "strings" 8 | "time" 9 | 10 | "github.com/flashbots/go-boost-utils/types" 11 | "github.com/flashbots/mev-boost-relay/beaconclient" 12 | relaycommon "github.com/flashbots/mev-boost-relay/common" 13 | "github.com/flashbots/relayscan/common" 14 | bidcollecttypes "github.com/flashbots/relayscan/services/bidcollect/types" 15 | "github.com/sirupsen/logrus" 16 | ) 17 | 18 | type GetHeaderPollerBidsMsg struct { 19 | Slot uint64 20 | Bid types.GetHeaderResponse 21 | Relay common.RelayEntry 22 | ReceivedAt time.Time 23 | } 24 | 25 | type GetHeaderPollerOpts struct { 26 | Log *logrus.Entry 27 | BidC chan GetHeaderPollerBidsMsg 28 | BeaconURI string 29 | Relays []common.RelayEntry 30 | } 31 | 32 | type GetHeaderPoller struct { 33 | log *logrus.Entry 34 | bidC chan GetHeaderPollerBidsMsg 35 | relays []common.RelayEntry 36 | bn *beaconclient.ProdBeaconInstance 37 | } 38 | 39 | func NewGetHeaderPoller(opts *GetHeaderPollerOpts) *GetHeaderPoller { 40 | return &GetHeaderPoller{ 41 | log: opts.Log, 42 | bidC: opts.BidC, 43 | relays: opts.Relays, 44 | bn: beaconclient.NewProdBeaconInstance(opts.Log, opts.BeaconURI), 45 | } 46 | } 47 | 48 | func (poller *GetHeaderPoller) Start() { 49 | poller.log.WithField("relays", common.RelayEntriesToHostnameStrings(poller.relays)).Info("Starting GetHeaderPoller ...") 50 | 51 | // Check beacon-node sync status, process current slot and start slot updates 52 | syncStatus, err := poller.bn.SyncStatus() 53 | if err != nil { 54 | poller.log.WithError(err).Fatal("couldn't get BN sync status") 55 | } else if syncStatus.IsSyncing { 56 | poller.log.Fatal("beacon node is syncing") 57 | } 58 | 59 | // var headSlot uint64 60 | var headSlot, nextSlot, currentEpoch, lastDutyUpdateEpoch uint64 61 | var duties map[uint64]string 62 | 63 | // subscribe to head events (because then, the BN will know the block + proposer details for the next slot) 64 | c := make(chan beaconclient.HeadEventData) 65 
	go poller.bn.SubscribeToHeadEvents(c)

	// then run polling loop
	for {
		headEvent := <-c
		// ignore duplicate or out-of-order head events
		if headEvent.Slot <= headSlot {
			continue
		}

		headSlot = headEvent.Slot
		nextSlot = headSlot + 1
		tNextSlot := common.SlotToTime(nextSlot)
		untilNextSlot := tNextSlot.Sub(time.Now().UTC())

		currentEpoch = headSlot / relaycommon.SlotsPerEpoch
		poller.log.Infof("[getHeader poller] headSlot slot: %d / next slot: %d (%s), waitTime: %s", headSlot, nextSlot, tNextSlot.String(), untilNextSlot.String())

		// On every new epoch, get proposer duties for current and next epoch (to avoid boundary problems)
		if len(duties) == 0 || currentEpoch > lastDutyUpdateEpoch {
			dutiesResp, err := poller.bn.GetProposerDuties(currentEpoch)
			if err != nil {
				poller.log.WithError(err).Error("couldn't get proposer duties")
				continue
			}

			duties = make(map[uint64]string)
			for _, d := range dutiesResp.Data {
				duties[d.Slot] = d.Pubkey
			}

			// next-epoch duties are best-effort: on error we keep the current-epoch entries only
			dutiesResp, err = poller.bn.GetProposerDuties(currentEpoch + 1)
			if err != nil {
				poller.log.WithError(err).Error("failed get proposer duties")
			} else {
				for _, d := range dutiesResp.Data {
					duties[d.Slot] = d.Pubkey
				}
			}
			poller.log.Debugf("[getHeader poller] duties updated: %d entries", len(duties))
			lastDutyUpdateEpoch = currentEpoch
		}

		// Now get the latest block, for the execution payload
		block, err := poller.bn.GetBlock("head")
		if err != nil {
			poller.log.WithError(err).Error("failed get latest block from BN")
			continue
		}

		// skip if the BN's head block doesn't yet match the slot from the head event
		if block.Data.Message.Slot != headSlot {
			poller.log.WithField("slot", headSlot).WithField("bnSlot", block.Data.Message.Slot).Error("latest block slot is not current slot")
			continue
		}

		nextProposerPubkey := duties[nextSlot]
		poller.log.Debugf("[getHeader poller] next slot: %d / block: %s / parent: %s / proposerPubkey: %s", nextSlot, block.Data.Message.Body.ExecutionPayload.BlockHash.String(), block.Data.Message.Body.ExecutionPayload.ParentHash, nextProposerPubkey)

		if nextProposerPubkey == "" {
			poller.log.WithField("duties", duties).Error("no proposerPubkey for next slot")
		} else {
			// poll each relay 1s into the next slot, using the new head block as parent
			// go poller.pollRelaysForBids(0*time.Second, nextSlot, block.Data.Message.Body.ExecutionPayload.BlockHash.String(), duties[nextSlot])
			go poller.pollRelaysForBids(1000*time.Millisecond, nextSlot, block.Data.Message.Body.ExecutionPayload.BlockHash.String(), duties[nextSlot])
		}
	}
}

// pollRelaysForBids will poll data api for given slot with t seconds offset
func (poller *GetHeaderPoller) pollRelaysForBids(tOffset time.Duration, slot uint64, parentHash, proposerPubkey string) {
	tSlotStart := common.SlotToTime(slot)
	tStart := tSlotStart.Add(tOffset)
	waitTime := tStart.Sub(time.Now().UTC())

	// poller.Log.Debugf("[getHeader poller] - prepare polling for slot %d t %d (tSlotStart: %s, tStart: %s, waitTime: %s)", slot, t, tSlotStart.String(), tStart.String(), waitTime.String())
	// target time already passed (e.g. head event arrived late) -> skip this slot
	if waitTime < 0 {
		poller.log.Debugf("[getHeader poller] waitTime is negative: %s", waitTime.String())
		return
	}

	// Wait until expected time
	time.Sleep(waitTime)

	// Poll for bids now
	untilSlot := tSlotStart.Sub(time.Now().UTC())
	poller.log.Debugf("[getHeader poller] polling for slot %d at t=%s (tNow=%s)", slot, tOffset.String(), (untilSlot * -1).String())

	// query all relays in parallel, one goroutine each
	for _, relay := range poller.relays {
		go poller._pollRelayForBids(relay, tOffset, slot, parentHash, proposerPubkey)
	}
}

// _pollRelayForBids sends a single getHeader request to one relay and, on
// success, forwards the bid to poller.bidC.
// NOTE(review): the underscore prefix is unidiomatic Go naming — consider
// renaming to pollRelayForBids in a follow-up.
func (poller *GetHeaderPoller) _pollRelayForBids(relay common.RelayEntry, t time.Duration, slot uint64, parentHash, proposerPubkey string) {
	// log := poller.Log.WithField("relay", relay.Hostname()).WithField("slot", slot)
	log := poller.log.WithFields(logrus.Fields{
		"relay": relay.Hostname(),
		"slot":  slot,
		"t":     t.String(),
	})
	log.Debugf("[getHeader poller] polling relay %s for slot %d", relay.Hostname(), slot)

	// builder-API getHeader endpoint: /eth/v1/builder/header/{slot}/{parent_hash}/{pubkey}
	path := fmt.Sprintf("/eth/v1/builder/header/%d/%s/%s", slot, parentHash, proposerPubkey)
	url := relay.GetURI(path)
	// log.Debugf("Querying %s", url)

	var bid types.GetHeaderResponse
	timeRequestStart := time.Now().UTC()
	// NOTE(review): this dereferences http.DefaultClient (no request timeout set) — confirm SendHTTPRequest applies its own timeout
	code, err := common.SendHTTPRequest(context.Background(), *http.DefaultClient, http.MethodGet, url, nil, &bid)
	timeRequestEnd := time.Now().UTC()
	if err != nil {
		msg := err.Error()
		// "no builder bid" and the ultrasound rate-limit message are expected; drop silently
		if strings.Contains(msg, "no builder bid") {
			return
		} else if strings.Contains(msg, "Too many getHeader requests! Use relay-analytics.ultrasound.money or the Websocket API") {
			return
		} else if code == 429 {
			// HTTP 429 Too Many Requests: warn but don't treat as an error
			log.Warn("[getHeader poller] 429 received")
			return
		}
		log.WithFields(logrus.Fields{
			"code": code,
			"url":  url,
		}).WithError(err).Error("[getHeader poller] error on getHeader request")
		return
	}
	// non-200 without a transport error means the relay has no bid for this slot
	if code != 200 {
		log.WithField("code", code).Debug("[getHeader poller] no bid received")
		return
	}
	log.WithField("durationMs", timeRequestEnd.Sub(timeRequestStart).Milliseconds()).Infof("[getHeader poller] bid received!
 slot: %d - value: %s - block_hash: %s -", slot, bid.Data.Message.Value.String(), bid.Data.Message.Header.BlockHash.String())

	// send data to channel
	poller.bidC <- GetHeaderPollerBidsMsg{Slot: slot, Bid: bid, Relay: relay, ReceivedAt: time.Now().UTC()}
}

// GetHeaderToCommonBid converts a getHeader poller message into the
// source-agnostic CommonBid format used by the bid processor.
func GetHeaderToCommonBid(bid GetHeaderPollerBidsMsg) *bidcollecttypes.CommonBid {
	return &bidcollecttypes.CommonBid{
		SourceType:   bidcollecttypes.SourceTypeGetHeader,
		ReceivedAtMs: bid.ReceivedAt.UnixMilli(),
		Relay:        bid.Relay.Hostname(),
		Slot:         bid.Slot,

		BlockNumber: bid.Bid.Data.Message.Header.BlockNumber,
		// hashes are lowercased for consistent storage and comparison
		BlockHash:  strings.ToLower(bid.Bid.Data.Message.Header.BlockHash.String()),
		ParentHash: strings.ToLower(bid.Bid.Data.Message.Header.ParentHash.String()),
		Value:      bid.Bid.Data.Message.Value.String(),
	}
}
-------------------------------------------------------------------------------- /database/migrations/001_init_database.go: --------------------------------------------------------------------------------
package migrations

import (
	"github.com/flashbots/relayscan/database/vars"
	migrate "github.com/rubenv/sql-migrate"
)

// initialSchema creates all relayscan tables and their indexes.
// Table names are injected from database/vars so they stay in sync with the code.
var initialSchema = `
CREATE TABLE IF NOT EXISTS ` + vars.TableSignedBuilderBid + ` (
	id bigint GENERATED BY DEFAULT AS IDENTITY PRIMARY KEY,
	inserted_at timestamp NOT NULL default current_timestamp,

	relay text NOT NULL,
	requested_at timestamp NOT NULL,
	received_at timestamp NOT NULL,
	duration_ms bigint NOT NULL,

	slot bigint NOT NULL,
	parent_hash varchar(66) NOT NULL,
	proposer_pubkey varchar(98) NOT NULL,

	pubkey varchar(98) NOT NULL,
	signature text NOT NULL,

	value NUMERIC(48, 0) NOT NULL,
	fee_recipient varchar(42) NOT NULL,
	block_hash varchar(66) NOT NULL,
	block_number bigint NOT NULL,
	gas_limit bigint NOT NULL,
	gas_used bigint NOT NULL,
	extra_data text NOT NULL,
	timestamp bigint NOT NULL,
	prev_randao text NOT NULL,

	epoch bigint NOT NULL
);

CREATE UNIQUE INDEX IF NOT EXISTS ` + vars.TableSignedBuilderBid + `_u_relay_slot_n_hashes_idx ON ` + vars.TableSignedBuilderBid + `("relay", "slot", "parent_hash", "block_hash");
CREATE INDEX IF NOT EXISTS ` + vars.TableSignedBuilderBid + `_insertedat_idx ON ` + vars.TableSignedBuilderBid + `("inserted_at");
CREATE INDEX IF NOT EXISTS ` + vars.TableSignedBuilderBid + `_slot_idx ON ` + vars.TableSignedBuilderBid + `("slot");
CREATE INDEX IF NOT EXISTS ` + vars.TableSignedBuilderBid + `_block_number_idx ON ` + vars.TableSignedBuilderBid + `("block_number");
CREATE INDEX IF NOT EXISTS ` + vars.TableSignedBuilderBid + `_value_idx ON ` + vars.TableSignedBuilderBid + `("value");


CREATE TABLE IF NOT EXISTS ` + vars.TableDataAPIPayloadDelivered + ` (
	id bigint GENERATED BY DEFAULT AS IDENTITY PRIMARY KEY,
	inserted_at timestamp NOT NULL default current_timestamp,
	relay text NOT NULL,

	epoch bigint NOT NULL,
	slot bigint NOT NULL,

	parent_hash varchar(66) NOT NULL,
	block_hash varchar(66) NOT NULL,
	builder_pubkey varchar(98) NOT NULL,
	proposer_pubkey varchar(98) NOT NULL,
	proposer_fee_recipient varchar(42) NOT NULL,
	gas_limit bigint NOT NULL,
	gas_used bigint NOT NULL,
	value_claimed_wei NUMERIC(48, 0) NOT NULL,
	value_claimed_eth NUMERIC(16, 8) NOT NULL,
	num_tx int,
	block_number bigint,
	extra_data text NOT NULL,

	slot_missed boolean, -- null means not yet checked
	value_check_ok boolean, -- null means not yet checked
	value_check_method text, -- how value was checked (i.e. blockBalanceDiff)
	value_delivered_wei NUMERIC(48, 0), -- actually delivered value
	value_delivered_eth NUMERIC(16, 8), -- actually delivered value
	value_delivered_diff_wei NUMERIC(48, 0), -- value_delivered - value_claimed
	value_delivered_diff_eth NUMERIC(16, 8), -- value_delivered - value_claimed
	block_coinbase_addr varchar(42), -- block coinbase address
	block_coinbase_is_proposer boolean, -- true if coinbase == proposerFeeRecipient
	coinbase_diff_wei NUMERIC(48, 0), -- builder value difference
	coinbase_diff_eth NUMERIC(16, 8), -- builder value difference
	found_onchain boolean, -- whether the payload blockhash can be found on chain (at all)
	notes text
);

CREATE UNIQUE INDEX IF NOT EXISTS ` + vars.TableDataAPIPayloadDelivered + `_u_relay_slot_blockhash_idx ON ` + vars.TableDataAPIPayloadDelivered + `("relay", "slot", "parent_hash", "block_hash");
CREATE INDEX IF NOT EXISTS ` + vars.TableDataAPIPayloadDelivered + `_insertedat_idx ON ` + vars.TableDataAPIPayloadDelivered + `("inserted_at");
CREATE INDEX IF NOT EXISTS ` + vars.TableDataAPIPayloadDelivered + `_slot_idx ON ` + vars.TableDataAPIPayloadDelivered + `("slot");
CREATE INDEX IF NOT EXISTS ` + vars.TableDataAPIPayloadDelivered + `_builder_pubkey_idx ON ` + vars.TableDataAPIPayloadDelivered + `("builder_pubkey");
CREATE INDEX IF NOT EXISTS ` + vars.TableDataAPIPayloadDelivered + `_block_number_idx ON ` + vars.TableDataAPIPayloadDelivered + `("block_number");
CREATE INDEX IF NOT EXISTS ` + vars.TableDataAPIPayloadDelivered + `_value_wei_idx ON ` + vars.TableDataAPIPayloadDelivered + `("value_claimed_wei");
CREATE INDEX IF NOT EXISTS ` + vars.TableDataAPIPayloadDelivered + `_valuecheck_ok_idx ON ` + vars.TableDataAPIPayloadDelivered + `("value_check_ok");
CREATE INDEX IF NOT EXISTS ` + vars.TableDataAPIPayloadDelivered + `_slotmissed_idx ON ` + vars.TableDataAPIPayloadDelivered + `("slot_missed");
CREATE INDEX IF NOT EXISTS ` + vars.TableDataAPIPayloadDelivered + `_cb_diff_eth_idx ON ` + vars.TableDataAPIPayloadDelivered + `("coinbase_diff_eth");


CREATE TABLE IF NOT EXISTS ` + vars.TableDataAPIBuilderBid + ` (
	id bigint GENERATED BY DEFAULT AS IDENTITY PRIMARY KEY,
	inserted_at timestamp NOT NULL default current_timestamp,
	relay text NOT NULL,

	epoch bigint NOT NULL,
	slot bigint NOT NULL,

	parent_hash varchar(66) NOT NULL,
	block_hash varchar(66) NOT NULL,
	builder_pubkey varchar(98) NOT NULL,
	proposer_pubkey varchar(98) NOT NULL,
	proposer_fee_recipient varchar(42) NOT NULL,
	gas_limit bigint NOT NULL,
	gas_used bigint NOT NULL,
	value NUMERIC(48, 0) NOT NULL,
	num_tx int,
	block_number bigint,
	timestamp timestamp NOT NULL
);

CREATE UNIQUE INDEX IF NOT EXISTS ` + vars.TableDataAPIBuilderBid + `_unique_idx ON ` + vars.TableDataAPIBuilderBid + `("relay", "slot", "builder_pubkey", "parent_hash", "block_hash");
CREATE INDEX IF NOT EXISTS ` + vars.TableDataAPIBuilderBid + `_insertedat_idx ON ` + vars.TableDataAPIBuilderBid + `("inserted_at");
CREATE INDEX IF NOT EXISTS ` + vars.TableDataAPIBuilderBid + `_slot_idx ON ` + vars.TableDataAPIBuilderBid + `("slot");
CREATE INDEX IF NOT EXISTS ` + vars.TableDataAPIBuilderBid + `_builder_pubkey_idx ON ` + vars.TableDataAPIBuilderBid + `("builder_pubkey");
CREATE INDEX IF NOT EXISTS ` + vars.TableDataAPIBuilderBid + `_block_number_idx ON ` + vars.TableDataAPIBuilderBid + `("block_number");
CREATE INDEX IF NOT EXISTS ` + vars.TableDataAPIBuilderBid + `_value_idx ON ` + vars.TableDataAPIBuilderBid + `("value");


CREATE TABLE IF NOT EXISTS ` + vars.TableBlockBuilder + ` (
	id bigint GENERATED BY DEFAULT AS IDENTITY PRIMARY KEY,
	inserted_at timestamp NOT NULL default current_timestamp,

	builder_pubkey varchar(98) NOT NULL,
	description text NOT NULL,

	UNIQUE (builder_pubkey)
);

CREATE TABLE IF NOT EXISTS ` + vars.TableBlockBuilderInclusionStats + ` (
	id bigint GENERATED BY DEFAULT AS IDENTITY PRIMARY KEY,
	inserted_at timestamp NOT NULL default current_timestamp,

	type text NOT NULL, -- "extra_data" or "builder_pubkey"
	hours int NOT NULL, -- the amount of hours aggregated over (i.e. 24 for daily)

	time_start timestamp NOT NULL,
	time_end timestamp NOT NULL,
	builder_name text NOT NULL,

	extra_data text NOT NULL,
	builder_pubkeys text NOT NULL,
	blocks_included int NOT NULL,

	UNIQUE (type, hours, time_start, time_end, builder_name)
);

CREATE INDEX IF NOT EXISTS ` + vars.TableBlockBuilderInclusionStats + `_type_hours_idx ON ` + vars.TableBlockBuilderInclusionStats + `("type", "hours");
CREATE INDEX IF NOT EXISTS ` + vars.TableBlockBuilderInclusionStats + `_time_start_idx ON ` + vars.TableBlockBuilderInclusionStats + `("time_start");
CREATE INDEX IF NOT EXISTS ` + vars.TableBlockBuilderInclusionStats + `_time_end_idx ON ` + vars.TableBlockBuilderInclusionStats + `("time_end");
CREATE INDEX IF NOT EXISTS ` + vars.TableBlockBuilderInclusionStats + `_builder_name_idx ON ` + vars.TableBlockBuilderInclusionStats + `("builder_name");
CREATE INDEX IF NOT EXISTS ` + vars.TableBlockBuilderInclusionStats + `_extra_data_idx ON ` + vars.TableBlockBuilderInclusionStats + `("extra_data");
`

// Migration001InitDatabase applies the initial schema. There is no Down
// migration; DisableTransactionDown is set but unused.
var Migration001InitDatabase = &migrate.Migration{
	Id: "001-init-database",
	Up: []string{initialSchema},

	DisableTransactionUp:   false,
	DisableTransactionDown: true,
}
-------------------------------------------------------------------------------- /services/website/templates/daily-stats.html: --------------------------------------------------------------------------------
{{ define "content" }}

4 |
5 |

MEV-Boost Stats for {{ .Day }}

6 |

{{ .TimeSince }} {{ .TimeUntil }} (UTC)

7 |

8 | prev day | 9 | {{ if ne .DayNext "" }}next day {{ else }}next day {{ end}} 10 |

11 |
12 | 13 |
14 |
15 | 16 |
17 |
18 |
19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | {{ range .TopRelays }} 30 | 31 | 32 | 33 | 34 | 35 | {{ end }} 36 | 37 |
RelayPayloadsPercent
{{ .Relay }}{{ .NumPayloads | prettyInt }}{{ .Percent }} %
38 |
39 |
40 | 41 |
42 |
43 | 44 | 45 | 46 | 47 | 48 | 49 | 50 | 51 | 52 | {{ range .TopBuildersBySummary }} 53 | 54 | 55 | 56 | 57 | 58 | 59 | {{ if gt (len .Children) 1 }} 60 | {{ range .Children }} 61 | 62 | 63 | 64 | 65 | 66 | 67 | {{ end }} 68 | {{ end }} 69 | {{ end }} 70 | 71 |
Builder (extra_data)BlocksPercent
{{ if .Info.ExtraData }}{{ .Info.ExtraData }}{{ else }} {{ end }}{{ .Info.NumBlocks | prettyInt }}{{ .Info.Percent }} %{{ if gt (len .Children) 1 }}{{ end }}
{{ .ExtraData }}{{ .NumBlocks | prettyInt }}{{ .Percent }} %
72 |
73 |
74 | 75 |
76 | 77 | 78 | 79 | 80 | 81 | 82 | 83 | 84 | 85 | 86 | 87 | 88 | {{ range .BuilderProfits }} 89 | 90 | 107 | 108 | 109 | 110 | 111 | 112 | 113 | {{ end }} 114 | 115 |
Builder extra_dataBlocksBlocks with profitBlocks with subsidyOverall profit (ETH)Subsidies (ETH)
91 | {{ if .ExtraData }}{{ .ExtraData }}{{ else }} {{ end }} 92 | {{ if ne (len .Aliases) 0 }} 93 | 94 | 95 | 104 | 105 | {{ end }} 106 | {{ .NumBlocks | prettyInt }}{{ .NumBlocksProfit | prettyInt }}{{ .NumBlocksSubsidised | prettyInt }}{{ .ProfitTotal }}{{ .SubsidiesTotal }}
116 |
117 |
118 |

⚠️ Disclaimer: Relayscan uses the block.coinbase ETH balance difference to measure builders' profits, which could introduce inaccuracies when builders:

119 | 120 |
    121 |
  • pay the validator using another address
  • 122 |
  • pay for orderflow in other channels (e.g. offchain)
  • 123 |
  • have searcher profits but use builder wallet funds to add bid value without depositing from searcher addresses
  • 124 |
125 |
126 |
127 | 128 |
129 | 130 | 131 | 132 | 164 | 165 | 166 | 167 | 168 | {{ end }} --------------------------------------------------------------------------------