├── .gitignore
├── .github
│   └── assets
│       └── example.png
├── fun
│   ├── params.go
│   ├── spec.go
│   ├── types.go
│   ├── db.go
│   ├── http.go
│   ├── eracli
│   │   └── main.go
│   ├── era
│   │   ├── store.go
│   │   └── era.go
│   ├── tile_handler.go
│   ├── index.html
│   ├── tiles.go
│   └── perf.go
├── main.go
├── go.mod
├── README.md
├── LICENSE
├── cmd
│   ├── log.go
│   ├── server.go
│   ├── tiles.go
│   └── perf.go
└── go.sum

/.gitignore:
--------------------------------------------------------------------------------
1 | /era
2 | *.era
3 | /out
4 | 
5 | 
--------------------------------------------------------------------------------
/.github/assets/example.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/protolambda/consensus-actor/HEAD/.github/assets/example.png
--------------------------------------------------------------------------------
/fun/params.go:
--------------------------------------------------------------------------------
1 | package fun
2 | 
3 | const (
4 | 	maxZoom = 9
5 | 	// maxArtificialZoom is the limit for zoom levels past maxZoom that just scale contents, rather than providing more detail.
6 | 	// diff with maxZoom may not be larger than log2(tileSize)
7 | 	// log2(128) = 7
8 | 	maxArtificialZoom = 13
9 | 	tileSize          = 128
10 | 	tileSizeSquared   = tileSize * tileSize
11 | )
12 | 
--------------------------------------------------------------------------------
/fun/spec.go:
--------------------------------------------------------------------------------
1 | package fun
2 | 
3 | import (
4 | 	"encoding/json"
5 | 	"fmt"
6 | 	"os"
7 | 
8 | 	"github.com/protolambda/zrnt/eth2/beacon/common"
9 | )
10 | 
11 | func loadSpec(specFilePath string) (*common.Spec, error) {
12 | 	data, err := os.ReadFile(specFilePath)
13 | 	if err != nil {
14 | 		return nil, fmt.Errorf("failed to read spec file: %w", err)
15 | 	}
16 | 	var x common.Spec
17 | 	if err := json.Unmarshal(data, &x); err != nil {
18 | 		return nil, fmt.Errorf("failed to unmarshal json spec: %w", err)
19 | 	}
20 | 	return &x, nil
21 | }
22 | 
--------------------------------------------------------------------------------
/main.go:
--------------------------------------------------------------------------------
1 | package main
2 | 
3 | import (
4 | 	"fmt"
5 | 	"os"
6 | 
7 | 	"github.com/ethereum/go-ethereum/log"
8 | 	"github.com/urfave/cli/v2"
9 | 
10 | 	"github.com/protolambda/consensus-actor/cmd"
11 | )
12 | 
13 | func main() {
14 | 	log.Root().SetHandler(log.LvlFilterHandler(log.LvlInfo, log.StreamHandler(os.Stdout, log.TerminalFormat(true))))
15 | 
16 | 	app := cli.NewApp()
17 | 	app.Version = "0.0.4"
18 | 	app.Name = "consensus-actor"
19 | 	app.Usage = "Consensus actor analysis tool by @protolambda"
20 | 	app.Description = "Build and serve a maps-like view of the consensus actor data of ethereum."
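	// The sub-commands form a pipeline: `perf` reads era files and writes per-epoch
	// validator performance data, `tiles` renders that data into image tiles, and
	// `server` serves the tiles and the index page over HTTP.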
21 | app.Commands = []*cli.Command{ 22 | cmd.PerfCmd, 23 | cmd.ServerCmd, 24 | cmd.TilesCmd, 25 | } 26 | err := app.Run(os.Args) 27 | if err != nil { 28 | _, _ = fmt.Fprintf(os.Stderr, "error: %v", err) 29 | } 30 | } 31 | -------------------------------------------------------------------------------- /fun/types.go: -------------------------------------------------------------------------------- 1 | package fun 2 | 3 | import ( 4 | "github.com/protolambda/zrnt/eth2/beacon/common" 5 | "github.com/protolambda/zrnt/eth2/beacon/phase0" 6 | ) 7 | 8 | type BoundedIndices []common.BoundedIndex 9 | 10 | type RandaoLookup func(epoch common.Epoch) ([32]byte, error) 11 | 12 | type BlockRootLookup func(slot common.Slot) (common.Root, error) 13 | 14 | type AttestationsLookup func(slot common.Slot) (phase0.Attestations, error) 15 | 16 | type BlockLookup func(slot uint64, dest common.SSZObj) error 17 | 18 | type StateLookup func(slot uint64, dest common.SSZObj) error 19 | 20 | type SlotAttestations struct { 21 | Slot common.Slot 22 | Attestations phase0.Attestations 23 | } 24 | 25 | func loadIndicesFromState(validators phase0.ValidatorRegistry) BoundedIndices { 26 | indices := make([]common.BoundedIndex, len(validators)) 27 | for i, v := range validators { 28 | indices[i] = common.BoundedIndex{ 29 | Index: common.ValidatorIndex(i), 30 | Activation: v.ActivationEpoch, 31 | Exit: v.ExitEpoch, 32 | } 33 | } 34 | return indices 35 | } 36 | -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module github.com/protolambda/consensus-actor 2 | 3 | go 1.18 4 | 5 | require ( 6 | github.com/ethereum/go-ethereum v1.11.6 7 | github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb 8 | github.com/protolambda/zrnt v0.30.0 9 | github.com/protolambda/ztyp v0.2.2 10 | github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 11 | github.com/urfave/cli/v2 v2.17.2-0.20221006022127-8f469abc00aa 12 | ) 13 | 14 | require ( 15 | github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect 16 | github.com/go-stack/stack v1.8.1 // indirect 17 | github.com/holiman/uint256 v1.2.2-0.20230321075855-87b91420868c // indirect 18 | github.com/kilic/bls12-381 v0.1.0 // indirect 19 | github.com/klauspost/cpuid/v2 v2.1.0 // indirect 20 | github.com/minio/sha256-simd v1.0.0 // indirect 21 | github.com/protolambda/bls12-381-util v0.0.0-20220416220906-d8552aa452c7 // indirect 22 | github.com/russross/blackfriday/v2 v2.1.0 // indirect 23 | github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect 24 | golang.org/x/sys v0.6.0 // indirect 25 | gopkg.in/yaml.v3 v3.0.1 // indirect 26 | ) 27 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # consensus.actor 2 | 3 | *work in progress, site is not currently deployed* 4 | 5 | Site to view Ethereum consensus-layer activity: 6 | a network-wide historical view of attester performance as interactive map. 7 | 8 | Mainnet Ethereum Beacon-chain, at ~201K epochs, ~624K validators: 9 | 10 | ![Example visualization](.github/assets/example.png) 11 | 12 | ## Background 13 | 14 | End sept 2021 (when I was still at the EF) I hacked together a similar but more limited tool; 15 | no live updates, and hooked straight to a Lighthouse leveldb dump, it was very hacky (no CLI, just hacks). 
16 | 
17 | July 2022 I updated it, with (mostly broken) live updates, and hit a DB-lookup bottleneck:
18 | indexing the data was extremely slow, because of the random access into Lighthouse leveldb data,
19 | and the large amount of data copying.
20 | 
21 | Nov 2022: attempted to reduce data copies and optimize the program. Indexing was still very slow.
22 | 
23 | May 2023: big refactor, using the Nimbus era file archive for fast access to historical beacon-chain data.
24 | 
25 | 
26 | ## License
27 | 
28 | MIT License, see [LICENSE file](./LICENSE).
29 | 
30 | 
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 | 
3 | Copyright (c) 2022 @protolambda
4 | 
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 | 
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 | 
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
--------------------------------------------------------------------------------
/fun/db.go:
--------------------------------------------------------------------------------
1 | package fun
2 | 
3 | import (
4 | 	"github.com/syndtr/goleveldb/leveldb"
5 | 	lvlerrs "github.com/syndtr/goleveldb/leveldb/errors"
6 | 	"github.com/syndtr/goleveldb/leveldb/filter"
7 | 	"github.com/syndtr/goleveldb/leveldb/opt"
8 | )
9 | 
10 | // OpenDB opens a level DB.
11 | //
12 | // Filepath to locate db at.
13 | // Readonly to limit db writes.
14 | // Cache in megabytes to trade memory for better performance.
15 | // writeBuf in megabytes to improve writing performance 16 | func OpenDB(file string, readonly bool, cache int, writeBuf int) (*leveldb.DB, error) { 17 | options := &opt.Options{ 18 | Filter: filter.NewBloomFilter(10), 19 | DisableSeeksCompaction: true, 20 | OpenFilesCacheCapacity: 1024, 21 | BlockCacheCapacity: cache * opt.MiB, 22 | WriteBuffer: writeBuf * opt.MiB, 23 | ReadOnly: readonly, 24 | } 25 | 26 | // Open the db and recover any potential corruptions 27 | db, err := leveldb.OpenFile(file, options) 28 | if _, corrupted := err.(*lvlerrs.ErrCorrupted); corrupted { 29 | db, err = leveldb.RecoverFile(file, nil) 30 | } 31 | if err != nil { 32 | return nil, err 33 | } 34 | return db, nil 35 | } 36 | -------------------------------------------------------------------------------- /cmd/log.go: -------------------------------------------------------------------------------- 1 | package cmd 2 | 3 | import ( 4 | "fmt" 5 | "os" 6 | "strings" 7 | 8 | "github.com/ethereum/go-ethereum/log" 9 | "github.com/urfave/cli/v2" 10 | ) 11 | 12 | var ( 13 | LogLevelFlag = &cli.StringFlag{ 14 | Name: "log.level", 15 | Usage: "The lowest log level that will be output", 16 | Value: "info", 17 | EnvVars: []string{"LOG_LEVEL"}, 18 | } 19 | LogFormatFlag = &cli.StringFlag{ 20 | Name: "log.format", 21 | Usage: "Format the log output. Supported formats: 'text', 'json'", 22 | Value: "text", 23 | EnvVars: []string{"LOG_FORMAT"}, 24 | } 25 | LogColorFlag = &cli.BoolFlag{ 26 | Name: "log.color", 27 | Usage: "Color the log output", 28 | EnvVars: []string{"LOG_COLOR"}, 29 | } 30 | ) 31 | 32 | func SetupLogger(ctx *cli.Context) (log.Logger, error) { 33 | fmtStr := ctx.String(LogFormatFlag.Name) 34 | lvlStr := ctx.String(LogLevelFlag.Name) 35 | 36 | var logFmt log.Format 37 | switch fmtStr { 38 | case "json": 39 | logFmt = log.JSONFormat() 40 | case "json-pretty": 41 | logFmt = log.JSONFormatEx(true, true) 42 | case "text", "terminal": 43 | logFmt = log.TerminalFormat(ctx.Bool(LogColorFlag.Name)) 44 | default: 45 | return nil, fmt.Errorf("unrecognized log format: %q", fmtStr) 46 | } 47 | 48 | lvl, err := log.LvlFromString(strings.ToLower(lvlStr)) 49 | if err != nil { 50 | return nil, fmt.Errorf("unrecognized log level: %w", err) 51 | } 52 | handler := log.StreamHandler(os.Stdout, logFmt) 53 | handler = log.SyncHandler(handler) 54 | handler = log.LvlFilterHandler(lvl, handler) 55 | logger := log.New() 56 | logger.SetHandler(handler) 57 | return logger, nil 58 | } 59 | -------------------------------------------------------------------------------- /fun/http.go: -------------------------------------------------------------------------------- 1 | package fun 2 | 3 | import ( 4 | "embed" 5 | "fmt" 6 | "html/template" 7 | "net/http" 8 | "time" 9 | 10 | "github.com/ethereum/go-ethereum/log" 11 | ) 12 | 13 | //go:embed index.html 14 | var indexFile embed.FS 15 | 16 | var indexTempl = func() *template.Template { 17 | indexTempl, err := template.ParseFS(indexFile, "index.html") 18 | if err != nil { 19 | panic(fmt.Errorf("failed to load index.html template: %v", err)) 20 | } 21 | return indexTempl 22 | }() 23 | 24 | // IndexData is the Go html template input for index.html 25 | type IndexData struct { 26 | Title string 27 | API string 28 | } 29 | 30 | type Backend interface { 31 | HandleImageRequest() 32 | } 33 | 34 | func StartHttpServer(log log.Logger, listenAddr string, indexData *IndexData, handleImgRequest func(tileType uint8) http.Handler) *http.Server { 35 | var mux http.ServeMux 36 | mux.Handle("/validator-order", 
http.StripPrefix("/validator-order", handleImgRequest(0))) 37 | mux.Handle("/", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { 38 | err := indexTempl.Execute(w, indexData) 39 | if err != nil { 40 | log.Error("failed to serve index.html page", "err", err) 41 | } 42 | })) 43 | 44 | srv := &http.Server{ 45 | Addr: listenAddr, 46 | Handler: &mux, 47 | ReadTimeout: time.Second * 10, 48 | ReadHeaderTimeout: time.Second * 10, 49 | WriteTimeout: time.Second * 10, 50 | IdleTimeout: time.Second * 10, 51 | MaxHeaderBytes: 10_000, 52 | } 53 | 54 | go func() { 55 | err := srv.ListenAndServe() 56 | if err == nil || err == http.ErrServerClosed { 57 | log.Info("closed http server") 58 | } else { 59 | log.Error("http server listen error, shutting down app", "err", err) 60 | } 61 | }() 62 | 63 | return srv 64 | } 65 | -------------------------------------------------------------------------------- /cmd/server.go: -------------------------------------------------------------------------------- 1 | package cmd 2 | 3 | import ( 4 | "fmt" 5 | 6 | "github.com/urfave/cli/v2" 7 | 8 | "github.com/protolambda/consensus-actor/fun" 9 | ) 10 | 11 | var ( 12 | ServerListenAddrFlag = &cli.StringFlag{ 13 | Name: "listen", 14 | Usage: "listen address to bind to", 15 | Value: "0.0.0.0:8080", 16 | } 17 | ServerPublicFlag = cli.StringFlag{ 18 | Name: "public", 19 | Usage: "public endpoint to use for API queries", 20 | Value: "127.0.0.1:8080", 21 | } 22 | ServerTilesFlag = &cli.PathFlag{ 23 | Name: "tiles", 24 | Usage: "path to tiles db to read tile data from", 25 | Value: "tiles_db", 26 | } 27 | ) 28 | 29 | var ServerCmd = &cli.Command{ 30 | Name: "server", 31 | Usage: "Run http server.", 32 | Description: "Run http server.", 33 | Action: Server, 34 | Flags: []cli.Flag{ 35 | LogLevelFlag, 36 | LogFormatFlag, 37 | LogColorFlag, 38 | ServerListenAddrFlag, 39 | ServerTilesFlag, 40 | }, 41 | } 42 | 43 | func Server(ctx *cli.Context) error { 44 | log, err := SetupLogger(ctx) 45 | if err != nil { 46 | return err 47 | } 48 | 49 | listenAddr := ctx.String(ServerListenAddrFlag.Name) 50 | publicEndpoint := ctx.String(ServerPublicFlag.Name) 51 | 52 | tilesDB, err := fun.OpenDB(ctx.Path(ServerTilesFlag.Name), true, 100, 0) 53 | if err != nil { 54 | return fmt.Errorf("failed to open perf db: %w", err) 55 | } 56 | defer tilesDB.Close() 57 | 58 | log.Info("starting server", "listen", listenAddr, "public", publicEndpoint) 59 | 60 | imgHandler := &fun.ImageHandler{Log: log, TilesDB: tilesDB} 61 | 62 | srv := fun.StartHttpServer(log, listenAddr, &fun.IndexData{ 63 | Title: "Consensus.actor | mainnet", 64 | API: publicEndpoint, 65 | }, imgHandler.HandleImgRequest) 66 | 67 | <-ctx.Done() 68 | 69 | log.Info("closing server") 70 | 71 | if err := srv.Close(); err != nil { 72 | log.Error("failed to close server", "err", err) 73 | } 74 | 75 | log.Info("closed server") 76 | return nil 77 | } 78 | -------------------------------------------------------------------------------- /cmd/tiles.go: -------------------------------------------------------------------------------- 1 | package cmd 2 | 3 | import ( 4 | "fmt" 5 | 6 | "github.com/protolambda/zrnt/eth2/beacon/common" 7 | "github.com/urfave/cli/v2" 8 | 9 | "github.com/protolambda/consensus-actor/fun" 10 | ) 11 | 12 | var ( 13 | TilesPerfFlag = &cli.PathFlag{ 14 | Name: "perf", 15 | Usage: "Path to validator perf database to read from", 16 | TakesFile: true, 17 | Value: "perf_db", 18 | Required: false, 19 | } 20 | TilesTilesFlag = &cli.PathFlag{ 21 | Name: "tiles", 22 | Usage: "path to 
tiles db to write tile data to", 23 | Value: "tiles_db", 24 | } 25 | TilesStartEpochFlag = &cli.Uint64Flag{ 26 | Name: "start-epoch", 27 | Usage: "Start epoch (inclusive) of tiles to update", 28 | Value: uint64(0), 29 | } 30 | TilesEndEpochFlag = &cli.Uint64Flag{ 31 | Name: "end-epoch", 32 | Usage: "End epoch (exclusive) of tiles to update", 33 | Value: ^uint64(0), 34 | } 35 | ) 36 | 37 | var TilesCmd = &cli.Command{ 38 | Name: "tiles", 39 | Usage: "Compute tiles for range of epochs.", 40 | Description: "Compute tiles for range of epochs.", 41 | Action: Tiles, 42 | Flags: []cli.Flag{ 43 | LogLevelFlag, 44 | LogFormatFlag, 45 | LogColorFlag, 46 | TilesPerfFlag, 47 | TilesTilesFlag, 48 | TilesStartEpochFlag, 49 | TilesEndEpochFlag, 50 | }, 51 | } 52 | 53 | func Tiles(ctx *cli.Context) error { 54 | log, err := SetupLogger(ctx) 55 | if err != nil { 56 | return err 57 | } 58 | startEpoch := common.Epoch(ctx.Uint64(TilesStartEpochFlag.Name)) 59 | endEpoch := common.Epoch(ctx.Uint64(TilesEndEpochFlag.Name)) 60 | perfDB, err := fun.OpenDB(ctx.Path(TilesPerfFlag.Name), true, 100, 0) 61 | if err != nil { 62 | return fmt.Errorf("failed to open perf db: %w", err) 63 | } 64 | defer perfDB.Close() 65 | tilesDB, err := fun.OpenDB(ctx.Path(TilesTilesFlag.Name), false, 100, 100) 66 | if err != nil { 67 | return fmt.Errorf("failed to open perf db: %w", err) 68 | } 69 | defer tilesDB.Close() 70 | return fun.UpdateTiles(log, tilesDB, perfDB, startEpoch, endEpoch) 71 | } 72 | -------------------------------------------------------------------------------- /fun/eracli/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "bytes" 5 | "fmt" 6 | "io" 7 | "log" 8 | 9 | "github.com/protolambda/zrnt/eth2/beacon/phase0" 10 | "github.com/protolambda/zrnt/eth2/configs" 11 | 12 | "github.com/protolambda/consensus-actor/fun/era" 13 | ) 14 | 15 | func readEraFile(f io.ReadSeeker) error { 16 | // start from end 17 | groupEnd, err := f.Seek(0, io.SeekEnd) 18 | if err != nil { 19 | return fmt.Errorf("seek err: %w", err) 20 | } 21 | 22 | var buf bytes.Buffer 23 | for { 24 | slot, err := era.SeekState(f, groupEnd) 25 | if err != nil { 26 | return fmt.Errorf("failed to seek to state: %w", err) 27 | } 28 | fmt.Printf("reading group with state at slot %d\n", slot) 29 | 30 | buf.Reset() 31 | if _, err := era.CopyEntry(f, &buf); err != nil { 32 | return fmt.Errorf("failed to load state data: %w", err) 33 | } 34 | fmt.Printf("state: %d\n", buf.Len()) 35 | 36 | if slot != 0 { 37 | for i := uint64(0); i < era.SlotsPerEra; i++ { 38 | err := era.SeekBlock(f, i, groupEnd) 39 | if err == era.ErrNotExist { 40 | fmt.Printf("block %d does not exist\n", i) 41 | continue 42 | } 43 | if err != nil { 44 | return fmt.Errorf("failed to seek to block %d: %w", i, err) 45 | } 46 | 47 | buf.Reset() 48 | if _, err := era.CopyEntry(f, &buf); err != nil { 49 | return fmt.Errorf("failed to load block %d data: %w", i, err) 50 | } 51 | fmt.Printf("block %d: %d\n", i, buf.Len()) 52 | } 53 | } else { 54 | break 55 | } 56 | 57 | err = era.SeekGroupStart(f, groupEnd) 58 | if err != nil { 59 | return fmt.Errorf("failed to seek to group start: %w", err) 60 | } 61 | groupEnd, err = era.Tell(f) 62 | if err != nil { 63 | return fmt.Errorf("unable to tell current offset: %w", err) 64 | } 65 | if groupEnd == 0 { 66 | break 67 | } 68 | } 69 | return nil 70 | } 71 | 72 | func dumpEntries(f io.ReadSeeker) error { 73 | i := 0 74 | for { 75 | typ, l, err := era.ReadHeader(f) 76 | if err == io.EOF { 77 
| break 78 | } 79 | if err != nil { 80 | return fmt.Errorf("failed to read header of entry %d: %w", i, err) 81 | } 82 | if l > 0 { 83 | if _, err := f.Seek(int64(l), io.SeekCurrent); err != nil { 84 | return fmt.Errorf("failed to skip content of entry %d: %w", i, err) 85 | } 86 | } 87 | fmt.Printf("entry %d: type: %x length: %d\n", i, typ, l) 88 | i++ 89 | } 90 | return nil 91 | } 92 | 93 | //func main() { 94 | // // era/mainnet-00000-4b363db9.era 95 | // // era/mainnet-00001-40cf2f3c.era 96 | // f, err := os.OpenFile("era/mainnet-00001-40cf2f3c.era", os.O_RDONLY, 0) 97 | // if err != nil { 98 | // log.Fatal(err) 99 | // } 100 | // defer f.Close() 101 | // 102 | // err = readEraFile(f) 103 | // if err != nil { 104 | // log.Fatal(err) 105 | // } 106 | //} 107 | 108 | func main() { 109 | st := era.NewStore() 110 | if err := st.Load("era"); err != nil { 111 | log.Fatal(err) 112 | } 113 | var state phase0.BeaconState 114 | if err := st.State(0, configs.Mainnet.Wrap(&state)); err != nil { 115 | log.Fatal(err) 116 | } 117 | fmt.Printf("validators: %d\n", len(state.Validators)) 118 | 119 | var block phase0.SignedBeaconBlock 120 | if err := st.Block(1, configs.Mainnet.Wrap(&block)); err != nil { 121 | log.Fatal(err) 122 | } 123 | fmt.Printf("block: %v\n", block) 124 | } 125 | -------------------------------------------------------------------------------- /cmd/perf.go: -------------------------------------------------------------------------------- 1 | package cmd 2 | 3 | import ( 4 | "fmt" 5 | 6 | "github.com/protolambda/zrnt/eth2/beacon/common" 7 | "github.com/protolambda/zrnt/eth2/configs" 8 | "github.com/urfave/cli/v2" 9 | 10 | "github.com/protolambda/consensus-actor/fun" 11 | "github.com/protolambda/consensus-actor/fun/era" 12 | ) 13 | 14 | var ( 15 | PerfPerfFlag = &cli.PathFlag{ 16 | Name: "perf", 17 | Usage: "Path to validator perf database to output to", 18 | TakesFile: true, 19 | Value: "perf_db", 20 | Required: false, 21 | } 22 | PerfEraFlag = &cli.PathFlag{ 23 | Name: "era", 24 | Usage: "Path to era store dir", 25 | TakesFile: true, 26 | Required: true, 27 | } 28 | PerfStartEpochFlag = &cli.Uint64Flag{ 29 | Name: "start-epoch", 30 | Usage: "Start epoch (inclusive) of validator performance data to update", 31 | Value: uint64(0), 32 | } 33 | PerfEndEpochFlag = &cli.Uint64Flag{ 34 | Name: "end-epoch", 35 | Usage: "End epoch (exclusive) of validator performance data to update", 36 | Value: ^uint64(0), 37 | } 38 | PerfWorkersFlag = &cli.IntFlag{ 39 | Name: "workers", 40 | Usage: "number of workers to used to process in parallel", 41 | Value: 8, 42 | } 43 | // TODO spec flag 44 | ) 45 | 46 | var PerfCmd = &cli.Command{ 47 | Name: "perf", 48 | Usage: "Compute validators performance in epoch range.", 49 | Description: "Compute validators performance in epoch range.", 50 | Action: Perf, 51 | Flags: []cli.Flag{ 52 | LogLevelFlag, 53 | LogFormatFlag, 54 | LogColorFlag, 55 | PerfPerfFlag, 56 | PerfEraFlag, 57 | PerfStartEpochFlag, 58 | PerfEndEpochFlag, 59 | PerfWorkersFlag, 60 | }, 61 | } 62 | 63 | func Perf(ctx *cli.Context) error { 64 | log, err := SetupLogger(ctx) 65 | if err != nil { 66 | return err 67 | } 68 | startEpoch := common.Epoch(ctx.Uint64(PerfStartEpochFlag.Name)) 69 | endEpoch := common.Epoch(ctx.Uint64(PerfEndEpochFlag.Name)) 70 | 71 | workers := ctx.Int(PerfWorkersFlag.Name) 72 | if workers < 0 || workers > 128 { 73 | return fmt.Errorf("invalid workers count: %d", workers) 74 | } 75 | 76 | perfDB, err := fun.OpenDB(ctx.Path(PerfPerfFlag.Name), false, 100, 0) 77 | if err != nil { 
78 | return fmt.Errorf("failed to open perf db: %w", err) 79 | } 80 | defer perfDB.Close() 81 | 82 | es := era.NewStore() 83 | if err := es.Load(ctx.Path(PerfEraFlag.Name)); err != nil { 84 | return fmt.Errorf("failed to index era store: %w", err) 85 | } 86 | spec := configs.Mainnet 87 | spec.BELLATRIX_FORK_EPOCH = 144896 88 | spec.CAPELLA_FORK_EPOCH = 194048 89 | 90 | minSlot, maxSlot := es.Bounds() 91 | minEpoch, maxEpoch := spec.SlotToEpoch(minSlot), spec.SlotToEpoch(maxSlot) 92 | if minEpoch >= maxEpoch { 93 | return fmt.Errorf("no era data") 94 | } 95 | 96 | epochsPerEra := spec.SlotToEpoch(era.SlotsPerEra) 97 | if minEpoch > startEpoch+epochsPerEra-2 { 98 | startEpoch = minEpoch - epochsPerEra + 2 99 | log.Warn("adjusting lower bound", "start_epoch", startEpoch, "min_era_epoch", minEpoch) 100 | } 101 | if maxEpoch < endEpoch { 102 | endEpoch = maxEpoch 103 | log.Warn("adjusting upper bound", "end_epoch", startEpoch, "max_era_epoch", maxEpoch) 104 | } 105 | 106 | if err := fun.UpdatePerf(ctx.Context, log, perfDB, spec, es, startEpoch, endEpoch, workers); err != nil { 107 | return fmt.Errorf("failed to update validator performance data: %w", err) 108 | } 109 | return nil 110 | } 111 | -------------------------------------------------------------------------------- /fun/era/store.go: -------------------------------------------------------------------------------- 1 | package era 2 | 3 | import ( 4 | "bytes" 5 | "fmt" 6 | "io" 7 | "io/fs" 8 | "os" 9 | "path/filepath" 10 | "strings" 11 | "sync" 12 | 13 | "github.com/golang/snappy" 14 | "github.com/protolambda/zrnt/eth2/beacon/common" 15 | "github.com/protolambda/ztyp/codec" 16 | ) 17 | 18 | type Store struct { 19 | // era file paths indexed by state starting-slot 20 | Files map[common.Slot]string 21 | 22 | // TODO cache open era files 23 | } 24 | 25 | var ( 26 | stateBufPool = sync.Pool{New: func() any { return bytes.NewBuffer(make([]byte, 100_000_000)) }} 27 | blockBufPool = sync.Pool{New: func() any { return bytes.NewBuffer(make([]byte, 10_000_000)) }} 28 | snappyPool = sync.Pool{New: func() any { return snappy.NewReader(nil) }} 29 | ) 30 | 31 | func NewStore() *Store { 32 | return &Store{ 33 | Files: make(map[common.Slot]string), 34 | } 35 | } 36 | 37 | func (s *Store) Load(dirPath string) error { 38 | return filepath.WalkDir(dirPath, func(path string, d fs.DirEntry, err error) error { 39 | if !d.IsDir() && strings.HasSuffix(path, ".era") { 40 | f, err := os.Open(path) 41 | if err != nil { 42 | return fmt.Errorf("failed to read %q: %w", path, err) 43 | } 44 | defer f.Close() 45 | end, err := f.Seek(0, io.SeekEnd) 46 | if err != nil { 47 | return fmt.Errorf("failed to seek era to end: %w", err) 48 | } 49 | startSlot, err := SeekState(f, end) 50 | if err != nil { 51 | return fmt.Errorf("failed to seek era to state: %w", err) 52 | } 53 | s.Files[startSlot] = path 54 | } 55 | return nil 56 | }) 57 | } 58 | 59 | func (s *Store) Bounds() (min, max common.Slot) { 60 | min = ^common.Slot(0) 61 | max = common.Slot(0) 62 | for k := range s.Files { 63 | if k < min { 64 | min = k 65 | } 66 | if k > max { 67 | max = k 68 | } 69 | } 70 | return 71 | } 72 | 73 | func (s *Store) openEra(slot common.Slot) (*os.File, error) { 74 | p, ok := s.Files[slot] 75 | if !ok { 76 | return nil, os.ErrNotExist 77 | } 78 | return os.Open(p) 79 | } 80 | 81 | func (s *Store) State(slot common.Slot, dest common.SSZObj) error { 82 | if slot%SlotsPerEra != 0 { 83 | return fmt.Errorf("can only open states at multiples of era size, but got request for %d", slot) 84 | } 85 
| f, err := s.openEra(slot) 86 | if err != nil { 87 | return fmt.Errorf("failed to open era: %w", err) 88 | } 89 | defer f.Close() 90 | end, err := f.Seek(0, io.SeekEnd) 91 | if err != nil { 92 | return fmt.Errorf("failed to seek era to end: %w", err) 93 | } 94 | startSlot, err := SeekState(f, end) 95 | if err != nil { 96 | return fmt.Errorf("failed to seek era to state: %w", err) 97 | } 98 | if slot != startSlot { 99 | return fmt.Errorf("sanity check of state starting-slot slot failed: %w", err) 100 | } 101 | sr := snappyPool.Get().(*snappy.Reader) 102 | defer snappyPool.Put(sr) 103 | 104 | buf := stateBufPool.Get().(*bytes.Buffer) 105 | buf.Reset() 106 | defer stateBufPool.Put(buf) 107 | if err := CoppySnappyEntry(f, buf, sr, CompressedBeaconStateType); err != nil { 108 | return fmt.Errorf("failed to read compressed beacon state: %w", err) 109 | } 110 | dr := codec.NewDecodingReader(buf, uint64(buf.Len())) 111 | if err := dest.Deserialize(dr); err != nil { 112 | return fmt.Errorf("failed to deserialize beacon state: %w", err) 113 | } 114 | return nil 115 | } 116 | 117 | func (s *Store) Block(slot common.Slot, dest common.SSZObj) error { 118 | eraSlot := slot - (slot % SlotsPerEra) + SlotsPerEra 119 | f, err := s.openEra(eraSlot) 120 | if err != nil { 121 | return fmt.Errorf("failed to open era: %w", err) 122 | } 123 | defer f.Close() 124 | end, err := f.Seek(0, io.SeekEnd) 125 | if err != nil { 126 | return fmt.Errorf("failed to seek era to end: %w", err) 127 | } 128 | err = SeekBlock(f, uint64(slot%SlotsPerEra), end) 129 | if err != nil { 130 | return fmt.Errorf("failed to seek era to block: %w", err) 131 | } 132 | sr := snappyPool.Get().(*snappy.Reader) 133 | defer snappyPool.Put(sr) 134 | 135 | buf := blockBufPool.Get().(*bytes.Buffer) 136 | buf.Reset() 137 | defer blockBufPool.Put(buf) 138 | if err := CoppySnappyEntry(f, buf, sr, CompressedSignedBeaconBlockType); err != nil { 139 | return fmt.Errorf("failed to read compressed signed beacon block: %w", err) 140 | } 141 | dr := codec.NewDecodingReader(buf, uint64(buf.Len())) 142 | if err := dest.Deserialize(dr); err != nil { 143 | return fmt.Errorf("failed to deserialize signed beacon block: %w", err) 144 | } 145 | return nil 146 | } 147 | -------------------------------------------------------------------------------- /fun/tile_handler.go: -------------------------------------------------------------------------------- 1 | package fun 2 | 3 | import ( 4 | "bytes" 5 | "fmt" 6 | "image" 7 | "image/color" 8 | "image/png" 9 | "net/http" 10 | "strconv" 11 | 12 | "github.com/ethereum/go-ethereum/log" 13 | "github.com/golang/snappy" 14 | "github.com/syndtr/goleveldb/leveldb" 15 | ) 16 | 17 | // Tile represents an image tile, as stored in the DB. 18 | // R,G,B,A are slices stored next to eachother, since they compress better individually. 19 | // 20 | // Each color slice encodes column by column first, since correlation is larger per-column, for better compression. 21 | // I.e. values 0,1,2,3,4...tilesize-1 all correspond to column 0 (X == 0). 22 | type Tile struct { 23 | R []byte 24 | G []byte 25 | B []byte 26 | A []byte 27 | // added to x lookups 28 | OffsetX int 29 | // added to y lookups 30 | OffsetY int 31 | // Shifts x and y by given amount to create zoomed image effect, 32 | // while really serving the same tile. 33 | // This is useful to zoom in more than 1:1 pixel definition, 34 | // enlarging tiles without image scaling artifacts on client side. 35 | Scale uint8 36 | } 37 | 38 | // ColorModel returns the Image's color model. 
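// Together with Bounds and At below, ColorModel lets *Tile satisfy the standard
// image.Image interface, so the raw R/G/B/A planes can be handed straight to
// png.Encode without first copying them into an *image.RGBA.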
39 | func (t *Tile) ColorModel() color.Model {
40 | 	return color.RGBAModel
41 | }
42 | 
43 | // Bounds returns the domain for which At can return non-zero color.
44 | // The bounds do not necessarily contain the point (0, 0).
45 | func (t *Tile) Bounds() image.Rectangle {
46 | 	return image.Rectangle{Min: image.Point{X: 0, Y: 0}, Max: image.Point{X: tileSize, Y: tileSize}}
47 | }
48 | 
49 | // At returns the color of the pixel at (x, y).
50 | // At(Bounds().Min.X, Bounds().Min.Y) returns the upper-left pixel of the grid.
51 | // At(Bounds().Max.X-1, Bounds().Max.Y-1) returns the lower-right one.
52 | func (t *Tile) At(x, y int) color.Color {
53 | 	return t.RGBAAt(x, y)
54 | }
55 | 
56 | func (t *Tile) RGBAAt(x, y int) color.RGBA {
57 | 	x >>= t.Scale
58 | 	y >>= t.Scale
59 | 	x += t.OffsetX
60 | 	y += t.OffsetY
61 | 	if x < 0 || x >= tileSize || y < 0 || y >= tileSize {
62 | 		return color.RGBA{}
63 | 	}
64 | 	pos := x*tileSize + y
65 | 	return color.RGBA{
66 | 		R: t.R[pos],
67 | 		G: t.G[pos],
68 | 		B: t.B[pos],
69 | 		A: t.A[pos],
70 | 	}
71 | }
72 | 
73 | func (t *Tile) Opaque() bool {
74 | 	return false
75 | }
76 | 
77 | type ImageHandler struct {
78 | 	Log     log.Logger
79 | 	TilesDB *leveldb.DB
80 | }
81 | 
82 | func (s *ImageHandler) HandleImgRequest(tileType uint8) http.Handler {
83 | 	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
84 | 		q := r.URL.Query()
85 | 		xStr := q.Get("x")
86 | 		x, err := strconv.ParseInt(xStr, 10, 32)
87 | 		if err != nil {
88 | 			w.WriteHeader(400)
89 | 			s.Log.Debug("query with bad x value", "err", err)
90 | 			_, _ = w.Write([]byte(fmt.Sprintf("bad x value: %v", err)))
91 | 			return
92 | 		}
93 | 		yStr := q.Get("y")
94 | 		y, err := strconv.ParseInt(yStr, 10, 32)
95 | 		if err != nil {
96 | 			w.WriteHeader(400)
97 | 			s.Log.Debug("query with bad y value", "err", err)
98 | 			_, _ = w.Write([]byte(fmt.Sprintf("bad y value: %v", err)))
99 | 			return
100 | 		}
101 | 		zStr := q.Get("z")
102 | 		z, err := strconv.ParseInt(zStr, 10, 32)
103 | 		if err != nil {
104 | 			w.WriteHeader(400)
105 | 			s.Log.Debug("query with bad z value", "err", err)
106 | 			_, _ = w.Write([]byte(fmt.Sprintf("bad z value: %v", err)))
107 | 			return
108 | 		}
109 | 		if x < 0 || y < 0 || z < 0 {
110 | 			w.WriteHeader(404)
111 | 			_, _ = w.Write([]byte(fmt.Sprintf("negative x %d or y %d or z %d\n", x, y, z)))
112 | 			return
113 | 		}
114 | 
115 | 		if z > int64(maxArtificialZoom) {
116 | 			w.WriteHeader(400)
117 | 			_, _ = w.Write([]byte(fmt.Sprintf("z too large: %d\n", z)))
118 | 			return
119 | 		}
120 | 		var zoom, scale uint8
121 | 		var tileX, tileY uint64
122 | 		var offsetX, offsetY int
123 | 		if z > int64(maxZoom) {
124 | 			zoom = 0
125 | 			scale = uint8(z) - maxZoom
126 | 			tileX = uint64(x) >> scale
127 | 			tileY = uint64(y) >> scale
128 | 			offsetX = int(uint64(x)-(tileX<<scale)) * (tileSize >> scale)
129 | 			offsetY = int(uint64(y)-(tileY<<scale)) * (tileSize >> scale)
130 | 		} else {
131 | 			zoom = maxZoom - uint8(z)
132 | 			scale = 0
133 | 			offsetX = 0
134 | 			offsetY = 0
135 | 			tileX = uint64(x)
136 | 			tileY = uint64(y)
137 | 		}
138 | 
139 | 		key := tileDbKey(tileType, tileX, tileY, zoom)
140 | 		tilePix, err := s.TilesDB.Get(key, nil)
141 | 		if err == leveldb.ErrNotFound {
142 | 			w.WriteHeader(404)
143 | 			s.Log.Debug(fmt.Sprintf("could not find tile: %d:%d zoom %d (translated zoom: %d)\n", x, y, z, zoom))
144 | 			_, _ = w.Write([]byte(fmt.Sprintf("could not find tile: %d:%d:%d", x, y, z)))
145 | 			return
146 | 		} else if err != nil {
147 | 			w.WriteHeader(500)
148 | 			s.Log.Debug(fmt.Sprintf("server error while getting tile: %d:%d zoom %d (translated zoom: %d)\n", x, y, z, zoom))
149 | 			_, _ = w.Write([]byte(fmt.Sprintf("server error
while getting tile: %d:%d:%d", x, y, z))) 150 | return 151 | } 152 | 153 | tilePix, err = snappy.Decode(nil, tilePix) 154 | if err != nil { 155 | s.Log.Warn("snappy err", "err", err) 156 | w.WriteHeader(500) 157 | return 158 | } 159 | 160 | // lowest validator index, first epoch, is top left 161 | img := Tile{ 162 | R: tilePix[:tileSizeSquared], 163 | G: tilePix[tileSizeSquared : tileSizeSquared*2], 164 | B: tilePix[tileSizeSquared*2 : tileSizeSquared*3], 165 | A: tilePix[tileSizeSquared*3:], 166 | OffsetX: offsetX, 167 | OffsetY: offsetY, 168 | Scale: scale, 169 | } 170 | 171 | var buf bytes.Buffer 172 | if err = png.Encode(&buf, &img); err != nil { 173 | s.Log.Warn("PNG encoding err", "err", err) 174 | w.WriteHeader(500) 175 | return 176 | } 177 | // TODO: set cache policy based on coordinates 178 | 179 | w.Header().Set("Content-Type", "image/png") 180 | _, _ = w.Write(buf.Bytes()) 181 | }) 182 | } 183 | -------------------------------------------------------------------------------- /fun/index.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | {{.Title}} 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 39 | 40 | 41 | 42 |
(The HTML/JS markup of index.html was not captured in this dump; only the Go template placeholder {{.Title}} above survives.)
60 | 148 | 149 | 150 | 151 | -------------------------------------------------------------------------------- /fun/era/era.go: -------------------------------------------------------------------------------- 1 | package era 2 | 3 | import ( 4 | "encoding/binary" 5 | "errors" 6 | "fmt" 7 | "io" 8 | 9 | "github.com/golang/snappy" 10 | "github.com/protolambda/zrnt/eth2/beacon/common" 11 | ) 12 | 13 | // Era file format 14 | // 15 | // from specs: https://github.com/status-im/nimbus-eth2/blob/stable/docs/e2store.md 16 | // 17 | // entry commons: 18 | // header := type | length | reserved 19 | // type := [2]byte 20 | // length := LE uint32 21 | // reserved := [2]byte zeroes 22 | // 23 | // Version := header | data 24 | // type: [0x65, 0x32] 25 | // length: 0 26 | // data: [] 27 | // 28 | // SlotIndex := header | data 29 | // type: [0x69, 0x32] 30 | // data: starting-slot | index | index | index ... | count 31 | // 32 | // era := group+ 33 | // group := Version | block* | era-state | other-entries* | slot-index(block)? | slot-index(state) 34 | // block := CompressedSignedBeaconBlock 35 | // era-state := CompressedBeaconState 36 | // slot-index(block) := SlotIndex where count == 8192 37 | // slot-index(state) := SlotIndex where count == 1 38 | 39 | const ( 40 | headerSize = 8 41 | slotIndexOverhead = headerSize + 8 + 8 // starting-slot, count 42 | stateSlotIndexSize = slotIndexOverhead + 8 43 | SlotsPerEra = 8192 44 | blockSlotIndexSize = slotIndexOverhead + 8*SlotsPerEra 45 | ) 46 | 47 | var ErrNotExist = errors.New("entry does not exist") 48 | 49 | func Tell(f io.Seeker) (int64, error) { 50 | offset, err := f.Seek(0, io.SeekCurrent) 51 | if err != nil { 52 | return 0, fmt.Errorf("can't tell current offset: %w", err) 53 | } 54 | return offset, nil 55 | } 56 | 57 | type EntryType [2]byte 58 | 59 | var ( 60 | SlotIndexType = EntryType{'i', '2'} // starting-slot | index | index | index ... 
| count 61 | VersionType = EntryType{'e', '2'} // always 0-length 62 | CompressedSignedBeaconBlockType = EntryType{1, 0} 63 | CompressedBeaconStateType = EntryType{2, 0} 64 | EmptyType = EntryType{0, 0} // may have a length, data should be skipped 65 | ) 66 | 67 | func CopyEntry(f io.Reader, w io.Writer) (EntryType, error) { 68 | typ, length, err := ReadHeader(f) 69 | if err != nil { 70 | return EmptyType, fmt.Errorf("failed to read entry header: %w", err) 71 | } 72 | _, err = io.CopyN(w, f, int64(length)) 73 | return typ, err 74 | } 75 | 76 | func CoppySnappyEntry(f io.Reader, w io.Writer, sr *snappy.Reader, expectType EntryType) error { 77 | typ, length, err := ReadHeader(f) 78 | if err != nil { 79 | return fmt.Errorf("failed to read entry header: %w", err) 80 | } 81 | if typ != expectType { 82 | return fmt.Errorf("expected type %x but got type %x", expectType, typ) 83 | } 84 | sr.Reset(io.LimitReader(f, int64(length))) 85 | _, err = io.Copy(w, sr) 86 | if err != nil { 87 | return fmt.Errorf("failed to copy snappy output into writer: %w", err) 88 | } 89 | return nil 90 | } 91 | 92 | func ReadHeader(f io.Reader) (EntryType, uint32, error) { 93 | var x [8]byte 94 | if _, err := io.ReadFull(f, x[:]); err != nil { 95 | return EmptyType, 0, fmt.Errorf("failed to read header: %w", err) 96 | } 97 | if x[6] != 0 || x[7] != 0 { 98 | return EmptyType, 0, fmt.Errorf("reserved value is not 0, got %04x", x[6:]) 99 | } 100 | return EntryType{x[0], x[1]}, binary.LittleEndian.Uint32(x[2:6]), nil 101 | } 102 | 103 | func ReadUint64(f io.Reader) (uint64, error) { 104 | var x [8]byte 105 | if _, err := io.ReadFull(f, x[:]); err != nil { 106 | return 0, fmt.Errorf("failed to read value: %w", err) 107 | } 108 | return binary.LittleEndian.Uint64(x[:]), nil 109 | } 110 | 111 | func ReadInt64(f io.Reader) (int64, error) { 112 | x, err := ReadUint64(f) 113 | return int64(x), err 114 | } 115 | 116 | func ReadSlot(f io.Reader) (common.Slot, error) { 117 | x, err := ReadUint64(f) 118 | return common.Slot(x), err 119 | } 120 | 121 | // ReadBlockOffset reads the file offset of block i (slot relative to group slot-index). 122 | // Seeker f must be positioned at the end of a group. 123 | // Returns offset relative to start of file. 124 | func ReadBlockOffset(f io.ReadSeeker, i uint64) (int64, error) { 125 | // find start of block slot-index entry, then skip header (8) and starting-slot (8) to find indices. 126 | x := 8 + 8 + 8*int64(i) 127 | n, err := f.Seek(-stateSlotIndexSize-blockSlotIndexSize+x, io.SeekCurrent) 128 | if err != nil { 129 | return 0, fmt.Errorf("failed to lookup first block offset: %w", err) 130 | } 131 | offset, err := ReadInt64(f) 132 | if err != nil { 133 | return 0, fmt.Errorf("failed to read offset: %w", err) 134 | } 135 | result := n - x + offset 136 | if result == 0 { 137 | return 0, ErrNotExist 138 | } 139 | return result, nil 140 | } 141 | 142 | // ReadStateOffsetAndSlot reads the file offset of the state. 143 | // Seeker f must be positioned at the end of a group. 144 | // Returns offset relative to start of file. 145 | // Seeker f will be positioned where it started after a successful read. 146 | // The slot of the state is returned as well. 
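// Layout of the state slot-index (stateSlotIndexSize = 32 bytes):
// header (8) | starting-slot (8) | offset (8) | count (8, always 1),
// which is why the reader below first seeks back -stateSlotIndexSize from the group end.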
147 | func ReadStateOffsetAndSlot(f io.ReadSeeker) (offset int64, slot common.Slot, err error) { 148 | n, err := f.Seek(-stateSlotIndexSize, io.SeekCurrent) 149 | if err != nil { 150 | return 0, 0, fmt.Errorf("failed to lookup state slot-index: %w", err) 151 | } 152 | 153 | typ, length, err := ReadHeader(f) 154 | if err != nil { 155 | return 0, 0, fmt.Errorf("failed to read state slot-index header: %w", err) 156 | } 157 | if typ != SlotIndexType { 158 | return 0, 0, fmt.Errorf("expected state slot-index type: %w", err) 159 | } 160 | if length != 8*3 { 161 | return 0, 0, fmt.Errorf("unexpected state slot-index size: %d", length) 162 | } 163 | 164 | slot, err = ReadSlot(f) 165 | if err != nil { 166 | return 0, 0, fmt.Errorf("failed to read starting slot: %w", err) 167 | } 168 | 169 | offset, err = ReadInt64(f) 170 | if err != nil { 171 | return 0, 0, fmt.Errorf("failed to read offset: %w", err) 172 | } 173 | 174 | count, err := ReadUint64(f) 175 | if err != nil { 176 | return 0, 0, fmt.Errorf("failed to read count: %w", err) 177 | } 178 | if count != 1 { 179 | return 0, 0, fmt.Errorf("unexpected number of states: %w", err) 180 | } 181 | 182 | return n + offset, slot, nil 183 | } 184 | 185 | func SeekGroupStart(f io.ReadSeeker, groupEnd int64) error { 186 | _, err := f.Seek(groupEnd, io.SeekStart) 187 | if err != nil { 188 | return fmt.Errorf("failed to seek to end of group: %w", err) 189 | } 190 | for i := uint64(0); i < SlotsPerEra; i++ { 191 | if err := SeekBlock(f, i, groupEnd); err == ErrNotExist { 192 | continue 193 | } else if err != nil { 194 | return fmt.Errorf("failed to seek to block %d to find group start: %w", i, err) 195 | } 196 | if _, err := f.Seek(-8, io.SeekCurrent); err != nil { 197 | return fmt.Errorf("failed to skip version part before first block entry (%d): %w", i, err) 198 | } 199 | return nil 200 | } 201 | if _, err := SeekState(f, groupEnd); err != nil { 202 | return fmt.Errorf("failed to seek to state to find group start: %w", err) 203 | } 204 | _, err = f.Seek(-8, io.SeekCurrent) 205 | if err != nil { 206 | return fmt.Errorf("failed to skip version part before state: %w", err) 207 | } 208 | return nil 209 | } 210 | 211 | func SeekBlock(f io.ReadSeeker, i uint64, groupEnd int64) error { 212 | if _, err := f.Seek(groupEnd, io.SeekStart); err != nil { 213 | return fmt.Errorf("failed to seek to end of group: %w", err) 214 | } 215 | offset, err := ReadBlockOffset(f, i) 216 | if offset == 0 { 217 | return ErrNotExist 218 | } 219 | if err != nil { 220 | return fmt.Errorf("failed to read block %d offset: %w", i, err) 221 | } 222 | if _, err := f.Seek(offset, io.SeekStart); err != nil { 223 | return fmt.Errorf("failed to seek to block %d at offset %d: %w", i, offset, err) 224 | } 225 | return nil 226 | } 227 | 228 | func SeekState(f io.ReadSeeker, groupEnd int64) (slot common.Slot, err error) { 229 | if _, err := f.Seek(groupEnd, io.SeekStart); err != nil { 230 | return 0, fmt.Errorf("failed to seek to end of group: %w", err) 231 | } 232 | offset, v, err := ReadStateOffsetAndSlot(f) 233 | if err != nil { 234 | return 0, fmt.Errorf("failed to read state offset: %w", err) 235 | } 236 | if _, err := f.Seek(offset, io.SeekStart); err != nil { 237 | return 0, fmt.Errorf("failed to seek to state at offset %d: %w", offset, err) 238 | } 239 | return v, nil 240 | } 241 | -------------------------------------------------------------------------------- /go.sum: -------------------------------------------------------------------------------- 1 | github.com/cpuguy83/go-md2man/v2 v2.0.2 
h1:p1EgwI/C7NhT0JmVkwCD2ZBK8j4aeHQX2pMHHBfMQ6w= 2 | github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= 3 | github.com/ethereum/go-ethereum v1.11.6 h1:2VF8Mf7XiSUfmoNOy3D+ocfl9Qu8baQBrCNbo2CXQ8E= 4 | github.com/ethereum/go-ethereum v1.11.6/go.mod h1:+a8pUj1tOyJ2RinsNQD4326YS+leSoKGiG/uVVb0x6Y= 5 | github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= 6 | github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= 7 | github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY= 8 | github.com/go-stack/stack v1.8.1 h1:ntEHSVwIt7PNXNpgPmVfMrNhLtgjlmnZha2kOpuRiDw= 9 | github.com/go-stack/stack v1.8.1/go.mod h1:dcoOX6HbPZSZptuspn9bctJ+N/CnF5gGygcUP3XYfe4= 10 | github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= 11 | github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= 12 | github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= 13 | github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= 14 | github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= 15 | github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= 16 | github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= 17 | github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= 18 | github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= 19 | github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb h1:PBC98N2aIaM3XXiurYmW7fx4GZkL8feAMVq7nEjURHk= 20 | github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= 21 | github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= 22 | github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= 23 | github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= 24 | github.com/holiman/uint256 v1.2.0/go.mod h1:y4ga/t+u+Xwd7CpDgZESaRcWy0I7XMlTMA25ApIH5Jw= 25 | github.com/holiman/uint256 v1.2.2-0.20230321075855-87b91420868c h1:DZfsyhDK1hnSS5lH8l+JggqzEleHteTYfutAiVlSUM8= 26 | github.com/holiman/uint256 v1.2.2-0.20230321075855-87b91420868c/go.mod h1:SC8Ryt4n+UBbPbIBKaG9zbbDlp4jOru9xFZmPzLUTxw= 27 | github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= 28 | github.com/kilic/bls12-381 v0.1.0 h1:encrdjqKMEvabVQ7qYOKu1OvhqpK4s47wDYtNiPtlp4= 29 | github.com/kilic/bls12-381 v0.1.0/go.mod h1:vDTTHJONJ6G+P2R74EhnyotQDTliQDnFEwhdmfzw1ig= 30 | github.com/klauspost/cpuid/v2 v2.0.4/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= 31 | github.com/klauspost/cpuid/v2 v2.1.0 h1:eyi1Ad2aNJMW95zcSbmGg7Cg6cq3ADwLpMAP96d8rF0= 32 | github.com/klauspost/cpuid/v2 v2.1.0/go.mod h1:RVVoqg1df56z8g3pUjL/3lE5UfnlrJX8tyFgg4nqhuY= 33 | github.com/minio/sha256-simd v0.1.0/go.mod h1:2FMWW+8GMoPweT6+pI63m9YE3Lmw4J71hV56Chs1E/U= 34 | github.com/minio/sha256-simd v1.0.0 h1:v1ta+49hkWZyvaKwrQB8elexRqm6Y0aMLjCNsrYxo6g= 35 | github.com/minio/sha256-simd v1.0.0/go.mod h1:OuYzVNI5vcoYIAmbIvHPl3N3jUzVedXbKy5RFepssQM= 36 | github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78= 37 | github.com/nxadm/tail v1.4.4/go.mod 
h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= 38 | github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= 39 | github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= 40 | github.com/onsi/ginkgo v1.14.0 h1:2mOpI4JVVPBN+WQRa0WKH2eXR+Ey+uK4n7Zj0aYpIQA= 41 | github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= 42 | github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= 43 | github.com/onsi/gomega v1.10.1 h1:o0+MgICZLuZ7xjH7Vx6zS/zcu93/BEp1VwkIW1mEXCE= 44 | github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= 45 | github.com/protolambda/bls12-381-util v0.0.0-20210720105258-a772f2aac13e/go.mod h1:MPZvj2Pr0N8/dXyTPS5REeg2sdLG7t8DRzC1rLv925w= 46 | github.com/protolambda/bls12-381-util v0.0.0-20220416220906-d8552aa452c7 h1:cZC+usqsYgHtlBaGulVnZ1hfKAi8iWtujBnRLQE698c= 47 | github.com/protolambda/bls12-381-util v0.0.0-20220416220906-d8552aa452c7/go.mod h1:IToEjHuttnUzwZI5KBSM/LOOW3qLbbrHOEfp3SbECGY= 48 | github.com/protolambda/messagediff v1.4.0/go.mod h1:LboJp0EwIbJsePYpzh5Op/9G1/4mIztMRYzzwR0dR2M= 49 | github.com/protolambda/zrnt v0.30.0 h1:pHEn69ZgaDFGpLGGYG1oD7DvYI7RDirbMBPfbC+8p4g= 50 | github.com/protolambda/zrnt v0.30.0/go.mod h1:qcdX9CXFeVNCQK/q0nswpzhd+31RHMk2Ax/2lMsJ4Jw= 51 | github.com/protolambda/ztyp v0.2.2 h1:rVcL3vBu9W/aV646zF6caLS/dyn9BN8NYiuJzicLNyY= 52 | github.com/protolambda/ztyp v0.2.2/go.mod h1:9bYgKGqg3wJqT9ac1gI2hnVb0STQq7p/1lapqrqY1dU= 53 | github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= 54 | github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= 55 | github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY= 56 | github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc= 57 | github.com/urfave/cli/v2 v2.17.2-0.20221006022127-8f469abc00aa h1:5SqCsI/2Qya2bCzK15ozrqo2sZxkh0FHynJZOTVoV6Q= 58 | github.com/urfave/cli/v2 v2.17.2-0.20221006022127-8f469abc00aa/go.mod h1:1CNUng3PtjQMtRzJO4FMXBQvkGtuYRxxiR9xMa7jMwI= 59 | github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 h1:bAn7/zixMGCfxrRTfdpNzjtPYqr8smhKouy9mxVdGPU= 60 | github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673/go.mod h1:N3UwUGtsrSj3ccvlPHLoLsHnpR27oXr4ZE984MbSER8= 61 | golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= 62 | golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= 63 | golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= 64 | golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= 65 | golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= 66 | golang.org/x/net v0.0.0-20200813134508-3edf25e44fcc/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= 67 | golang.org/x/net v0.8.0 h1:Zrh2ngAOFYneWTAIAPethzeaQLuHwhuBkuV6ZiRnUaQ= 68 | golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 69 | golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= 70 | golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= 71 | 
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 72 | golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 73 | golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 74 | golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 75 | golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 76 | golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 77 | golang.org/x/sys v0.0.0-20200814200057-3d37ad5750ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 78 | golang.org/x/sys v0.0.0-20201101102859-da207088b7d1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 79 | golang.org/x/sys v0.0.0-20220704084225-05e143d24a9e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 80 | golang.org/x/sys v0.6.0 h1:MVltZSvRTcU2ljQOhs94SXPftV6DCNnZViHeQps87pQ= 81 | golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 82 | golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= 83 | golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= 84 | golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= 85 | golang.org/x/text v0.8.0 h1:57P1ETyNKtuIjB4SRd15iJxuhj8Gc416Y78H3qgMh68= 86 | golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= 87 | golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= 88 | golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= 89 | golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df h1:5Pf6pFKu98ODmgnpvkJ3kFUOQGGLIzLIkbzUHp47618= 90 | google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= 91 | google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= 92 | google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= 93 | google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= 94 | google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= 95 | google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= 96 | gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= 97 | gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= 98 | gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= 99 | gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= 100 | gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= 101 | gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= 102 | gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= 103 | gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= 104 | gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= 105 | gopkg.in/yaml.v3 v3.0.1 
h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= 106 | gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= 107 | -------------------------------------------------------------------------------- /fun/tiles.go: -------------------------------------------------------------------------------- 1 | package fun 2 | 3 | import ( 4 | "encoding/binary" 5 | "fmt" 6 | 7 | "github.com/ethereum/go-ethereum/log" 8 | "github.com/golang/snappy" 9 | "github.com/protolambda/zrnt/eth2/beacon/common" 10 | "github.com/syndtr/goleveldb/leveldb" 11 | "github.com/syndtr/goleveldb/leveldb/util" 12 | ) 13 | 14 | const ( 15 | // KeyTile is a: 16 | // 3 byte prefix for tile keying, followed by: 17 | // 1 byte tile type 18 | // 1 byte zoom. 19 | // 4 byte big endian X 20 | // 4 byte big endian Y 21 | // 22 | // Note: the X is first, so the DB range iterate can range over epochs at zoom 0. 23 | // 24 | // Values under this key are snappy block-compressed. 25 | // 26 | // The uncompressed value is 4 squares of tileSize x tileSize, one for R, one for G, B, and A 27 | // The squares encode column by column. 28 | // 29 | // We can encode tiles in with R, G, B, A grouped together separately. 30 | // And implement the image.Image interface to map back to an image. 31 | // This way we compress better, and don't store as much alpha-channel data. 32 | KeyTile string = "til" 33 | ) 34 | 35 | func tileDbKey(tileType uint8, tX uint64, tY uint64, zoom uint8) []byte { 36 | var key [3 + 1 + 4 + 4 + 1]byte 37 | copy(key[:3], KeyTile) 38 | key[3] = tileType 39 | key[4] = zoom 40 | binary.BigEndian.PutUint32(key[3+1+1:3+1+1+4], uint32(tX)) 41 | binary.BigEndian.PutUint32(key[3+1+1+4:3+1+1+4+4], uint32(tY)) 42 | return key[:] 43 | } 44 | 45 | func performanceToTiles(log log.Logger, tilesDB *leveldb.DB, perfDB *leveldb.DB, tileType uint8, tX uint64) error { 46 | maxValidators := uint64(0) 47 | for x := uint64(0); x < tileSize; x++ { 48 | epoch := common.Epoch(tX*tileSize + x) 49 | perf, err := getPerf(perfDB, epoch) 50 | if err != nil { 51 | // no data for this epoch 52 | continue 53 | } 54 | if uint64(len(perf)) > maxValidators { 55 | maxValidators = uint64(len(perf)) 56 | } 57 | } 58 | 59 | tilesY := (maxValidators + tileSize - 1) / tileSize 60 | // each tile is an array of 4 byte items. tileSize consecutive of those form a row, and then tileSize rows. 
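	// (More precisely: the buffer holds four planes of tileSize*tileSize bytes, R then G
	// then B then A, and each plane is written column-major with pos = x*tileSize + y,
	// matching the Tile type in tile_handler.go and the KeyTile doc above.)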
61 | // RGBA 62 | tiles := make([][]byte, tilesY) 63 | tileBytes := 4 * tileSize * tileSize 64 | for tY := uint64(0); tY < tilesY; tY++ { 65 | tiles[tY] = make([]byte, tileBytes) 66 | } 67 | for x := uint64(0); x < tileSize; x++ { 68 | epoch := common.Epoch(tX*tileSize + x) 69 | //fmt.Printf("processing epoch %d\n", epoch) 70 | perf, err := getPerf(perfDB, epoch) 71 | if err != nil { 72 | log.Info("no performance data for epoch", "epoch", epoch) 73 | continue 74 | //return fmt.Errorf("failed to get epoch data %d: %v", epoch, err) 75 | } 76 | 77 | // TODO: based on tileType apply a y-axis remapping on the performance 78 | 79 | for vi, vPerf := range perf { 80 | tY := uint64(vi) / tileSize 81 | tile := tiles[tY] 82 | tileR := tile[:tileSizeSquared] 83 | tileG := tile[tileSizeSquared : tileSizeSquared*2] 84 | tileB := tile[tileSizeSquared*2 : tileSizeSquared*3] 85 | tileA := tile[tileSizeSquared*3:] 86 | 87 | y := uint64(vi) % tileSize 88 | pos := x*tileSize + y 89 | // max alpha 90 | tileA[pos] = 0xff 91 | 92 | if vPerf&ValidatorExists == 0 { 93 | // if not existing, then black pixel 94 | tileR[pos] = 0 95 | tileG[pos] = 0 96 | tileB[pos] = 0 97 | } else { 98 | // if existent, but not participating, then color it a special gray 99 | if vPerf == ValidatorExists { 100 | tileR[pos] = 0x20 101 | tileG[pos] = 0x20 102 | tileB[pos] = 0x20 103 | } else { 104 | // higher head distance becomes darker (unknown is 0xff) 105 | headDist := uint32((vPerf >> 24) & 0xff) 106 | if headDist == 0xff { 107 | tileR[pos] = 0x30 108 | } else { 109 | q := 64 - headDist 110 | q = (q * q * q * q * q) >> 22 111 | tileR[pos] = uint8(q) 112 | } 113 | // correct target is 0xff, incorrect is 0 114 | tileG[pos] = byte(vPerf >> 16) 115 | // higher inclusion distance becomes darker 116 | inclDist := uint32((vPerf >> 8) & 0xff) 117 | if inclDist == 0xff { 118 | tileB[pos] = 0x30 119 | } else { 120 | q := 64 - inclDist 121 | q = (q * q * q * q * q) >> 22 122 | tileB[pos] = uint8(q) 123 | } 124 | } 125 | } 126 | } 127 | for vi := uint64(len(perf)); vi < maxValidators; vi++ { 128 | tY := vi / tileSize 129 | tile := tiles[tY] 130 | tileR := tile[:tileSizeSquared] 131 | tileG := tile[tileSizeSquared : tileSizeSquared*2] 132 | tileB := tile[tileSizeSquared*2 : tileSizeSquared*3] 133 | tileA := tile[tileSizeSquared*3:] 134 | 135 | y := vi % tileSize 136 | pos := x*tileSize + y 137 | // transparent pixel 138 | tileR[pos] = 0 139 | tileG[pos] = 0 140 | tileB[pos] = 0 141 | tileA[pos] = 0 142 | } 143 | } 144 | for tY, tile := range tiles { 145 | // TODO more types 146 | key := tileDbKey(tileType, tX, uint64(tY), 0) 147 | // compress the tile image 148 | tile = snappy.Encode(nil, tile) 149 | if err := tilesDB.Put(key, tile, nil); err != nil { 150 | return fmt.Errorf("failed to write tile %d:%d (zoom 0): %v", tX, tY, err) 151 | } 152 | } 153 | return nil 154 | } 155 | 156 | func convTiles(tilesDB *leveldb.DB, tileType uint8, tX uint64, zoom uint8) error { 157 | for tY := uint64(0); true; tY += 1 { 158 | topLeft := tileDbKey(tileType, tX*2, tY*2, zoom-1) 159 | topRight := tileDbKey(tileType, tX*2+1, tY*2, zoom-1) 160 | // remember, y is downwards 161 | bottomLeft := tileDbKey(tileType, tX*2, tY*2+1, zoom-1) 162 | bottomRight := tileDbKey(tileType, tX*2+1, tY*2+1, zoom-1) 163 | 164 | hasY := false 165 | for _, key := range [][]byte{topLeft, topRight, bottomLeft, bottomRight} { 166 | if haz, err := tilesDB.Has(key, nil); err != nil { 167 | return fmt.Errorf("failed to check key presence: %w", err) 168 | } else if haz { 169 | hasY = true 170 
| } 171 | } 172 | if !hasY { 173 | break 174 | } 175 | 176 | getTile := func(key []byte) ([]byte, error) { 177 | tile, err := tilesDB.Get(key, nil) 178 | if err == leveldb.ErrNotFound { 179 | // use empty tile instead 180 | tile = make([]byte, 4*tileSize*tileSize) 181 | return tile, nil 182 | } else if err != nil { 183 | return nil, fmt.Errorf("failed to get top left of (%d; %d): %v", tX, tY, err) 184 | } else { 185 | return snappy.Decode(nil, tile) 186 | } 187 | } 188 | topLeftTile, err := getTile(topLeft) 189 | if err != nil { 190 | return err 191 | } 192 | topRightTile, err := getTile(topRight) 193 | if err != nil { 194 | return err 195 | } 196 | bottomLeftTile, err := getTile(bottomLeft) 197 | if err != nil { 198 | return err 199 | } 200 | bottomRightTile, err := getTile(bottomRight) 201 | if err != nil { 202 | return err 203 | } 204 | 205 | outTile := make([]byte, 4*tileSize*tileSize) 206 | mix := func(a, b, c, d byte) byte { 207 | return uint8((uint16(a) + uint16(b) + uint16(c) + uint16(d)) / 4) 208 | } 209 | // compress a N x N tile into a N/2 x N/2 tile, writing it to a tile at the given offset. 210 | compressedFn := func(offX uint64, offY uint64, inTile []byte) { 211 | for x := uint64(0); x < tileSize/2; x++ { 212 | for y := uint64(0); y < tileSize/2; y++ { 213 | // top left, top right, bottom left, bottom right 214 | p0 := x*2*tileSize + y*2 215 | p1 := p0 + tileSize 216 | p2 := p0 + 1 217 | p3 := p2 + tileSize 218 | 219 | r0, r1, r2, r3 := inTile[p0], inTile[p1], inTile[p2], inTile[p3] 220 | p0, p1, p2, p3 = p0+tileSizeSquared, p1+tileSizeSquared, p2+tileSizeSquared, p3+tileSizeSquared 221 | g0, g1, g2, g3 := inTile[p0], inTile[p1], inTile[p2], inTile[p3] 222 | p0, p1, p2, p3 = p0+tileSizeSquared, p1+tileSizeSquared, p2+tileSizeSquared, p3+tileSizeSquared 223 | b0, b1, b2, b3 := inTile[p0], inTile[p1], inTile[p2], inTile[p3] 224 | p0, p1, p2, p3 = p0+tileSizeSquared, p1+tileSizeSquared, p2+tileSizeSquared, p3+tileSizeSquared 225 | a0, a1, a2, a3 := inTile[p0], inTile[p1], inTile[p2], inTile[p3] 226 | 227 | r := mix(r0, r1, r2, r3) 228 | g := mix(g0, g1, g2, g3) 229 | b := mix(b0, b1, b2, b3) 230 | a := mix(a0, a1, a2, a3) 231 | pos := (offX+x)*tileSize + (offY + y) 232 | outTile[pos] = r 233 | pos += tileSizeSquared 234 | outTile[pos] = g 235 | pos += tileSizeSquared 236 | outTile[pos] = b 237 | pos += tileSizeSquared 238 | outTile[pos] = a 239 | } 240 | } 241 | } 242 | compressedFn(0, 0, topLeftTile) 243 | compressedFn(tileSize/2, 0, topRightTile) 244 | compressedFn(0, tileSize/2, bottomLeftTile) 245 | compressedFn(tileSize/2, tileSize/2, bottomRightTile) 246 | 247 | key := tileDbKey(tileType, tX, tY, zoom) 248 | // compress the tile image 249 | outTile = snappy.Encode(nil, outTile) 250 | if err := tilesDB.Put(key, outTile, nil); err != nil { 251 | return fmt.Errorf("failed to write tile %d:%d (zoom %d): %v", tX, tY, zoom, err) 252 | } 253 | } 254 | return nil 255 | } 256 | 257 | func lastTileEpoch(tilesDB *leveldb.DB, tileType uint8) (common.Epoch, error) { 258 | iter := tilesDB.NewIterator(util.BytesPrefix(append([]byte(KeyTile), tileType, 0)), nil) 259 | defer iter.Release() 260 | if iter.Last() { 261 | epoch := common.Epoch(binary.BigEndian.Uint32(iter.Key()[3+1+1:3+1+1+4])) * tileSize 262 | return epoch, nil 263 | } else { 264 | return 0, iter.Error() 265 | } 266 | } 267 | 268 | func resetTilesTyped(tilesDB *leveldb.DB, spec *common.Spec, tileType uint8, resetSlot common.Slot) error { 269 | resetEpoch := spec.SlotToEpoch(resetSlot) 270 | 271 | lastEpoch, err := 
lastTileEpoch(tilesDB, tileType) 272 | if err != nil { 273 | return err 274 | } 275 | 276 | if resetEpoch > lastEpoch { // check if there's anything to reset 277 | return nil 278 | } 279 | 280 | var batch leveldb.Batch 281 | for z := uint8(0); z < maxZoom; z++ { 282 | start := uint32(resetEpoch >> z) 283 | end := uint32(lastEpoch >> z) 284 | r := &util.Range{ 285 | Start: make([]byte, 3+1+1+4), 286 | Limit: make([]byte, 3+1+1+4), 287 | } 288 | copy(r.Start[:3], KeyTile) 289 | r.Start[3] = tileType 290 | r.Start[3+1] = z 291 | binary.BigEndian.PutUint32(r.Start[3+1+1:], start) 292 | 293 | copy(r.Limit[:3], KeyTile) 294 | r.Limit[3] = tileType 295 | r.Limit[3+1] = z 296 | binary.BigEndian.PutUint32(r.Limit[3+1+1:], end+1) 297 | 298 | iter := tilesDB.NewIterator(r, nil) 299 | for iter.Next() { 300 | batch.Delete(iter.Key()) 301 | } 302 | iter.Release() 303 | } 304 | if err := tilesDB.Write(&batch, nil); err != nil { 305 | return fmt.Errorf("failed to remove tile data of type %d, resetting to slot %d: %v", tileType, resetSlot, err) 306 | } 307 | return nil 308 | } 309 | 310 | func UpdateTiles(log log.Logger, tiles, perf *leveldb.DB, startEpoch, endEpoch common.Epoch) error { 311 | if endEpoch < startEpoch { 312 | return fmt.Errorf("end epoch cannot be lower than start epoch: %d < %d", endEpoch, startEpoch) 313 | } 314 | lastPerfEpoch, err := lastPerfEpoch(perf) 315 | if err != nil { 316 | return fmt.Errorf("could not read max block slot: %w", err) 317 | } 318 | if lastPerfEpoch < endEpoch { 319 | log.Info("reducing end epoch to available performance data", "end", lastPerfEpoch) 320 | endEpoch = lastPerfEpoch 321 | } 322 | 323 | for tX := uint64(startEpoch) / tileSize; tX <= uint64(endEpoch)/tileSize; tX++ { 324 | log.Info("creating base tiles", "tX", tX, "zoom", 0) 325 | if err := performanceToTiles(log, tiles, perf, 0, tX); err != nil { 326 | return fmt.Errorf("failed to update zoom 0 tiles at tX %d: %v", tX, err) 327 | } 328 | } 329 | 330 | for z := uint8(1); z <= maxZoom; z++ { 331 | tileSizeAbs := uint64(tileSize) << z 332 | tilesXStart := uint64(startEpoch) / tileSizeAbs 333 | tilesXEnd := (uint64(endEpoch) + tileSizeAbs - 1) / tileSizeAbs 334 | for i := tilesXStart; i < tilesXEnd; i++ { 335 | log.Info("computing conv tiles", "tX", i, "zoom", z) 336 | if err := convTiles(tiles, 0, i, z); err != nil { 337 | return fmt.Errorf("failed tile convolution layer at zoom %d tX %d: %v", z, i, err) 338 | } 339 | } 340 | } 341 | 342 | log.Info("finished computing tile data") 343 | return nil 344 | } 345 | -------------------------------------------------------------------------------- /fun/perf.go: -------------------------------------------------------------------------------- 1 | package fun 2 | 3 | import ( 4 | "context" 5 | "encoding/binary" 6 | "errors" 7 | "fmt" 8 | "sync" 9 | 10 | "github.com/ethereum/go-ethereum/log" 11 | "github.com/golang/snappy" 12 | "github.com/protolambda/zrnt/eth2/beacon/altair" 13 | "github.com/protolambda/zrnt/eth2/beacon/bellatrix" 14 | "github.com/protolambda/zrnt/eth2/beacon/capella" 15 | "github.com/protolambda/zrnt/eth2/beacon/common" 16 | "github.com/protolambda/zrnt/eth2/beacon/phase0" 17 | "github.com/protolambda/zrnt/eth2/util/hashing" 18 | "github.com/syndtr/goleveldb/leveldb" 19 | "github.com/syndtr/goleveldb/leveldb/util" 20 | 21 | "github.com/protolambda/consensus-actor/fun/era" 22 | ) 23 | 24 | const ( 25 | // KeyPerf is a: 26 | // 3 byte prefix for per-epoch performance keying, followed by: 27 | // 8 byte big-endian epoch value. 
(big endian to make db byte-prefix iteration and range-slices follow epoch order) 28 | // 29 | // The epoch key represents the boundary when the data became available. 30 | // I.e. epoch == 2 means that 0 == prev and 1 == current were processed. 31 | // 32 | // Values under this key are snappy block-compressed. 33 | // 34 | // The value is a []ValidatorPerformance 35 | KeyPerf string = "prf" 36 | ) 37 | 38 | type ValidatorPerformance uint32 39 | 40 | const ( 41 | // InclusionDistance is the unit of the inclusion-distance byte, spanning the next 64 values (6 bits). Always non-zero when included 42 | InclusionDistance ValidatorPerformance = 0x00_00_01_00 43 | 44 | InclusionDistanceMask = 0x00_00_ff_00 45 | 46 | // source is always correct, or wouldn't be included on-chain 47 | TargetCorrect ValidatorPerformance = 0x00_ff_00_00 48 | 49 | // up to 64, or 0xff if unknown 50 | HeadDistance ValidatorPerformance = 0x01_00_00_00 51 | 52 | ValidatorExists ValidatorPerformance = 0x00_00_00_01 53 | ) 54 | 55 | func shufflingSeed(spec *common.Spec, randaoFn RandaoLookup, epoch common.Epoch) ([32]byte, error) { 56 | buf := make([]byte, 4+8+32) 57 | 58 | // domain type 59 | copy(buf[0:4], common.DOMAIN_BEACON_ATTESTER[:]) 60 | 61 | // epoch 62 | binary.LittleEndian.PutUint64(buf[4:4+8], uint64(epoch)) 63 | 64 | mix, err := randaoFn(epoch) 65 | if err != nil { 66 | return [32]byte{}, err 67 | } 68 | copy(buf[4+8:], mix[:]) 69 | 70 | return hashing.Hash(buf), nil 71 | } 72 | 73 | func shuffling(spec *common.Spec, randaoFn RandaoLookup, indicesBounded []common.BoundedIndex, epoch common.Epoch) (*common.ShufflingEpoch, error) { 74 | seed, err := shufflingSeed(spec, randaoFn, epoch) 75 | if err != nil { 76 | return nil, fmt.Errorf("failed to compute seed: %v", err) 77 | } 78 | return common.NewShufflingEpoch(spec, indicesBounded, seed, epoch), nil 79 | } 80 | 81 | // with 1 epoch delay (inclusion can be delayed), check validator performance 82 | // if currEp == 0, then process only 0, filtered for target == 0 83 | // if currEp == 1, then process 0 and 1, filtered for target == 0 84 | // if currEp == 2, then process 1 and 2, filtered for target == 1 85 | // etc.
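//
// Each entry of the returned []ValidatorPerformance packs the fields defined by the constants above;
// an illustrative way to read a value p back (sketch only, these locals are not used elsewhere in this package):
//
//	exists := p&ValidatorExists != 0
//	inclDist := (p & InclusionDistanceMask) >> 8 // slots of inclusion delay, 0 if never included
//	targetOK := p&TargetCorrect != 0
//	headDist := (p >> 24) & 0xff                 // 0xff if the head-vote block root was not found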
86 | func processPerf(spec *common.Spec, 87 | blockRootFn BlockRootLookup, 88 | attFn AttestationsLookup, randaoFn RandaoLookup, 89 | indicesBounded []common.BoundedIndex, currEp common.Epoch) ([]ValidatorPerformance, error) { 90 | // don't have to re-hash the block if we just load the hashes 91 | 92 | // get all block roots in previous and current epoch (or just current if genesis) 93 | var roots []common.Root 94 | 95 | // clips to start 96 | prevEp := currEp.Previous() 97 | prevStart, err := spec.EpochStartSlot(prevEp) 98 | if err != nil { 99 | return nil, fmt.Errorf("bad epoch start slot of prev epoch: %w", err) 100 | } 101 | 102 | count := spec.SLOTS_PER_EPOCH * 2 103 | if prevEp == currEp { 104 | count = spec.SLOTS_PER_EPOCH 105 | } 106 | 107 | for i := common.Slot(0); i < spec.SLOTS_PER_EPOCH; i++ { 108 | slot := prevStart + i 109 | blockRoot, err := blockRootFn(slot) 110 | if err != nil { 111 | return nil, fmt.Errorf("failed to get block root of slot: %d", slot) 112 | } 113 | roots = append(roots, blockRoot) 114 | } 115 | 116 | // get all blocks in previous and/or current epoch 117 | blocks := make([]SlotAttestations, 0, count) 118 | for i := common.Slot(0); i < count; i++ { 119 | slot := prevStart + i 120 | if atts, err := attFn(slot); err != nil { 121 | return nil, fmt.Errorf("failed to get block at slot %d: %v", slot, err) 122 | } else { 123 | blocks = append(blocks, SlotAttestations{Slot: slot, Attestations: atts}) 124 | } 125 | } 126 | 127 | prevShuf, err := shuffling(spec, randaoFn, indicesBounded, prevEp) 128 | if err != nil { 129 | return nil, fmt.Errorf("failed to get shuffling for epoch %d: %v", prevEp, err) 130 | } 131 | 132 | // figure out how much space we need. There may be some gaps, 133 | // if validators didn't immediately activate, those values will just be 0 134 | maxValidatorIndex := common.ValidatorIndex(0) 135 | for _, vi := range prevShuf.ActiveIndices { 136 | if vi > maxValidatorIndex { 137 | maxValidatorIndex = vi 138 | } 139 | } 140 | // per validator, track who was already included for work this epoch 141 | validatorPerfs := make([]ValidatorPerformance, maxValidatorIndex+1) 142 | for i := range validatorPerfs { 143 | validatorPerfs[i] = ValidatorExists 144 | } 145 | // TODO: second perf array, in order of committees, so next stage doesn't deal with shuffling 146 | /// and per slot / committee index, instead of per epoch 147 | 148 | expectedTargetRoot := roots[0] 149 | 150 | // early blocks first, previous epoch (if any), then current epoch 151 | for _, bl := range blocks { 152 | for _, att := range bl.Attestations { 153 | // skip newer attestations. Anyone who votes for the same target epoch in two conflicting ways is slashable, 154 | // and although it is accounted for in performance on-chain, we ignore it here. 
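// e.g. an attestation with target epoch currEp is credited when processing currEp+1 (which filters for target == currEp); here only target == prevEp counts.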
155 | if att.Data.Target.Epoch != prevEp { 156 | continue 157 | } 158 | 159 | perf := ValidatorExists 160 | // target performance 161 | if expectedTargetRoot == att.Data.Target.Root { 162 | perf |= TargetCorrect 163 | } 164 | 165 | // head accuracy 166 | headDist := 1 167 | found := false 168 | for i := int(att.Data.Slot); i >= int(prevStart); i-- { 169 | if att.Data.BeaconBlockRoot != roots[i-int(prevStart)] { 170 | headDist++ 171 | } else { 172 | found = true 173 | break 174 | } 175 | } 176 | if !found { 177 | headDist = 0xff 178 | } 179 | perf |= HeadDistance * ValidatorPerformance(headDist) 180 | 181 | // inclusion distance 182 | perf |= InclusionDistance * ValidatorPerformance(bl.Slot-att.Data.Slot) 183 | 184 | comm := prevShuf.Committees[att.Data.Slot-prevStart][att.Data.Index] 185 | for bitIndex, valIndex := range comm { 186 | if bl := att.AggregationBits.BitLen(); bl != uint64(len(comm)) { 187 | return nil, fmt.Errorf("unexpected attestation bitfield length: %d (expected %d) in epoch %d", bl, len(comm), prevEp) 188 | } 189 | if att.AggregationBits.GetBit(uint64(bitIndex)) { 190 | // only if the validator was not already seen 191 | if validatorPerfs[valIndex]&InclusionDistanceMask == 0 { 192 | validatorPerfs[valIndex] = perf 193 | } 194 | } 195 | } 196 | } 197 | } 198 | return validatorPerfs, nil 199 | } 200 | 201 | func getPerf(perfDB *leveldb.DB, currEp common.Epoch) ([]ValidatorPerformance, error) { 202 | var key [3 + 8]byte 203 | copy(key[:3], KeyPerf) 204 | binary.BigEndian.PutUint64(key[3:], uint64(currEp)) 205 | out, err := perfDB.Get(key[:], nil) 206 | if err != nil { 207 | return nil, err 208 | } 209 | out, err = snappy.Decode(nil, out) 210 | if err != nil { 211 | return nil, err 212 | } 213 | perf := make([]ValidatorPerformance, len(out)/4) 214 | for i := 0; i < len(out); i += 4 { 215 | perf[i/4] = ValidatorPerformance(binary.LittleEndian.Uint32(out[i : i+4])) 216 | } 217 | return perf, nil 218 | } 219 | 220 | func lastPerfEpoch(perfDB *leveldb.DB) (common.Epoch, error) { 221 | iter := perfDB.NewIterator(util.BytesPrefix([]byte(KeyPerf)), nil) 222 | defer iter.Release() 223 | if iter.Last() { 224 | epoch := common.Epoch(binary.BigEndian.Uint64(iter.Key()[3:])) 225 | return epoch, nil 226 | } else { 227 | return 0, iter.Error() 228 | } 229 | } 230 | 231 | func resetPerf(perfDB *leveldb.DB, spec *common.Spec, resetSlot common.Slot) error { 232 | ep, err := lastPerfEpoch(perfDB) 233 | if err != nil { 234 | return err 235 | } 236 | if ep < spec.SlotToEpoch(resetSlot) { 237 | return nil 238 | } 239 | 240 | prefix := []byte(KeyPerf) 241 | start := uint64(spec.SlotToEpoch(resetSlot)) 242 | end := uint64(ep) + 1 243 | 244 | keyRange := &util.Range{ 245 | Start: make([]byte, 3+8), 246 | Limit: make([]byte, 3+8), 247 | } 248 | copy(keyRange.Start[:3], prefix) 249 | binary.BigEndian.PutUint64(keyRange.Start[3:], start) 250 | copy(keyRange.Limit[:3], prefix) 251 | binary.BigEndian.PutUint64(keyRange.Limit[3:], end) 252 | 253 | iter := perfDB.NewIterator(keyRange, nil) 254 | defer iter.Release() 255 | 256 | var batch leveldb.Batch 257 | for iter.Next() { 258 | batch.Delete(iter.Key()) 259 | } 260 | 261 | if err := perfDB.Write(&batch, nil); err != nil { 262 | return fmt.Errorf("failed to cleanup conflicting perf mix data with key %v", err) 263 | } 264 | 265 | return nil 266 | } 267 | 268 | type perfJob struct { 269 | start common.Epoch 270 | end common.Epoch 271 | } 272 | 273 | func UpdatePerf(ctx context.Context, log log.Logger, perf *leveldb.DB, spec *common.Spec, st *era.Store, 
start, end common.Epoch, workers int) error { 274 | if end < start { 275 | return fmt.Errorf("invalid epoch range %d - %d", start, end) 276 | } 277 | epochsPerEra := common.Epoch(era.SlotsPerEra / spec.SLOTS_PER_EPOCH) 278 | log.Info("starting", "start_epoch", start, "end_epoch", end, "epochs_per_era", epochsPerEra) 279 | 280 | work := make(chan perfJob, workers) 281 | 282 | var wg sync.WaitGroup 283 | wg.Add(workers) 284 | 285 | ctx, cancelCause := context.WithCancelCause(ctx) 286 | for i := 0; i < workers; i++ { 287 | go func(i int) { 288 | defer wg.Done() 289 | 290 | log.Info("started worker", "i", i) 291 | 292 | for { 293 | select { 294 | case <-ctx.Done(): 295 | return 296 | case job, ok := <-work: 297 | if !ok { 298 | return 299 | } 300 | err := updateJob(ctx, log, perf, spec, st, job.start, job.end) 301 | if err != nil { 302 | cancelCause(fmt.Errorf("worker %d failed job (%d - %d): %w", i, job.start, job.end, err)) 303 | } 304 | } 305 | } 306 | }(i) 307 | } 308 | 309 | // We can make jobs smaller than an era for more parallel work, 310 | // but then we just end up using more resources in total because of overhead, and we only have limited workers 311 | // TODO: consider scheduling smaller work jobs 312 | 313 | // schedule all the work 314 | go func() { 315 | for ep := start; ep < end; ep += epochsPerEra - (ep % epochsPerEra) { 316 | jobStart := ep 317 | jobEnd := ep + epochsPerEra 318 | if jobEnd > end { 319 | jobEnd = end 320 | } 321 | select { 322 | case work <- perfJob{start: jobStart, end: jobEnd}: 323 | continue 324 | case <-ctx.Done(): 325 | wg.Wait() // wait for workers to all shut down 326 | return 327 | } 328 | } 329 | // signal all work has been scheduled 330 | close(work) 331 | }() 332 | 333 | // wait for all workers to shut down 334 | wg.Wait() 335 | 336 | if err := context.Cause(ctx); err != nil { 337 | log.Error("interrupted work", "err", err) 338 | return err 339 | } 340 | 341 | log.Info("finished", "start_epoch", start, "end_epoch", end) 342 | return nil 343 | } 344 | 345 | func updateJob(ctx context.Context, log log.Logger, perfDB *leveldb.DB, spec *common.Spec, st *era.Store, start, end common.Epoch) error { 346 | log.Info("starting job", "start_epoch", start, "end_epoch", end) 347 | 348 | if spec.SLOTS_PER_HISTORICAL_ROOT != era.SlotsPerEra { 349 | return fmt.Errorf("unexpected spec: expected %d slots per historical root, got %d", era.SlotsPerEra, spec.SLOTS_PER_HISTORICAL_ROOT) 350 | } 351 | if start+common.Epoch(era.SlotsPerEra/spec.SLOTS_PER_EPOCH) < end { 352 | return fmt.Errorf("range too large: %d ...
%d: %d diff", start, end, end-start) 353 | } 354 | 355 | epochsPerEra := common.Epoch(era.SlotsPerEra / spec.SLOTS_PER_EPOCH) 356 | currEraEpoch := end 357 | if rem := end % epochsPerEra; rem > 0 { 358 | currEraEpoch += epochsPerEra - rem 359 | } 360 | currEraSlot, _ := spec.EpochStartSlot(currEraEpoch) 361 | 362 | var currEraBlockRoots phase0.HistoricalBatchRoots 363 | var prevEraBlockRoots phase0.HistoricalBatchRoots 364 | var randaoMixes phase0.RandaoMixes 365 | 366 | var indicesBounded BoundedIndices 367 | 368 | if currEraEpoch < spec.ALTAIR_FORK_EPOCH { 369 | var state phase0.BeaconState 370 | if err := st.State(currEraSlot, spec.Wrap(&state)); err != nil { 371 | return err 372 | } 373 | currEraBlockRoots = state.BlockRoots 374 | randaoMixes = state.RandaoMixes 375 | indicesBounded = loadIndicesFromState(state.Validators) 376 | } else if currEraEpoch < spec.BELLATRIX_FORK_EPOCH { 377 | var state altair.BeaconState 378 | if err := st.State(currEraSlot, spec.Wrap(&state)); err != nil { 379 | return err 380 | } 381 | currEraBlockRoots = state.BlockRoots 382 | randaoMixes = state.RandaoMixes 383 | indicesBounded = loadIndicesFromState(state.Validators) 384 | } else if currEraEpoch < spec.CAPELLA_FORK_EPOCH { 385 | var state bellatrix.BeaconState 386 | if err := st.State(currEraSlot, spec.Wrap(&state)); err != nil { 387 | return err 388 | } 389 | currEraBlockRoots = state.BlockRoots 390 | randaoMixes = state.RandaoMixes 391 | indicesBounded = loadIndicesFromState(state.Validators) 392 | } else { 393 | var state capella.BeaconState 394 | if err := st.State(currEraSlot, spec.Wrap(&state)); err != nil { 395 | return err 396 | } 397 | currEraBlockRoots = state.BlockRoots 398 | randaoMixes = state.RandaoMixes 399 | indicesBounded = loadIndicesFromState(state.Validators) 400 | } 401 | 402 | if currEraEpoch >= epochsPerEra { 403 | prevEraEpoch := currEraEpoch - epochsPerEra 404 | prevEraSlot, _ := spec.EpochStartSlot(prevEraEpoch) 405 | if prevEraEpoch+2 >= start { // if the start is close to the era boundary, we'll need to load the prev era state. 
406 | if prevEraEpoch < spec.ALTAIR_FORK_EPOCH { 407 | var state phase0.BeaconState 408 | if err := st.State(prevEraSlot, spec.Wrap(&state)); err != nil { 409 | return err 410 | } 411 | prevEraBlockRoots = state.BlockRoots 412 | } else if prevEraEpoch < spec.BELLATRIX_FORK_EPOCH { 413 | var state altair.BeaconState 414 | if err := st.State(prevEraSlot, spec.Wrap(&state)); err != nil { 415 | return err 416 | } 417 | prevEraBlockRoots = state.BlockRoots 418 | } else if prevEraEpoch < spec.CAPELLA_FORK_EPOCH { 419 | var state bellatrix.BeaconState 420 | if err := st.State(prevEraSlot, spec.Wrap(&state)); err != nil { 421 | return err 422 | } 423 | prevEraBlockRoots = state.BlockRoots 424 | } else { 425 | var state capella.BeaconState 426 | if err := st.State(prevEraSlot, spec.Wrap(&state)); err != nil { 427 | return err 428 | } 429 | prevEraBlockRoots = state.BlockRoots 430 | } 431 | } 432 | } 433 | 434 | blockRootFn := BlockRootLookup(func(slot common.Slot) (common.Root, error) { 435 | if slot > currEraSlot { 436 | return common.Root{}, fmt.Errorf("cannot get block root of slot %d, era stops at slot %d", slot, currEraSlot) 437 | } 438 | if slot+era.SlotsPerEra >= currEraSlot { 439 | return currEraBlockRoots[slot%era.SlotsPerEra], nil 440 | } 441 | if prevEraBlockRoots == nil { 442 | return common.Root{}, fmt.Errorf("no previous era block roots, cannot get block root of slot %d", slot) 443 | } 444 | if slot+era.SlotsPerEra*2 >= currEraSlot { 445 | return prevEraBlockRoots[slot%era.SlotsPerEra], nil 446 | } 447 | return common.Root{}, fmt.Errorf("slot %d too old to serve", slot) 448 | }) 449 | 450 | attFn := AttestationsLookup(func(slot common.Slot) (phase0.Attestations, error) { 451 | if slot == 0 { 452 | return nil, nil 453 | } 454 | ep := spec.SlotToEpoch(slot) 455 | if ep < spec.ALTAIR_FORK_EPOCH { 456 | var block phase0.SignedBeaconBlock 457 | if err := st.Block(slot, spec.Wrap(&block)); errors.Is(err, era.ErrNotExist) { 458 | return nil, nil 459 | } else if err != nil { 460 | return nil, err 461 | } 462 | if slot != block.Message.Slot { 463 | return nil, fmt.Errorf("loaded wrong block, got slot %d, but requested %d", block.Message.Slot, slot) 464 | } 465 | return block.Message.Body.Attestations, nil 466 | } else if ep < spec.BELLATRIX_FORK_EPOCH { 467 | var block altair.SignedBeaconBlock 468 | if err := st.Block(slot, spec.Wrap(&block)); errors.Is(err, era.ErrNotExist) { 469 | return nil, nil 470 | } else if err != nil { 471 | return nil, err 472 | } 473 | if slot != block.Message.Slot { 474 | return nil, fmt.Errorf("loaded wrong block, got slot %d, but requested %d", block.Message.Slot, slot) 475 | } 476 | return block.Message.Body.Attestations, nil 477 | } else if ep < spec.CAPELLA_FORK_EPOCH { 478 | var block bellatrix.SignedBeaconBlock 479 | if err := st.Block(slot, spec.Wrap(&block)); errors.Is(err, era.ErrNotExist) { 480 | return nil, nil 481 | } else if err != nil { 482 | return nil, err 483 | } 484 | if slot != block.Message.Slot { 485 | return nil, fmt.Errorf("loaded wrong block, got slot %d, but requested %d", block.Message.Slot, slot) 486 | } 487 | return block.Message.Body.Attestations, nil 488 | } else { 489 | var block capella.SignedBeaconBlock 490 | if err := st.Block(slot, spec.Wrap(&block)); errors.Is(err, era.ErrNotExist) { 491 | return nil, nil 492 | } else if err != nil { 493 | return nil, err 494 | } 495 | if slot != block.Message.Slot { 496 | return nil, fmt.Errorf("loaded wrong block, got slot %d, but requested %d", block.Message.Slot, slot) 497 | } 498 | return 
block.Message.Body.Attestations, nil 499 | } 500 | }) 501 | 502 | randaoFn := RandaoLookup(func(epoch common.Epoch) ([32]byte, error) { 503 | if epoch > currEraEpoch { 504 | return [32]byte{}, fmt.Errorf("epoch too high, cannot get randao mix of epoch %d from era state at epoch %d", epoch, currEraEpoch) 505 | } 506 | if epoch+spec.EPOCHS_PER_HISTORICAL_VECTOR < currEraEpoch { 507 | return [32]byte{}, fmt.Errorf("epoch too low, cannot get randao mix of epoch %d from era state at epoch %d", epoch, currEraEpoch) 508 | } 509 | i := epoch + spec.EPOCHS_PER_HISTORICAL_VECTOR - spec.MIN_SEED_LOOKAHEAD - 1 510 | return randaoMixes[i%spec.EPOCHS_PER_HISTORICAL_VECTOR], nil 511 | }) 512 | 513 | for currEp := start; currEp < end; currEp++ { 514 | if err := ctx.Err(); err != nil { 515 | return fmt.Errorf("stopped before processing epoch %d: %w", currEp, err) 516 | } 517 | validatorPerfs, err := processPerf(spec, blockRootFn, attFn, randaoFn, indicesBounded, currEp) 518 | if err != nil { 519 | return fmt.Errorf("failed to process epoch %d: %w", currEp, err) 520 | } 521 | 522 | out := make([]byte, len(validatorPerfs)*4) 523 | for i, v := range validatorPerfs { 524 | binary.LittleEndian.PutUint32(out[i*4:i*4+4], uint32(v)) 525 | } 526 | 527 | // compress the output (validators often behave the same, and there are a lot of them) 528 | out = snappy.Encode(nil, out) 529 | 530 | var outKey [3 + 8]byte 531 | copy(outKey[:3], KeyPerf) 532 | binary.BigEndian.PutUint64(outKey[3:], uint64(currEp)) 533 | if err := perfDB.Put(outKey[:], out, nil); err != nil { 534 | return fmt.Errorf("failed to store epoch performance") 535 | } 536 | } 537 | return nil 538 | } 539 | --------------------------------------------------------------------------------
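For reference, the per-epoch performance records described above (3-byte "prf" prefix plus 8-byte big-endian epoch key, snappy block-compressed little-endian uint32 values) can be read back with nothing but goleveldb and snappy, mirroring getPerf. A minimal standalone sketch; the ./perf_db path and the chosen epoch are assumptions, and this program is not part of the repository:

package main

import (
	"encoding/binary"
	"fmt"

	"github.com/golang/snappy"
	"github.com/syndtr/goleveldb/leveldb"
)

func main() {
	// open the performance DB produced by UpdatePerf (path is an assumption)
	db, err := leveldb.OpenFile("./perf_db", nil)
	if err != nil {
		panic(err)
	}
	defer db.Close()

	// key layout: "prf" prefix followed by the epoch as big-endian uint64
	var key [3 + 8]byte
	copy(key[:3], "prf")
	binary.BigEndian.PutUint64(key[3:], 200_000)

	compressed, err := db.Get(key[:], nil)
	if err != nil {
		panic(err)
	}
	// values are snappy block-compressed little-endian uint32s, one per validator index
	raw, err := snappy.Decode(nil, compressed)
	if err != nil {
		panic(err)
	}
	for i := 0; i+4 <= len(raw); i += 4 {
		v := binary.LittleEndian.Uint32(raw[i : i+4])
		if v&0x01 == 0 { // ValidatorExists bit not set
			continue
		}
		fmt.Printf("validator %d: incl_dist=%d target_ok=%t head_dist=%d\n",
			i/4, (v>>8)&0xff, (v>>16)&0xff != 0, (v>>24)&0xff)
	}
}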