├── .github └── workflows │ ├── checks.yaml │ ├── docker-utils-release.yaml │ └── release.yaml ├── .gitignore ├── .goreleaser.yaml ├── Dockerfile ├── Makefile ├── README.md ├── assertoor └── config │ └── playground_deposits.yaml ├── cl-proxy ├── cl-proxy.go └── cmd │ └── main.go ├── examples ├── op-stack-rollup-boost.md └── running-two-chains.md ├── go.mod ├── go.sum ├── main.go ├── mev-boost-relay ├── cmd │ └── main.go └── mev-boost-relay.go ├── playground ├── artifacts.go ├── artifacts_test.go ├── catalog.go ├── components.go ├── config.yaml.tmpl ├── genesis_op.go ├── genesis_op_test.go ├── inspect.go ├── local_runner.go ├── manifest.go ├── manifest_test.go ├── recipe_buildernet.go ├── recipe_l1.go ├── recipe_opstack.go ├── releases.go ├── testcases │ └── l2_genesis_ishtmus.json ├── utils.go ├── utils │ ├── README.md │ ├── genesis.json │ ├── intent.toml │ ├── query.sh │ ├── rollup.json │ └── state.json ├── watchdog.go └── watchers.go └── scripts ├── ci-build-playground-utils.sh ├── ci-copy-playground-logs.sh └── ci-setup-docker-compose.sh /.github/workflows/checks.yaml: -------------------------------------------------------------------------------- 1 | name: Checks 2 | 3 | on: 4 | workflow_dispatch: 5 | pull_request: 6 | merge_group: 7 | push: 8 | branches: [main] 9 | 10 | jobs: 11 | test: 12 | name: E2E test (${{ matrix.flags }}) 13 | runs-on: ubuntu-latest 14 | strategy: 15 | matrix: 16 | flags: 17 | - "l1" 18 | - "l1 --use-native-reth" 19 | - "l1 --with-prometheus" 20 | - "opstack" 21 | - "opstack --external-builder http://host.docker.internal:4444" 22 | - "opstack --enable-latest-fork=10" 23 | steps: 24 | - name: Check out code 25 | uses: actions/checkout@v2 26 | 27 | - name: Set up Go 28 | uses: actions/setup-go@v2 29 | with: 30 | go-version: 1.24 31 | 32 | - name: Install docker compose 33 | run: ./scripts/ci-setup-docker-compose.sh 34 | 35 | - name: Build playground utils 36 | run: ./scripts/ci-build-playground-utils.sh 37 | 38 | - name: Run playground 39 
| run: go run main.go cook ${{ matrix.flags }} --output /tmp/playground --timeout 4m --watchdog 40 | 41 | - name: Copy playground logs 42 | if: ${{ failure() }} 43 | run: ./scripts/ci-copy-playground-logs.sh /tmp/playground /tmp/playground-logs 44 | 45 | - name: Archive playground logs 46 | uses: actions/upload-artifact@v4 47 | if: ${{ failure() }} 48 | with: 49 | name: playground-logs-${{ matrix.flags }} 50 | path: /tmp/playground-logs 51 | retention-days: 5 52 | 53 | artifacts: 54 | name: Artifacts 55 | strategy: 56 | matrix: 57 | os: [ubuntu-latest, macos-13] 58 | runs-on: ${{ matrix.os }} 59 | steps: 60 | - name: Check out code 61 | uses: actions/checkout@v2 62 | 63 | - name: Set up Go 64 | uses: actions/setup-go@v2 65 | with: 66 | go-version: 1.24 67 | 68 | - name: Download and test artifacts 69 | run: go run main.go artifacts-all 70 | -------------------------------------------------------------------------------- /.github/workflows/docker-utils-release.yaml: -------------------------------------------------------------------------------- 1 | name: docker-utils-release 2 | # This action is used to update the utils docker image 3 | # whenever there is a change in the utils repo that lands on main 4 | 5 | on: 6 | workflow_dispatch: 7 | push: 8 | branches: 9 | - main 10 | paths: 11 | - 'cl-proxy/**' 12 | - 'mev-boost-relay/**' 13 | - 'go.mod' 14 | - 'go.sum' 15 | - 'Dockerfile' 16 | 17 | jobs: 18 | docker: 19 | runs-on: ubuntu-latest 20 | steps: 21 | - name: Checkout 22 | uses: actions/checkout@v4 23 | 24 | - name: Set up Docker Buildx 25 | uses: docker/setup-buildx-action@v3 26 | 27 | - name: Login to Docker Hub 28 | uses: docker/login-action@v3 29 | with: 30 | username: ${{ secrets.FLASHBOTS_DOCKERHUB_USERNAME }} 31 | password: ${{ secrets.FLASHBOTS_DOCKERHUB_TOKEN }} 32 | 33 | - name: Build and push 34 | uses: docker/build-push-action@v5 35 | with: 36 | context: . 
37 | push: true 38 | platforms: linux/amd64,linux/arm64 39 | tags: | 40 | docker.io/flashbots/playground-utils:latest 41 | docker.io/flashbots/playground-utils:${{ github.sha }} 42 | cache-from: type=gha 43 | cache-to: type=gha,mode=max 44 | -------------------------------------------------------------------------------- /.github/workflows/release.yaml: -------------------------------------------------------------------------------- 1 | # .github/workflows/release.yml 2 | name: release 3 | 4 | on: 5 | workflow_dispatch: 6 | push: 7 | tags: 8 | - "*" 9 | 10 | jobs: 11 | release: 12 | runs-on: ubuntu-latest 13 | steps: 14 | - name: Checkout 15 | uses: actions/checkout@v4 16 | with: 17 | fetch-depth: 0 18 | 19 | - name: setup dependencies 20 | uses: actions/setup-go@v2 21 | 22 | - name: Log tag name 23 | run: echo "Build for tag ${{ github.ref_name }}" 24 | 25 | - name: Create release 26 | run: make ci-release 27 | env: 28 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 29 | TAG: ${{ github.ref_name }} 30 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | local-testnet 2 | output -------------------------------------------------------------------------------- /.goreleaser.yaml: -------------------------------------------------------------------------------- 1 | env: 2 | - CGO_ENABLED=1 3 | builds: 4 | - id: builder-playground-darwin-amd64 5 | binary: builder-playground 6 | goarch: 7 | - amd64 8 | goos: 9 | - darwin 10 | env: 11 | - CC=o64-clang 12 | - CXX=o64-clang++ 13 | flags: 14 | - -trimpath 15 | - id: builder-playground-darwin-arm64 16 | binary: builder-playground 17 | goarch: 18 | - arm64 19 | goos: 20 | - darwin 21 | env: 22 | - CC=oa64-clang 23 | - CXX=oa64-clang++ 24 | flags: 25 | - -trimpath 26 | - id: builder-playground-linux-amd64 27 | binary: builder-playground 28 | env: 29 | - CC=x86_64-linux-gnu-gcc 30 | - 
CXX=x86_64-linux-gnu-g++ 31 | goarch: 32 | - amd64 33 | goos: 34 | - linux 35 | flags: 36 | - -trimpath 37 | ldflags: 38 | - -extldflags "-Wl,-z,stack-size=0x800000 --static" 39 | tags: 40 | - netgo 41 | - osusergo 42 | - id: builder-playground-linux-arm64 43 | binary: builder-playground 44 | goarch: 45 | - arm64 46 | goos: 47 | - linux 48 | env: 49 | - CC=aarch64-linux-gnu-gcc 50 | - CXX=aarch64-linux-gnu-g++ 51 | flags: 52 | - -trimpath 53 | ldflags: 54 | - -extldflags "-Wl,-z,stack-size=0x800000 --static" 55 | tags: 56 | - netgo 57 | - osusergo 58 | 59 | archives: 60 | - id: w/version 61 | builds: 62 | - builder-playground-darwin-amd64 63 | - builder-playground-darwin-arm64 64 | - builder-playground-linux-amd64 65 | - builder-playground-linux-arm64 66 | name_template: "builder-playground_v{{ .Version }}_{{ .Os }}_{{ .Arch }}" 67 | wrap_in_directory: false 68 | format: zip 69 | files: 70 | - none* 71 | 72 | checksum: 73 | name_template: "checksums.txt" 74 | 75 | release: 76 | draft: true 77 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM golang:1.24 2 | WORKDIR /app 3 | 4 | # Install build dependencies required for CGo 5 | RUN apt-get update && apt-get install -y gcc musl-dev 6 | 7 | # Copy go mod files first 8 | COPY go.* ./ 9 | RUN go mod download 10 | 11 | # Copy the rest of the source code 12 | COPY . . 
13 | 14 | # Build all applications with CGo enabled 15 | RUN go build -o /usr/local/bin/cl-proxy ./cl-proxy/cmd/main.go && \ 16 | go build -o /usr/local/bin/mev-boost-relay ./mev-boost-relay/cmd/main.go 17 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | 2 | ci-release: 3 | docker run \ 4 | --rm \ 5 | -e CGO_ENABLED=1 \ 6 | -e GITHUB_TOKEN="$(GITHUB_TOKEN)" \ 7 | -v /var/run/docker.sock:/var/run/docker.sock \ 8 | -v $(HOME)/.docker/config.json:/root/.docker/config.json \ 9 | -v `pwd`:/go/src/$(PACKAGE_NAME) \ 10 | -v `pwd`/sysroot:/sysroot \ 11 | -w /go/src/$(PACKAGE_NAME) \ 12 | ghcr.io/goreleaser/goreleaser-cross:v1.21.12 \ 13 | release --clean --auto-snapshot 14 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Builder Playground 2 | 3 | [![Goreport status](https://goreportcard.com/badge/github.com/flashbots/builder-playground)](https://goreportcard.com/report/github.com/flashbots/builder-playground) 4 | [![Test status](https://github.com/flashbots/builder-playground/actions/workflows/checks.yaml/badge.svg?branch=main)](https://github.com/flashbots/builder-playground/actions?query=workflow%3A%22Checks%22) 5 | 6 | The builder playground is a tool to deploy an end-to-end environment to locally test EVM block builders. 7 | 8 | ## Usage 9 | 10 | Clone the repository and use the `cook` command to deploy a specific recipe: 11 | 12 | ```bash 13 | $ builder-playground cook 14 | ``` 15 | 16 | Currently available recipes: 17 | 18 | ### L1 Recipe 19 | 20 | Deploys a complete L1 environment with: 21 | 22 | - A beacon node + validator client ([lighthouse](https://github.com/sigp/lighthouse)). 23 | - An execution client ([reth](https://github.com/paradigmxyz/reth)). 
- `--secondary-el`: Port to use for a secondary EL (enables the internal cl-proxy)
69 | - `--disable-logs` (bool): Disable the logs for the services. Defaults to `false`. 70 | 71 | To stop the playground, press `Ctrl+C`. 72 | 73 | ## Inspect 74 | 75 | Builder-playground supports inspecting the connection of a service to a specific port. 76 | 77 | ```bash 78 | $ builder-playground inspect 79 | ``` 80 | 81 | Example: 82 | 83 | ```bash 84 | $ builder-playground cook opstack 85 | $ builder-playground inspect op-geth authrpc 86 | ``` 87 | 88 | This command starts a `tcpflow` container in the same network interface as the service and captures the traffic to the specified port. 89 | 90 | ## Internals 91 | 92 | ### Execution Flow 93 | 94 | The playground executes in three main phases: 95 | 96 | 1. **Artifact Generation**: Creates all necessary files and configurations (genesis files, keys, etc.) 97 | 2. **Manifest Generation**: The recipe creates a manifest describing all services to be deployed, their ports, and configurations 98 | 3. **Deployment**: Uses Docker Compose to deploy the services described in the manifest 99 | 100 | When running in dry-run mode (`--dry-run` flag), only the first two phases are executed. This is useful for alternative deployment targets - while the playground uses Docker Compose by default, the manifest could be used to deploy to other platforms like Kubernetes. 101 | 102 | ### System Architecture 103 | 104 | The playground is structured in two main layers: 105 | 106 | #### Components 107 | 108 | Components are the basic building blocks of the system. Each component implements the `Service` interface: 109 | 110 | ```go 111 | type Service interface { 112 | Run(service *service) 113 | } 114 | ``` 115 | 116 | Components represent individual compute resources like: 117 | 118 | - Execution clients (Reth) 119 | - Consensus clients (Lighthouse) 120 | - Sidecar applications (MEV-Boost Relay) 121 | 122 | Each component, given its input parameters, outputs a Docker container description with its specific configuration. 
This focused approach allows us to provide a smooth developer experience for block building testing scenarios while keeping the codebase easy to maintain.
- Pre-baked configurations
50 | depositMaxIndex=$(echo $depositMaxIndex | jq -r .) 51 | 52 | minDepositCount=$(expr $depositCount \/ $depositMaxIndex) 53 | plusOneDepositCount=$(expr $depositCount - $minDepositCount \* $depositMaxIndex) 54 | 55 | workers="[]" 56 | 57 | while read index; do 58 | depositCount=$minDepositCount 59 | if [ "$index" -lt "$plusOneDepositCount" ]; then 60 | depositCount=$(expr $depositCount + 1) 61 | fi 62 | 63 | worker=$(echo "{\"index\": $index, \"depositCount\": $depositCount}" ) 64 | workers=$(echo $workers | jq -c ". += [$worker]") 65 | done <<< $(seq 0 1 $(expr $depositMaxIndex - 1)) 66 | 67 | echo "::set-out-json workers $workers" 68 | 69 | - name: run_task_matrix 70 | title: "Generate ${depositCount} topup deposits for first ${depositMaxIndex} keys" 71 | configVars: 72 | matrixValues: "tasks.prepare.outputs.workers" 73 | config: 74 | runConcurrent: true 75 | matrixVar: "worker" 76 | task: 77 | name: run_tasks 78 | title: "Generate ${{worker.depositCount}} topup deposits for key ${{worker.index}}" 79 | config: 80 | tasks: 81 | - name: check_consensus_validator_status 82 | title: "Get validator pubkey for key ${{worker.index}}" 83 | id: "get_validator" 84 | timeout: 1m 85 | configVars: 86 | validatorIndex: "worker.index" 87 | 88 | - name: generate_child_wallet 89 | id: depositor_wallet 90 | title: "Generate wallet for lifecycle test" 91 | configVars: 92 | walletSeed: "| \"fillup-deposit-queue-\" + .walletSeed + (.worker.index | tostring)" 93 | prefundMinBalance: "| (.worker.depositCount + 1) * 1000000000000000000" 94 | privateKey: "walletPrivkey" 95 | 96 | - name: sleep 97 | title: "Sleep 10s to ensure propagation of last block with wallet fundings to all clients" 98 | config: 99 | duration: 10s 100 | 101 | - name: run_task_options 102 | title: "Generate ${{worker.depositCount}} top up deposits with 1 ETH each" 103 | config: 104 | task: 105 | name: generate_deposits 106 | title: "Generate top up deposits for key ${{worker.index}} 
(${{tasks.get_validator.outputs.pubkey}})" 107 | config: 108 | depositAmount: 1 109 | topUpDeposit: true 110 | awaitReceipt: true 111 | failOnReject: true 112 | configVars: 113 | limitTotal: "worker.depositCount" 114 | limitPerSlot: "throughputPerIndex" 115 | limitPending: "maxPendingPerIndex" 116 | walletPrivkey: "tasks.depositor_wallet.outputs.childWallet.privkey" 117 | publicKey: "tasks.get_validator.outputs.pubkey" 118 | depositContract: "depositContract" 119 | -------------------------------------------------------------------------------- /cl-proxy/cl-proxy.go: -------------------------------------------------------------------------------- 1 | package clproxy 2 | 3 | import ( 4 | "bytes" 5 | "context" 6 | "encoding/json" 7 | "fmt" 8 | "io" 9 | "net/http" 10 | "os" 11 | "strings" 12 | "time" 13 | 14 | "github.com/flashbots/mev-boost-relay/common" 15 | "github.com/sirupsen/logrus" 16 | ) 17 | 18 | type Config struct { 19 | LogOutput io.Writer 20 | Port uint64 21 | Primary string 22 | Secondary string 23 | } 24 | 25 | func DefaultConfig() *Config { 26 | return &Config{ 27 | LogOutput: os.Stdout, 28 | Port: 5656, 29 | } 30 | } 31 | 32 | type ClProxy struct { 33 | config *Config 34 | log *logrus.Entry 35 | server *http.Server 36 | } 37 | 38 | func New(config *Config) (*ClProxy, error) { 39 | log := common.LogSetup(false, "info") 40 | log.Logger.SetOutput(config.LogOutput) 41 | 42 | proxy := &ClProxy{ 43 | config: config, 44 | log: log, 45 | } 46 | 47 | return proxy, nil 48 | } 49 | 50 | // Run starts the HTTP server 51 | func (s *ClProxy) Run() error { 52 | mux := http.NewServeMux() 53 | s.server = &http.Server{ 54 | Addr: fmt.Sprintf(":%d", s.config.Port), 55 | ReadTimeout: 10 * time.Second, 56 | WriteTimeout: 10 * time.Second, 57 | Handler: mux, 58 | } 59 | 60 | mux.HandleFunc("/", s.handleRequest) 61 | 62 | s.log.Infof("Starting server on port %d", s.config.Port) 63 | if err := s.server.ListenAndServe(); err != http.ErrServerClosed { 64 | return 
// - The CL->EL setup is not configured anyway to handle two block builders through the Engine API.
113 | var jsonRPCRequest jsonrpcMessage 114 | if err := json.Unmarshal(data, &jsonRPCRequest); err != nil { 115 | http.Error(w, "Bad request", http.StatusBadRequest) 116 | return 117 | } 118 | 119 | s.log.Info(fmt.Sprintf("Received request: method=%s", jsonRPCRequest.Method)) 120 | 121 | // proxy to primary and consider its response as the final response to send back to the CL 122 | resp, err := s.proxy(s.config.Primary, r, data) 123 | if err != nil { 124 | s.log.Errorf("Error multiplexing to primary: %v", err) 125 | http.Error(w, "Internal server error", http.StatusInternalServerError) 126 | return 127 | } 128 | defer resp.Body.Close() 129 | 130 | respData, err := io.ReadAll(resp.Body) 131 | if err != nil { 132 | s.log.Errorf("Error reading response from primary: %v", err) 133 | http.Error(w, "Internal server error", http.StatusInternalServerError) 134 | return 135 | } 136 | 137 | w.Header().Set("Content-Type", "application/json") 138 | w.WriteHeader(resp.StatusCode) 139 | w.Write(respData) 140 | 141 | if s.config.Secondary == "" { 142 | return 143 | } 144 | 145 | if strings.HasPrefix(jsonRPCRequest.Method, "engine_getPayload") { 146 | // the only request we do not send since the secondary builder does not have the payload id 147 | // and it will always fail 148 | return 149 | } 150 | 151 | if strings.HasPrefix(jsonRPCRequest.Method, "engine_forkchoiceUpdated") { 152 | // set to nil the second parameter of the forkchoiceUpdated call 153 | if len(jsonRPCRequest.Params) == 1 { 154 | // not expected 155 | s.log.Warn("ForkchoiceUpdated call with only one parameter") 156 | } else { 157 | jsonRPCRequest.Params[1] = nil 158 | 159 | data, err = json.Marshal(jsonRPCRequest) 160 | if err != nil { 161 | s.log.Errorf("Error marshalling forkchoiceUpdated request: %v", err) 162 | return 163 | } 164 | } 165 | } 166 | 167 | // proxy to secondary 168 | s.log.Info(fmt.Sprintf("Multiplexing request to secondary: method=%s", jsonRPCRequest.Method)) 169 | if _, err := 
s.proxy(s.config.Secondary, r, data); err != nil { 170 | s.log.Errorf("Error multiplexing to secondary: %v", err) 171 | } 172 | } 173 | 174 | func (s *ClProxy) proxy(dst string, r *http.Request, data []byte) (*http.Response, error) { 175 | // Create a new request 176 | req, err := http.NewRequest(http.MethodPost, dst, bytes.NewBuffer(data)) 177 | if err != nil { 178 | return nil, err 179 | } 180 | 181 | // Copy headers. It is important since we have to copy 182 | // the JWT header from the CL 183 | req.Header = r.Header 184 | 185 | // Perform the request 186 | client := &http.Client{} 187 | resp, err := client.Do(req) 188 | if err != nil { 189 | return nil, err 190 | } 191 | 192 | return resp, nil 193 | } 194 | -------------------------------------------------------------------------------- /cl-proxy/cmd/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "fmt" 5 | "os" 6 | 7 | clproxy "github.com/flashbots/builder-playground/cl-proxy" 8 | "github.com/spf13/cobra" 9 | ) 10 | 11 | var ( 12 | primaryBuilder string 13 | secondaryBuilder string 14 | port int 15 | ) 16 | 17 | var rootCmd = &cobra.Command{ 18 | Use: "clproxy", 19 | Short: "", 20 | Long: ``, 21 | RunE: func(cmd *cobra.Command, args []string) error { 22 | return runCLProxy() 23 | }, 24 | } 25 | 26 | func main() { 27 | rootCmd.Flags().StringVar(&primaryBuilder, "primary-builder", "http://localhost:8551", "") 28 | rootCmd.Flags().StringVar(&secondaryBuilder, "secondary-builder", "", "") 29 | rootCmd.Flags().IntVar(&port, "port", 5656, "") 30 | 31 | if err := rootCmd.Execute(); err != nil { 32 | fmt.Println(err) 33 | os.Exit(1) 34 | } 35 | } 36 | 37 | func runCLProxy() error { 38 | // Start the cl proxy 39 | cfg := &clproxy.Config{ 40 | LogOutput: os.Stdout, 41 | Port: uint64(port), 42 | Primary: primaryBuilder, 43 | Secondary: secondaryBuilder, 44 | } 45 | 46 | clproxy, err := clproxy.New(cfg) 47 | if err != nil { 48 | return 
fmt.Errorf("failed to create cl proxy: %w", err) 49 | } 50 | return clproxy.Run() 51 | } 52 | -------------------------------------------------------------------------------- /examples/op-stack-rollup-boost.md: -------------------------------------------------------------------------------- 1 | # Op Stack Rollup Boost 2 | 3 | This example shows how to deploy an Op Stack with rollup-boost with an external block builder (op-reth). 4 | 5 | First, download the `op-reth` binary: 6 | 7 | ```bash 8 | $ go run main.go artifacts op-reth 9 | ``` 10 | 11 | This will download the op-reth binary and save it under `$HOME/.playground/op-reth-v1.3.12`. 12 | 13 | Second, we can deploy the Op Stack with rollup-boost: 14 | 15 | ```bash 16 | $ go run main.go cook opstack --external-builder http://host.docker.internal:4444 17 | ``` 18 | 19 | This will deploy an Op Stack chain with: 20 | 21 | - A complete L1 setup (CL/EL/Mev-boost) 22 | - A complete L2 sequencer (op-geth/op-node/op-batcher) 23 | - Rollup-boost to enable external block building 24 | 25 | Note that we use `host.docker.internal` as the hostname because the Op Stack components run in Docker containers, while the external builder (op-reth) runs directly on the host machine. 26 | 27 | By default, the EL node for the Op-stack is deployed with a deterministic P2P key, ensuring the enode address remains consistent across all runs. The enode address is: 28 | 29 | `enode://3479db4d9217fb5d7a8ed4d61ac36e120b05d36c2eefb795dc42ff2e971f251a2315f5649ea1833271e020b9adc98d5db9973c7ed92d6b2f1f2223088c3d852f@127.0.0.1:30304` 30 | 31 | You will see this enode address displayed in the output when running the Op-stack recipe. 32 | 33 | The `--external-builder` flag is used to specify the URL of the external block builder. Even though the external builder is not active at this point, this does not affect the liveness of the system as the sequencer will continue to produce blocks normally. 
34 | 35 | Third, we can start the `op-reth` binary as the external block builder: 36 | 37 | ```bash 38 | $ $HOME/.playground/op-reth-v1.3.12 node --http --http.port 2222 \ 39 | --authrpc.addr 0.0.0.0 --authrpc.port 4444 --authrpc.jwtsecret $HOME/.playground/devnet/jwtsecret \ 40 | --chain $HOME/.playground/devnet/l2-genesis.json --datadir /tmp/builder --disable-discovery --port 30333 \ 41 | --trusted-peers enode://3479db4d9217fb5d7a8ed4d61ac36e120b05d36c2eefb795dc42ff2e971f251a2315f5649ea1833271e020b9adc98d5db9973c7ed92d6b2f1f2223088c3d852f@127.0.0.1:30304 42 | ``` 43 | 44 | The command above starts op-reth as an external block builder with the following key parameters: 45 | 46 | - `--authrpc.port 4444`: Matches the port specified in the `--external-builder` flag earlier 47 | - `--authrpc.jwtsecret`: Uses the JWT secret generated during Op Stack deployment 48 | - `--trusted-peers`: Connects to our Op Stack's EL node using the deterministic enode address 49 | 50 | Once `op-reth` is running, it will connect to the Op Stack and begin participating in block building. You can verify it's working by checking the logs of both the sequencer and op-reth for successful block proposals. 51 | 52 | ## Internal block builder 53 | 54 | To use an internal `op-reth` as a block builder, run: 55 | 56 | ``` 57 | $ go run main.go cook opstack --external-builder op-reth 58 | ``` 59 | -------------------------------------------------------------------------------- /examples/running-two-chains.md: -------------------------------------------------------------------------------- 1 | # Running two chains 2 | 3 | This example shows how to run two chains on the same machine. 4 | 5 | First, we need to deploy the first chain: 6 | 7 | ```bash 8 | $ go run main.go cook opstack 9 | ``` 10 | 11 | This chain is going to run under the default `ethplayground` Docker network. Playground uses DNS resolution to discover services in the same network. 
12 | 13 | In order to run a second chain, we can use the same command and specify a different network name: 14 | 15 | ```bash 16 | $ go run main.go cook opstack --network eth2 17 | ``` 18 | 19 | This will deploy the second chain under the `eth2` Docker network. 20 | -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module github.com/flashbots/builder-playground 2 | 3 | go 1.24.0 4 | 5 | require ( 6 | github.com/OffchainLabs/prysm/v6 v6.0.2 7 | github.com/alicebob/miniredis/v2 v2.34.0 8 | github.com/charmbracelet/bubbles v0.20.0 9 | github.com/charmbracelet/lipgloss v1.0.0 10 | github.com/docker/docker v28.0.1+incompatible 11 | github.com/ethereum/go-ethereum v1.15.9 12 | github.com/flashbots/go-boost-utils v1.9.0 13 | github.com/flashbots/mev-boost-relay v0.30.0-rc1 14 | github.com/google/uuid v1.6.0 15 | github.com/hashicorp/go-uuid v1.0.3 16 | github.com/holiman/uint256 v1.3.2 17 | github.com/sirupsen/logrus v1.9.3 18 | github.com/spf13/cobra v1.9.1 19 | github.com/spf13/pflag v1.0.6 20 | github.com/stretchr/testify v1.10.0 21 | github.com/wealdtech/go-eth2-wallet-encryptor-keystorev4 v1.1.3 22 | gopkg.in/yaml.v2 v2.4.0 23 | ) 24 | 25 | require ( 26 | github.com/BurntSushi/toml v1.4.1-0.20240526193622-a339e1f7089c // indirect 27 | github.com/Microsoft/go-winio v0.6.2 // indirect 28 | github.com/NYTimes/gziphandler v1.1.1 // indirect 29 | github.com/VictoriaMetrics/fastcache v1.12.2 // indirect 30 | github.com/alicebob/gopher-json v0.0.0-20230218143504-906a9b012302 // indirect 31 | github.com/allegro/bigcache v1.2.1 // indirect 32 | github.com/aohorodnyk/mimeheader v0.0.6 // indirect 33 | github.com/attestantio/go-builder-client v0.6.1 // indirect 34 | github.com/attestantio/go-eth2-client v0.24.0 // indirect 35 | github.com/aymanbagabas/go-osc52/v2 v2.0.1 // indirect 36 | github.com/beorn7/perks v1.0.1 // indirect 37 | 
github.com/bits-and-blooms/bitset v1.20.0 // indirect 38 | github.com/bradfitz/gomemcache v0.0.0-20230905024940-24af94b03874 // indirect 39 | github.com/buger/jsonparser v1.1.1 // indirect 40 | github.com/cenkalti/backoff/v4 v4.3.0 // indirect 41 | github.com/cespare/xxhash/v2 v2.3.0 // indirect 42 | github.com/charmbracelet/bubbletea v1.3.4 // indirect 43 | github.com/charmbracelet/x/ansi v0.8.0 // indirect 44 | github.com/charmbracelet/x/term v0.2.1 // indirect 45 | github.com/consensys/bavard v0.1.29 // indirect 46 | github.com/consensys/gnark-crypto v0.16.0 // indirect 47 | github.com/containerd/log v0.1.0 // indirect 48 | github.com/cpuguy83/go-md2man/v2 v2.0.6 // indirect 49 | github.com/crate-crypto/go-ipa v0.0.0-20240724233137-53bbb0ceb27a // indirect 50 | github.com/crate-crypto/go-kzg-4844 v1.1.0 // indirect 51 | github.com/davecgh/go-spew v1.1.1 // indirect 52 | github.com/deckarep/golang-set/v2 v2.6.0 // indirect 53 | github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0 // indirect 54 | github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect 55 | github.com/distribution/reference v0.6.0 // indirect 56 | github.com/docker/go-connections v0.5.0 // indirect 57 | github.com/docker/go-units v0.5.0 // indirect 58 | github.com/emicklei/dot v1.6.4 // indirect 59 | github.com/erikgeiser/coninput v0.0.0-20211004153227-1c3628e74d0f // indirect 60 | github.com/ethereum/c-kzg-4844 v1.0.3 // indirect 61 | github.com/ethereum/go-verkle v0.2.2 // indirect 62 | github.com/felixge/httpsnoop v1.0.4 // indirect 63 | github.com/ferranbt/fastssz v0.1.4 // indirect 64 | github.com/flashbots/go-utils v0.8.3 // indirect 65 | github.com/fsnotify/fsnotify v1.6.0 // indirect 66 | github.com/go-gorp/gorp/v3 v3.1.0 // indirect 67 | github.com/go-logr/logr v1.4.2 // indirect 68 | github.com/go-logr/stdr v1.2.2 // indirect 69 | github.com/go-ole/go-ole v1.3.0 // indirect 70 | github.com/goccy/go-yaml v1.15.23 // indirect 71 | github.com/gofrs/flock v0.12.1 // 
indirect 72 | github.com/gogo/protobuf v1.3.2 // indirect 73 | github.com/golang/snappy v0.0.5-0.20231225225746-43d5d4cd4e0e // indirect 74 | github.com/google/go-cmp v0.7.0 // indirect 75 | github.com/google/gofuzz v1.2.0 // indirect 76 | github.com/gorilla/mux v1.8.1 // indirect 77 | github.com/gorilla/websocket v1.5.3 // indirect 78 | github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.1 // indirect 79 | github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d // indirect 80 | github.com/herumi/bls-eth-go-binary v1.31.0 // indirect 81 | github.com/holiman/bloomfilter/v2 v2.0.3 // indirect 82 | github.com/inconshreveable/mousetrap v1.1.0 // indirect 83 | github.com/jmoiron/sqlx v1.4.0 // indirect 84 | github.com/json-iterator/go v1.1.12 // indirect 85 | github.com/klauspost/compress v1.18.0 // indirect 86 | github.com/klauspost/cpuid/v2 v2.2.9 // indirect 87 | github.com/lib/pq v1.10.9 // indirect 88 | github.com/lucasb-eyer/go-colorful v1.2.0 // indirect 89 | github.com/mattn/go-isatty v0.0.20 // indirect 90 | github.com/mattn/go-localereader v0.0.1 // indirect 91 | github.com/mattn/go-runewidth v0.0.16 // indirect 92 | github.com/minio/highwayhash v1.0.2 // indirect 93 | github.com/minio/sha256-simd v1.0.1 // indirect 94 | github.com/mitchellh/mapstructure v1.5.0 // indirect 95 | github.com/mmcloughlin/addchain v0.4.0 // indirect 96 | github.com/moby/docker-image-spec v1.3.1 // indirect 97 | github.com/moby/term v0.5.2 // indirect 98 | github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect 99 | github.com/modern-go/reflect2 v1.0.2 // indirect 100 | github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 // indirect 101 | github.com/morikuni/aec v1.0.0 // indirect 102 | github.com/muesli/ansi v0.0.0-20230316100256-276c6243b2f6 // indirect 103 | github.com/muesli/cancelreader v0.2.2 // indirect 104 | github.com/muesli/termenv v0.15.2 // indirect 105 | github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect 
106 | github.com/olekukonko/tablewriter v0.0.5 // indirect 107 | github.com/opencontainers/go-digest v1.0.0 // indirect 108 | github.com/opencontainers/image-spec v1.1.1 // indirect 109 | github.com/patrickmn/go-cache v2.1.0+incompatible // indirect 110 | github.com/pkg/errors v0.9.1 // indirect 111 | github.com/pmezard/go-difflib v1.0.0 // indirect 112 | github.com/prometheus/client_golang v1.21.0 // indirect 113 | github.com/prometheus/client_model v0.6.1 // indirect 114 | github.com/prometheus/common v0.62.0 // indirect 115 | github.com/prometheus/procfs v0.15.1 // indirect 116 | github.com/prysmaticlabs/fastssz v0.0.0-20241008181541-518c4ce73516 // indirect 117 | github.com/prysmaticlabs/go-bitfield v0.0.0-20240618144021-706c95b2dd15 // indirect 118 | github.com/prysmaticlabs/gohashtree v0.0.4-beta.0.20240624100937-73632381301b // indirect 119 | github.com/r3labs/sse/v2 v2.10.0 // indirect 120 | github.com/redis/go-redis/v9 v9.7.0 // indirect 121 | github.com/rivo/uniseg v0.4.7 // indirect 122 | github.com/rubenv/sql-migrate v1.7.1 // indirect 123 | github.com/russross/blackfriday/v2 v2.1.0 // indirect 124 | github.com/shirou/gopsutil v3.21.11+incompatible // indirect 125 | github.com/supranational/blst v0.3.14 // indirect 126 | github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d // indirect 127 | github.com/thomaso-mirodin/intmath v0.0.0-20160323211736-5dc6d854e46e // indirect 128 | github.com/tklauser/go-sysconf v0.3.14 // indirect 129 | github.com/tklauser/numcpus v0.9.0 // indirect 130 | github.com/urfave/cli/v2 v2.27.5 // indirect 131 | github.com/xrash/smetrics v0.0.0-20240521201337-686a1a2994c1 // indirect 132 | github.com/yuin/gopher-lua v1.1.1 // indirect 133 | github.com/yusufpapurcu/wmi v1.2.4 // indirect 134 | go.opentelemetry.io/auto/sdk v1.1.0 // indirect 135 | go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.60.0 // indirect 136 | go.opentelemetry.io/otel v1.35.0 // indirect 137 | 
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.35.0 // indirect 138 | go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.35.0 // indirect 139 | go.opentelemetry.io/otel/exporters/prometheus v0.56.0 // indirect 140 | go.opentelemetry.io/otel/metric v1.35.0 // indirect 141 | go.opentelemetry.io/otel/sdk v1.35.0 // indirect 142 | go.opentelemetry.io/otel/sdk/metric v1.35.0 // indirect 143 | go.opentelemetry.io/otel/trace v1.35.0 // indirect 144 | go.opentelemetry.io/proto/otlp v1.5.0 // indirect 145 | go.uber.org/atomic v1.11.0 // indirect 146 | go.uber.org/multierr v1.11.0 // indirect 147 | go.uber.org/zap v1.27.0 // indirect 148 | golang.org/x/crypto v0.36.0 // indirect 149 | golang.org/x/net v0.38.0 // indirect 150 | golang.org/x/oauth2 v0.26.0 // indirect 151 | golang.org/x/sync v0.12.0 // indirect 152 | golang.org/x/sys v0.31.0 // indirect 153 | golang.org/x/term v0.30.0 // indirect 154 | golang.org/x/text v0.23.0 // indirect 155 | golang.org/x/time v0.9.0 // indirect 156 | google.golang.org/genproto/googleapis/api v0.0.0-20250218202821-56aae31c358a // indirect 157 | google.golang.org/genproto/googleapis/rpc v0.0.0-20250218202821-56aae31c358a // indirect 158 | google.golang.org/grpc v1.71.0 // indirect 159 | google.golang.org/protobuf v1.36.5 // indirect 160 | gopkg.in/cenkalti/backoff.v1 v1.1.0 // indirect 161 | gopkg.in/inf.v0 v0.9.1 // indirect 162 | gopkg.in/yaml.v3 v3.0.1 // indirect 163 | gotest.tools/v3 v3.5.2 // indirect 164 | k8s.io/apimachinery v0.30.4 // indirect 165 | k8s.io/client-go v0.30.4 // indirect 166 | k8s.io/klog/v2 v2.120.1 // indirect 167 | k8s.io/utils v0.0.0-20230726121419-3b25d923346b // indirect 168 | rsc.io/tmplfunc v0.0.3 // indirect 169 | sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect 170 | sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect 171 | sigs.k8s.io/yaml v1.3.0 // indirect 172 | ) 173 | -------------------------------------------------------------------------------- /main.go: 
-------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "context" 5 | _ "embed" 6 | "fmt" 7 | "log" 8 | "os" 9 | "os/exec" 10 | "os/signal" 11 | "sort" 12 | "strings" 13 | "time" 14 | 15 | "github.com/flashbots/builder-playground/playground" 16 | "github.com/spf13/cobra" 17 | ) 18 | 19 | var outputFlag string 20 | var genesisDelayFlag uint64 21 | var withOverrides []string 22 | var watchdog bool 23 | var dryRun bool 24 | var interactive bool 25 | var timeout time.Duration 26 | var logLevelFlag string 27 | var bindExternal bool 28 | var withPrometheus bool 29 | var networkName string 30 | var labels playground.MapStringFlag 31 | var disableLogs bool 32 | 33 | var rootCmd = &cobra.Command{ 34 | Use: "playground", 35 | Short: "", 36 | Long: ``, 37 | RunE: func(cmd *cobra.Command, args []string) error { 38 | return nil 39 | }, 40 | } 41 | 42 | var cookCmd = &cobra.Command{ 43 | Use: "cook", 44 | Short: "Cook a recipe", 45 | RunE: func(cmd *cobra.Command, args []string) error { 46 | recipeNames := []string{} 47 | for _, recipe := range recipes { 48 | recipeNames = append(recipeNames, recipe.Name()) 49 | } 50 | return fmt.Errorf("please specify a recipe to cook. 
Available recipes: %s", recipeNames) 51 | }, 52 | } 53 | 54 | var artifactsCmd = &cobra.Command{ 55 | Use: "artifacts", 56 | Short: "List available artifacts", 57 | RunE: func(cmd *cobra.Command, args []string) error { 58 | if len(args) != 1 { 59 | return fmt.Errorf("please specify a service name") 60 | } 61 | serviceName := args[0] 62 | component := playground.FindComponent(serviceName) 63 | if component == nil { 64 | return fmt.Errorf("service %s not found", serviceName) 65 | } 66 | releaseService, ok := component.(playground.ReleaseService) 67 | if !ok { 68 | return fmt.Errorf("service %s is not a release service", serviceName) 69 | } 70 | output := outputFlag 71 | if output == "" { 72 | homeDir, err := playground.GetHomeDir() 73 | if err != nil { 74 | return fmt.Errorf("failed to get home directory: %w", err) 75 | } 76 | output = homeDir 77 | } 78 | location, err := playground.DownloadRelease(output, releaseService.ReleaseArtifact()) 79 | if err != nil { 80 | return fmt.Errorf("failed to download release: %w", err) 81 | } 82 | fmt.Println(location) 83 | return nil 84 | }, 85 | } 86 | 87 | var artifactsAllCmd = &cobra.Command{ 88 | Use: "artifacts-all", 89 | Short: "Download all the artifacts available in the catalog (Used for testing purposes)", 90 | RunE: func(cmd *cobra.Command, args []string) error { 91 | fmt.Println("Downloading all artifacts...") 92 | 93 | output := outputFlag 94 | if output == "" { 95 | homeDir, err := playground.GetHomeDir() 96 | if err != nil { 97 | return fmt.Errorf("failed to get home directory: %w", err) 98 | } 99 | output = homeDir 100 | } 101 | for _, component := range playground.Components { 102 | releaseService, ok := component.(playground.ReleaseService) 103 | if !ok { 104 | continue 105 | } 106 | location, err := playground.DownloadRelease(output, releaseService.ReleaseArtifact()) 107 | if err != nil { 108 | return fmt.Errorf("failed to download release: %w", err) 109 | } 110 | 111 | // make sure the artifact is valid to be 
executed on this platform 112 | log.Printf("Downloaded %s to %s\n", releaseService.ReleaseArtifact().Name, location) 113 | if err := isExecutableValid(location); err != nil { 114 | return fmt.Errorf("failed to check if artifact is valid: %w", err) 115 | } 116 | } 117 | return nil 118 | }, 119 | } 120 | 121 | var inspectCmd = &cobra.Command{ 122 | Use: "inspect", 123 | Short: "Inspect a connection between two services", 124 | RunE: func(cmd *cobra.Command, args []string) error { 125 | // two arguments, the name of the service and the name of the connection 126 | if len(args) != 2 { 127 | return fmt.Errorf("please specify a service name and a connection name") 128 | } 129 | serviceName := args[0] 130 | connectionName := args[1] 131 | 132 | sig := make(chan os.Signal, 1) 133 | signal.Notify(sig, os.Interrupt) 134 | 135 | ctx, cancel := context.WithCancel(context.Background()) 136 | go func() { 137 | <-sig 138 | cancel() 139 | }() 140 | 141 | if err := playground.Inspect(ctx, serviceName, connectionName); err != nil { 142 | return fmt.Errorf("failed to inspect connection: %w", err) 143 | } 144 | return nil 145 | }, 146 | } 147 | 148 | var recipes = []playground.Recipe{ 149 | &playground.L1Recipe{}, 150 | &playground.OpRecipe{}, 151 | &playground.BuilderNetRecipe{}, 152 | } 153 | 154 | func main() { 155 | for _, recipe := range recipes { 156 | recipeCmd := &cobra.Command{ 157 | Use: recipe.Name(), 158 | Short: recipe.Description(), 159 | RunE: func(cmd *cobra.Command, args []string) error { 160 | return runIt(recipe) 161 | }, 162 | } 163 | // add the flags from the recipe 164 | recipeCmd.Flags().AddFlagSet(recipe.Flags()) 165 | // add the common flags 166 | recipeCmd.Flags().StringVar(&outputFlag, "output", "", "Output folder for the artifacts") 167 | recipeCmd.Flags().BoolVar(&watchdog, "watchdog", false, "enable watchdog") 168 | recipeCmd.Flags().StringArrayVar(&withOverrides, "override", []string{}, "override a service's config") 169 | 
recipeCmd.Flags().BoolVar(&dryRun, "dry-run", false, "dry run the recipe") 170 | recipeCmd.Flags().BoolVar(&dryRun, "mise-en-place", false, "mise en place mode") 171 | recipeCmd.Flags().Uint64Var(&genesisDelayFlag, "genesis-delay", playground.MinimumGenesisDelay, "") 172 | recipeCmd.Flags().BoolVar(&interactive, "interactive", false, "interactive mode") 173 | recipeCmd.Flags().DurationVar(&timeout, "timeout", 0, "") // Used for CI 174 | recipeCmd.Flags().StringVar(&logLevelFlag, "log-level", "info", "log level") 175 | recipeCmd.Flags().BoolVar(&bindExternal, "bind-external", false, "bind host ports to external interface") 176 | recipeCmd.Flags().BoolVar(&withPrometheus, "with-prometheus", false, "whether to gather the Prometheus metrics") 177 | recipeCmd.Flags().StringVar(&networkName, "network", "", "network name") 178 | recipeCmd.Flags().Var(&labels, "labels", "list of labels to apply to the resources") 179 | recipeCmd.Flags().BoolVar(&disableLogs, "disable-logs", false, "disable logs") 180 | 181 | cookCmd.AddCommand(recipeCmd) 182 | } 183 | 184 | // reuse the same output flag for the artifacts command 185 | artifactsCmd.Flags().StringVar(&outputFlag, "output", "", "Output folder for the artifacts") 186 | artifactsAllCmd.Flags().StringVar(&outputFlag, "output", "", "Output folder for the artifacts") 187 | 188 | rootCmd.AddCommand(cookCmd) 189 | rootCmd.AddCommand(artifactsCmd) 190 | rootCmd.AddCommand(artifactsAllCmd) 191 | rootCmd.AddCommand(inspectCmd) 192 | 193 | if err := rootCmd.Execute(); err != nil { 194 | fmt.Println(err) 195 | os.Exit(1) 196 | } 197 | } 198 | 199 | func runIt(recipe playground.Recipe) error { 200 | var logLevel playground.LogLevel 201 | if err := logLevel.Unmarshal(logLevelFlag); err != nil { 202 | return fmt.Errorf("failed to parse log level: %w", err) 203 | } 204 | 205 | log.Printf("Log level: %s\n", logLevel) 206 | 207 | // parse the overrides 208 | overrides := map[string]string{} 209 | for _, val := range withOverrides { 210 | parts 
:= strings.SplitN(val, "=", 2) 211 | if len(parts) != 2 { 212 | return fmt.Errorf("invalid override format: %s, expected service=val", val) 213 | } 214 | overrides[parts[0]] = parts[1] 215 | } 216 | 217 | builder := recipe.Artifacts() 218 | builder.OutputDir(outputFlag) 219 | builder.GenesisDelay(genesisDelayFlag) 220 | artifacts, err := builder.Build() 221 | if err != nil { 222 | return err 223 | } 224 | 225 | svcManager := recipe.Apply(&playground.ExContext{LogLevel: logLevel}, artifacts) 226 | if err := svcManager.Validate(); err != nil { 227 | return fmt.Errorf("failed to validate manifest: %w", err) 228 | } 229 | 230 | // generate the dot graph 231 | dotGraph := svcManager.GenerateDotGraph() 232 | if err := artifacts.Out.WriteFile("graph.dot", dotGraph); err != nil { 233 | return err 234 | } 235 | 236 | // save the manifest.json file 237 | if err := svcManager.SaveJson(); err != nil { 238 | return fmt.Errorf("failed to save manifest: %w", err) 239 | } 240 | 241 | if withPrometheus { 242 | if err := playground.CreatePrometheusServices(svcManager, artifacts.Out); err != nil { 243 | return fmt.Errorf("failed to create prometheus services: %w", err) 244 | } 245 | } 246 | 247 | if dryRun { 248 | return nil 249 | } 250 | 251 | // validate that override is being applied to a service in the manifest 252 | for k := range overrides { 253 | if _, ok := svcManager.GetService(k); !ok { 254 | return fmt.Errorf("service '%s' in override not found in manifest", k) 255 | } 256 | } 257 | 258 | dockerRunner, err := playground.NewLocalRunner(artifacts.Out, svcManager, overrides, interactive, !bindExternal, networkName, labels, !disableLogs) 259 | if err != nil { 260 | return fmt.Errorf("failed to create docker runner: %w", err) 261 | } 262 | 263 | sig := make(chan os.Signal, 1) 264 | signal.Notify(sig, os.Interrupt) 265 | 266 | ctx, cancel := context.WithCancel(context.Background()) 267 | go func() { 268 | <-sig 269 | cancel() 270 | }() 271 | 272 | if err := dockerRunner.Run(); 
err != nil { 273 | dockerRunner.Stop() 274 | return fmt.Errorf("failed to run docker: %w", err) 275 | } 276 | 277 | if !interactive { 278 | // print services info 279 | fmt.Printf("\n========= Services started =========\n") 280 | for _, ss := range svcManager.Services { 281 | ports := ss.GetPorts() 282 | sort.Slice(ports, func(i, j int) bool { 283 | return ports[i].Name < ports[j].Name 284 | }) 285 | 286 | portsStr := []string{} 287 | for _, p := range ports { 288 | protocol := "" 289 | if p.Protocol == playground.ProtocolUDP { 290 | protocol = "/udp" 291 | } 292 | portsStr = append(portsStr, fmt.Sprintf("%s: %d/%d%s", p.Name, p.Port, p.HostPort, protocol)) 293 | } 294 | fmt.Printf("- %s (%s)\n", ss.Name, strings.Join(portsStr, ", ")) 295 | } 296 | } 297 | 298 | if err := dockerRunner.WaitForReady(ctx, 20*time.Second); err != nil { 299 | dockerRunner.Stop() 300 | return fmt.Errorf("failed to wait for service readiness: %w", err) 301 | } 302 | 303 | if err := playground.CompleteReady(dockerRunner.Instances()); err != nil { 304 | dockerRunner.Stop() 305 | return fmt.Errorf("failed to complete ready: %w", err) 306 | } 307 | 308 | // get the output from the recipe 309 | output := recipe.Output(svcManager) 310 | if len(output) > 0 { 311 | fmt.Printf("\n========= Output =========\n") 312 | for k, v := range output { 313 | fmt.Printf("- %s: %v\n", k, v) 314 | } 315 | } 316 | 317 | watchdogErr := make(chan error, 1) 318 | if watchdog { 319 | go func() { 320 | if err := playground.RunWatchdog(artifacts.Out, dockerRunner.Instances()); err != nil { 321 | watchdogErr <- fmt.Errorf("watchdog failed: %w", err) 322 | } 323 | }() 324 | } 325 | 326 | var timerCh <-chan time.Time 327 | if timeout > 0 { 328 | timerCh = time.After(timeout) 329 | } 330 | 331 | select { 332 | case <-ctx.Done(): 333 | fmt.Println("Stopping...") 334 | case err := <-dockerRunner.ExitErr(): 335 | fmt.Println("Service failed:", err) 336 | case err := <-watchdogErr: 337 | fmt.Println("Watchdog failed:", err) 
338 | case <-timerCh: 339 | fmt.Println("Timeout reached") 340 | } 341 | 342 | if err := dockerRunner.Stop(); err != nil { 343 | return fmt.Errorf("failed to stop docker: %w", err) 344 | } 345 | return nil 346 | } 347 | 348 | func isExecutableValid(path string) error { 349 | // First check if file exists 350 | _, err := os.Stat(path) 351 | if err != nil { 352 | return fmt.Errorf("file does not exist or is inaccessible: %w", err) 353 | } 354 | 355 | // Try to execute with a harmless flag or in a way that won't run the main program 356 | cmd := exec.Command(path, "--version") 357 | // Redirect output to /dev/null 358 | cmd.Stdout = nil 359 | cmd.Stderr = nil 360 | 361 | if err := cmd.Start(); err != nil { 362 | return fmt.Errorf("cannot start executable: %w", err) 363 | } 364 | 365 | // Immediately kill the process since we just want to test if it starts 366 | cmd.Process.Kill() 367 | 368 | return nil 369 | } 370 | -------------------------------------------------------------------------------- /mev-boost-relay/cmd/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "fmt" 5 | "os" 6 | 7 | mevboostrelay "github.com/flashbots/builder-playground/mev-boost-relay" 8 | "github.com/spf13/cobra" 9 | ) 10 | 11 | var ( 12 | apiListenAddr string 13 | apiListenPort uint64 14 | beaconClientAddr string 15 | validationServerAddr string 16 | ) 17 | 18 | var rootCmd = &cobra.Command{ 19 | Use: "local-mev-boost-relay", 20 | Short: "", 21 | Long: ``, 22 | RunE: func(cmd *cobra.Command, args []string) error { 23 | return runMevBoostRelay() 24 | }, 25 | } 26 | 27 | func main() { 28 | rootCmd.Flags().StringVar(&apiListenAddr, "api-listen-addr", "127.0.0.1", "") 29 | rootCmd.Flags().Uint64Var(&apiListenPort, "api-listen-port", 5555, "") 30 | rootCmd.Flags().StringVar(&beaconClientAddr, "beacon-client-addr", "http://localhost:3500", "") 31 | rootCmd.Flags().StringVar(&validationServerAddr, "validation-server-addr", 
"", "") 32 | 33 | if err := rootCmd.Execute(); err != nil { 34 | fmt.Println(err) 35 | os.Exit(1) 36 | } 37 | } 38 | 39 | func runMevBoostRelay() error { 40 | cfg := mevboostrelay.DefaultConfig() 41 | cfg.ApiListenAddr = apiListenAddr 42 | cfg.ApiListenPort = apiListenPort 43 | cfg.BeaconClientAddr = beaconClientAddr 44 | cfg.ValidationServerAddr = validationServerAddr 45 | 46 | relay, err := mevboostrelay.New(cfg) 47 | if err != nil { 48 | return fmt.Errorf("failed to create relay: %w", err) 49 | } 50 | 51 | return relay.Start() 52 | } 53 | -------------------------------------------------------------------------------- /mev-boost-relay/mev-boost-relay.go: -------------------------------------------------------------------------------- 1 | package mevboostrelay 2 | 3 | import ( 4 | "encoding/hex" 5 | "encoding/json" 6 | "fmt" 7 | "io" 8 | "log" 9 | "net" 10 | "net/http" 11 | "os" 12 | "strings" 13 | "sync" 14 | "time" 15 | 16 | "github.com/alicebob/miniredis/v2" 17 | "github.com/flashbots/go-boost-utils/bls" 18 | "github.com/flashbots/mev-boost-relay/beaconclient" 19 | "github.com/flashbots/mev-boost-relay/common" 20 | "github.com/flashbots/mev-boost-relay/database" 21 | "github.com/flashbots/mev-boost-relay/datastore" 22 | "github.com/flashbots/mev-boost-relay/services/api" 23 | "github.com/flashbots/mev-boost-relay/services/housekeeper" 24 | "github.com/sirupsen/logrus" 25 | ) 26 | 27 | var DefaultSecretKey = "5eae315483f028b5cdd5d1090ff0c7618b18737ea9bf3c35047189db22835c48" 28 | 29 | type Config struct { 30 | ApiListenAddr string 31 | ApiListenPort uint64 32 | ApiSecretKey string 33 | BeaconClientAddr string 34 | LogOutput io.Writer 35 | 36 | ValidationServerAddr string 37 | } 38 | 39 | func DefaultConfig() *Config { 40 | return &Config{ 41 | ApiListenAddr: "127.0.0.1", 42 | ApiListenPort: 5555, 43 | ApiSecretKey: DefaultSecretKey, 44 | BeaconClientAddr: "http://localhost:3500", 45 | LogOutput: os.Stdout, 46 | ValidationServerAddr: "", 47 | } 48 | } 49 | 50 | 
type MevBoostRelay struct { 51 | log *logrus.Entry 52 | apiSrv *api.RelayAPI 53 | housekeeperSrv *housekeeper.Housekeeper 54 | } 55 | 56 | func New(config *Config) (*MevBoostRelay, error) { 57 | log := common.LogSetup(false, "info") 58 | log.Logger.SetOutput(config.LogOutput) 59 | 60 | // connect to the beacon client 61 | bClient := beaconclient.NewMultiBeaconClient(log, []beaconclient.IBeaconInstance{ 62 | beaconclient.NewProdBeaconInstance(log, config.BeaconClientAddr, config.BeaconClientAddr), 63 | }) 64 | 65 | // enable feature flags 66 | featureFlags := []string{ 67 | "ENABLE_BUILDER_CANCELLATIONS", 68 | } 69 | for _, flag := range featureFlags { 70 | if err := os.Setenv(flag, "1"); err != nil { 71 | return nil, fmt.Errorf("failed to feature flag %s: %w", flag, err) 72 | } 73 | } 74 | 75 | // wait until the beacon client is ready, otherwise, the api and housekeeper services 76 | // will fail at startup 77 | syncTimeoutCh := time.After(10 * time.Second) 78 | for { 79 | if _, err := bClient.BestSyncStatus(); err == nil { 80 | break 81 | } 82 | select { 83 | case <-syncTimeoutCh: 84 | return nil, fmt.Errorf("beacon client failed to start") 85 | default: 86 | time.Sleep(100 * time.Millisecond) 87 | } 88 | } 89 | log.Info("Beacon client synced") 90 | 91 | // get the spec and genesis info to compute the eth network details 92 | spec, err := getSpec(config.BeaconClientAddr) 93 | if err != nil { 94 | return nil, fmt.Errorf("failed to get spec: %w", err) 95 | } 96 | info, err := bClient.GetGenesis() 97 | if err != nil { 98 | return nil, fmt.Errorf("failed to get genesis: %w", err) 99 | } 100 | ethNetworkDetails, err := generateEthNetworkDetails(spec, info) 101 | if err != nil { 102 | return nil, fmt.Errorf("failed to generate eth network details: %w", err) 103 | } 104 | 105 | // start redis in-memory 106 | redis, err := startInMemoryRedisDatastore() 107 | if err != nil { 108 | return nil, fmt.Errorf("failed to start in-memory redis: %w", err) 109 | } 110 | 111 | // 
create the mockDB 112 | pqDB := newInmemoryDB() 113 | 114 | // datastore 115 | ds, err := datastore.NewDatastore(redis, nil, pqDB) 116 | if err != nil { 117 | log.WithError(err).Fatalf("Failed setting up prod datastore") 118 | } 119 | 120 | // Refresh the initial set of validators from the beacon node. This adds the validators 121 | // as known validators in the chain. (not registered yet). 122 | ds.RefreshKnownValidatorsWithoutChecks(log, bClient, 0) 123 | 124 | // start housekeeping service 125 | housekeeperOpts := &housekeeper.HousekeeperOpts{ 126 | Log: log.WithField("service", "housekeeper"), 127 | Redis: redis, 128 | DB: pqDB, 129 | BeaconClient: bClient, 130 | } 131 | 132 | housekeeperSrv := housekeeper.NewHousekeeper(housekeeperOpts) 133 | 134 | var blockSimURL string 135 | if config.ValidationServerAddr != "" { 136 | log.Infof("Using remote block validation server: %s", config.ValidationServerAddr) 137 | blockSimURL = config.ValidationServerAddr 138 | } else { 139 | // start a mock block validation service that always 140 | // returns the blocks as valids. 
141 | apiBlockSimURL, err := startMockBlockValidationServiceServer() 142 | if err != nil { 143 | return nil, fmt.Errorf("failed to start mock block validation service: %w", err) 144 | } 145 | log.Info("Started mock block validation service, addr: ", apiBlockSimURL) 146 | blockSimURL = apiBlockSimURL 147 | } 148 | 149 | // decode the secret key 150 | envSkBytes, err := hex.DecodeString(strings.TrimPrefix(config.ApiSecretKey, "0x")) 151 | if err != nil { 152 | return nil, fmt.Errorf("incorrect secret key provided '%s': %w", config.ApiSecretKey, err) 153 | } 154 | secretKey, err := bls.SecretKeyFromBytes(envSkBytes[:]) 155 | if err != nil { 156 | return nil, fmt.Errorf("incorrect builder API secret key provided '%s': %w", config.ApiSecretKey, err) 157 | } 158 | 159 | apiOpts := api.RelayAPIOpts{ 160 | Log: log.WithField("service", "api"), 161 | ListenAddr: fmt.Sprintf("%s:%d", config.ApiListenAddr, config.ApiListenPort), 162 | BeaconClient: bClient, 163 | Datastore: ds, 164 | Redis: redis, 165 | DB: pqDB, 166 | SecretKey: secretKey, 167 | EthNetDetails: *ethNetworkDetails, 168 | BlockSimURL: blockSimURL, 169 | ProposerAPI: true, 170 | BlockBuilderAPI: true, 171 | DataAPI: true, 172 | } 173 | apiSrv, err := api.NewRelayAPI(apiOpts) 174 | if err != nil { 175 | return nil, fmt.Errorf("failed to create service") 176 | } 177 | 178 | return &MevBoostRelay{ 179 | log: log, 180 | apiSrv: apiSrv, 181 | housekeeperSrv: housekeeperSrv, 182 | }, nil 183 | } 184 | 185 | func (m *MevBoostRelay) Start() error { 186 | errChan := make(chan error, 2) 187 | 188 | m.log.Info("Starting housekeeper service...") 189 | go func() { 190 | err := m.housekeeperSrv.Start() 191 | m.log.WithError(err).Error("Housekeeper service stopped") 192 | errChan <- err 193 | }() 194 | 195 | m.log.Info("Starting API service...") 196 | go func() { 197 | err := m.apiSrv.StartServer() 198 | m.log.WithError(err).Error("API service stopped") 199 | errChan <- err 200 | }() 201 | 202 | go func() { 203 | // We only 
require to do this at startup once, because otherwise we will 204 | // just keep with the normal workflow of the mev-boost-relay. 205 | <-m.apiSrv.ValidatorUpdateCh() 206 | 207 | m.log.Info("Forcing validator registration at startup") 208 | 209 | m.housekeeperSrv.UpdateProposerDutiesWithoutChecks(0) 210 | m.apiSrv.UpdateProposerDutiesWithoutChecks(0) 211 | }() 212 | 213 | err := <-errChan 214 | return err 215 | } 216 | 217 | func generateEthNetworkDetails(spec *Spec, info *beaconclient.GetGenesisResponse) (*common.EthNetworkDetails, error) { 218 | envs := map[string]string{ 219 | "GENESIS_FORK_VERSION": info.Data.GenesisForkVersion, 220 | "GENESIS_VALIDATORS_ROOT": info.Data.GenesisValidatorsRoot, 221 | "BELLATRIX_FORK_VERSION": spec.BellatrixForkVersion, 222 | "CAPELLA_FORK_VERSION": spec.CapellaForkVersion, 223 | "DENEB_FORK_VERSION": spec.DenebForkVersion, 224 | "ELECTRA_FORK_VERSION": spec.ElectraForkVersion, 225 | } 226 | for k, v := range envs { 227 | if err := os.Setenv(k, v); err != nil { 228 | return nil, fmt.Errorf("failed to set env var %s: %w", k, err) 229 | } 230 | } 231 | 232 | netDetails, err := common.NewEthNetworkDetails("custom") 233 | if err != nil { 234 | return nil, err 235 | } 236 | return netDetails, nil 237 | } 238 | 239 | func startInMemoryRedisDatastore() (*datastore.RedisCache, error) { 240 | redisTestServer, err := miniredis.Run() 241 | if err != nil { 242 | return nil, fmt.Errorf("failed to start miniredis: %w", err) 243 | } 244 | redisService, err := datastore.NewRedisCache("", redisTestServer.Addr(), "") 245 | if err != nil { 246 | return nil, fmt.Errorf("failed to create redis cache: %w", err) 247 | } 248 | return redisService, nil 249 | } 250 | 251 | var emptyResponse = `{ 252 | "jsonrpc": "2.0", 253 | "id": 1, 254 | "result": null 255 | }` 256 | 257 | func startMockBlockValidationServiceServer() (string, error) { 258 | listener, err := net.Listen("tcp", ":0") 259 | if err != nil { 260 | return "", err 261 | } 262 | 263 | 
http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { 264 | w.Header().Set("Content-Type", "application/json") 265 | w.WriteHeader(http.StatusOK) 266 | // body, _ := io.ReadAll(r.Body) 267 | // log.Printf("Received validation request request: %s", body) 268 | _, _ = fmt.Fprint(w, emptyResponse) 269 | }) 270 | 271 | go func() { 272 | if err := http.Serve(listener, nil); err != nil { 273 | log.Fatalf("HTTP server error: %v", err) 274 | } 275 | }() 276 | 277 | addr := fmt.Sprintf("http://%s", listener.Addr().String()) 278 | return addr, nil 279 | } 280 | 281 | // inmemoryDB is an extension of the MockDB that stores the validator registry entries in memory. 282 | type inmemoryDB struct { 283 | *database.MockDB 284 | 285 | validatorRegistryEntriesLock sync.Mutex 286 | validatorRegistryEntries map[string]*database.ValidatorRegistrationEntry 287 | 288 | deliveredPayloadsLock sync.Mutex 289 | deliveredPayloads []*database.DeliveredPayloadEntry 290 | } 291 | 292 | func newInmemoryDB() *inmemoryDB { 293 | return &inmemoryDB{ 294 | MockDB: &database.MockDB{}, 295 | validatorRegistryEntries: make(map[string]*database.ValidatorRegistrationEntry), 296 | deliveredPayloads: make([]*database.DeliveredPayloadEntry, 0), 297 | } 298 | } 299 | 300 | // -- endpoints for the validator registry --- 301 | 302 | func (i *inmemoryDB) NumRegisteredValidators() (count uint64, err error) { 303 | return uint64(len(i.validatorRegistryEntries)), nil 304 | } 305 | 306 | func (i *inmemoryDB) SaveValidatorRegistration(entry database.ValidatorRegistrationEntry) error { 307 | i.validatorRegistryEntriesLock.Lock() 308 | defer i.validatorRegistryEntriesLock.Unlock() 309 | 310 | i.validatorRegistryEntries[entry.Pubkey] = &entry 311 | return nil 312 | } 313 | 314 | func (i *inmemoryDB) GetLatestValidatorRegistrations(timestampOnly bool) ([]*database.ValidatorRegistrationEntry, error) { 315 | i.validatorRegistryEntriesLock.Lock() 316 | defer i.validatorRegistryEntriesLock.Unlock() 317 | 318 | 
// GetValidatorRegistration returns the stored registration for pubkey, or an
// error if no entry exists.
func (i *inmemoryDB) GetValidatorRegistration(pubkey string) (*database.ValidatorRegistrationEntry, error) {
	i.validatorRegistryEntriesLock.Lock()
	defer i.validatorRegistryEntriesLock.Unlock()

	entry, found := i.validatorRegistryEntries[pubkey]
	if !found {
		return nil, fmt.Errorf("validator registration not found")
	}
	return entry, nil
}

// GetValidatorRegistrationsForPubkeys returns the entries for the requested
// pubkeys. Unknown pubkeys are silently skipped, so the result may be shorter
// than the input slice.
func (i *inmemoryDB) GetValidatorRegistrationsForPubkeys(pubkeys []string) ([]*database.ValidatorRegistrationEntry, error) {
	i.validatorRegistryEntriesLock.Lock()
	defer i.validatorRegistryEntriesLock.Unlock()

	entries := make([]*database.ValidatorRegistrationEntry, 0, len(pubkeys))
	for _, pubkey := range pubkeys {
		entry, found := i.validatorRegistryEntries[pubkey]
		if found {
			entries = append(entries, entry)
		}
	}
	return entries, nil
}

// -- endpoints for the delivered payloads ---

// SaveDeliveredPayload flattens the bid trace and the signed blinded beacon
// block into a DeliveredPayloadEntry and appends it to the in-memory list.
func (i *inmemoryDB) SaveDeliveredPayload(bidTrace *common.BidTraceV2WithBlobFields, signedBlindedBeaconBlock *common.VersionedSignedBlindedBeaconBlock, signedAt time.Time, publishMs uint64) error {
	i.deliveredPayloadsLock.Lock()
	defer i.deliveredPayloadsLock.Unlock()

	// The signed block is stored as its JSON encoding, wrapped in a nullable
	// string column type.
	_signedBlindedBeaconBlock, err := json.Marshal(signedBlindedBeaconBlock)
	if err != nil {
		return err
	}

	deliveredPayloadEntry := database.DeliveredPayloadEntry{
		SignedAt:                 database.NewNullTime(signedAt),
		SignedBlindedBeaconBlock: database.NewNullString(string(_signedBlindedBeaconBlock)),

		// Epoch is derived from the slot rather than taken from the trace.
		Slot:  bidTrace.Slot,
		Epoch: bidTrace.Slot / common.SlotsPerEpoch,

		BuilderPubkey:        bidTrace.BuilderPubkey.String(),
		ProposerPubkey:       bidTrace.ProposerPubkey.String(),
		ProposerFeeRecipient: bidTrace.ProposerFeeRecipient.String(),

		ParentHash:  bidTrace.ParentHash.String(),
		BlockHash:   bidTrace.BlockHash.String(),
		BlockNumber: bidTrace.BlockNumber,

		GasUsed:  bidTrace.GasUsed,
		GasLimit: bidTrace.GasLimit,

		NumTx: bidTrace.NumTx,
		// Value is stored as a decimal string of the big.Int wei amount.
		Value: bidTrace.Value.ToBig().String(),

		NumBlobs:      bidTrace.NumBlobs,
		BlobGasUsed:   bidTrace.BlobGasUsed,
		ExcessBlobGas: bidTrace.ExcessBlobGas,

		PublishMs: publishMs,
	}

	i.deliveredPayloads = append(i.deliveredPayloads, &deliveredPayloadEntry)
	return nil
}

// GetNumDeliveredPayloads returns how many payload entries have been stored.
func (i *inmemoryDB) GetNumDeliveredPayloads() (uint64, error) {
	i.deliveredPayloadsLock.Lock()
	defer i.deliveredPayloadsLock.Unlock()

	return uint64(len(i.deliveredPayloads)), nil
}

// GetRecentDeliveredPayloads returns the stored payload entries that pass the
// given filters, in insertion order.
// NOTE(review): only BlockNumber and BuilderPubkey filters are honoured (see
// filterPayload); other filter fields and any limit/ordering semantics of the
// real DB are not implemented here.
func (i *inmemoryDB) GetRecentDeliveredPayloads(filters database.GetPayloadsFilters) ([]*database.DeliveredPayloadEntry, error) {
	i.deliveredPayloadsLock.Lock()
	defer i.deliveredPayloadsLock.Unlock()

	entries := []*database.DeliveredPayloadEntry{}
	for _, entry := range i.deliveredPayloads {
		filtered := filterPayload(entry, filters)
		if !filtered {
			entries = append(entries, entry)
		}
	}

	return entries, nil
}

// filterPayload reports whether entry should be EXCLUDED by the filters: it
// returns true when a non-zero filter field does not match the entry.
func filterPayload(entry *database.DeliveredPayloadEntry, filter database.GetPayloadsFilters) bool {
	if filter.BlockNumber != 0 {
		if entry.BlockNumber != uint64(filter.BlockNumber) {
			return true
		}
	}

	if filter.BuilderPubkey != "" {
		if entry.BuilderPubkey != filter.BuilderPubkey {
			return true
		}
	}

	return false
}
`json:"DEPOSIT_CONTRACT_ADDRESS"` //nolint:tagliatelle 434 | DepositNetworkID string `json:"DEPOSIT_NETWORK_ID"` //nolint:tagliatelle 435 | DomainAggregateAndProof string `json:"DOMAIN_AGGREGATE_AND_PROOF"` //nolint:tagliatelle 436 | InactivityPenaltyQuotient string `json:"INACTIVITY_PENALTY_QUOTIENT"` //nolint:tagliatelle 437 | InactivityPenaltyQuotientAltair string `json:"INACTIVITY_PENALTY_QUOTIENT_ALTAIR"` //nolint:tagliatelle 438 | BellatrixForkVersion string `json:"BELLATRIX_FORK_VERSION"` //nolint:tagliatelle 439 | CapellaForkVersion string `json:"CAPELLA_FORK_VERSION"` //nolint:tagliatelle 440 | DenebForkVersion string `json:"DENEB_FORK_VERSION"` //nolint:tagliatelle 441 | ElectraForkVersion string `json:"ELECTRA_FORK_VERSION"` //nolint:tagliatelle 442 | } 443 | 444 | func getSpec(beaconURL string) (*Spec, error) { 445 | uri := fmt.Sprintf("%s/eth/v1/config/spec", beaconURL) 446 | 447 | resp, err := http.Get(uri) 448 | if err != nil { 449 | return nil, err 450 | } 451 | 452 | defer resp.Body.Close() 453 | 454 | data, err := io.ReadAll(resp.Body) 455 | if err != nil { 456 | return nil, err 457 | } 458 | 459 | var spec struct { 460 | Data *Spec `json:"data"` 461 | } 462 | if err := json.Unmarshal(data, &spec); err != nil { 463 | return nil, err 464 | } 465 | 466 | return spec.Data, nil 467 | } 468 | -------------------------------------------------------------------------------- /playground/artifacts.go: -------------------------------------------------------------------------------- 1 | package playground 2 | 3 | import ( 4 | "bytes" 5 | "compress/gzip" 6 | "context" 7 | "crypto/ecdsa" 8 | "encoding/base64" 9 | "encoding/hex" 10 | "encoding/json" 11 | "fmt" 12 | "io" 13 | "log" 14 | "math/big" 15 | "os" 16 | "path/filepath" 17 | "reflect" 18 | "strings" 19 | "sync" 20 | "text/template" 21 | "time" 22 | 23 | _ "embed" 24 | 25 | "github.com/OffchainLabs/prysm/v6/config/params" 26 | "github.com/OffchainLabs/prysm/v6/crypto/bls/common" 27 | 
"github.com/OffchainLabs/prysm/v6/runtime/interop" 28 | "github.com/OffchainLabs/prysm/v6/runtime/version" 29 | gethcommon "github.com/ethereum/go-ethereum/common" 30 | "github.com/ethereum/go-ethereum/common/hexutil" 31 | "github.com/ethereum/go-ethereum/core/types" 32 | ecrypto "github.com/ethereum/go-ethereum/crypto" 33 | "github.com/ethereum/go-ethereum/p2p/enode" 34 | "github.com/hashicorp/go-uuid" 35 | keystorev4 "github.com/wealdtech/go-eth2-wallet-encryptor-keystorev4" 36 | "gopkg.in/yaml.v2" 37 | ) 38 | 39 | var ( 40 | defaultOpBlockTimeSeconds = uint64(2) 41 | ) 42 | 43 | // minimumGenesisDelay is the minimum delay for the genesis time. This is required 44 | // because lighthouse takes some time to start and we need to make sure it is ready 45 | // otherwise, some blocks are missed. 46 | var MinimumGenesisDelay uint64 = 10 47 | 48 | //go:embed utils/rollup.json 49 | var opRollupConfig []byte 50 | 51 | //go:embed utils/genesis.json 52 | var opGenesis []byte 53 | 54 | //go:embed utils/state.json 55 | var opState []byte 56 | 57 | //go:embed config.yaml.tmpl 58 | var clConfigContent []byte 59 | 60 | //go:embed utils/query.sh 61 | var queryReadyCheck []byte 62 | 63 | type ArtifactsBuilder struct { 64 | outputDir string 65 | applyLatestL1Fork bool 66 | genesisDelay uint64 67 | applyLatestL2Fork *uint64 68 | OpblockTime uint64 69 | } 70 | 71 | func NewArtifactsBuilder() *ArtifactsBuilder { 72 | return &ArtifactsBuilder{ 73 | outputDir: "", 74 | applyLatestL1Fork: false, 75 | genesisDelay: MinimumGenesisDelay, 76 | OpblockTime: defaultOpBlockTimeSeconds, 77 | } 78 | } 79 | 80 | func (b *ArtifactsBuilder) OutputDir(outputDir string) *ArtifactsBuilder { 81 | b.outputDir = outputDir 82 | return b 83 | } 84 | 85 | func (b *ArtifactsBuilder) ApplyLatestL1Fork(applyLatestL1Fork bool) *ArtifactsBuilder { 86 | b.applyLatestL1Fork = applyLatestL1Fork 87 | return b 88 | } 89 | 90 | func (b *ArtifactsBuilder) ApplyLatestL2Fork(applyLatestL2Fork *uint64) *ArtifactsBuilder { 
// GenesisDelay sets the number of seconds between "now" (at Build time) and
// the genesis timestamp. Values below MinimumGenesisDelay are clamped in Build.
func (b *ArtifactsBuilder) GenesisDelay(genesisDelaySeconds uint64) *ArtifactsBuilder {
	b.genesisDelay = genesisDelaySeconds
	return b
}

// OpBlockTime sets the OP-stack L2 block time in seconds.
func (b *ArtifactsBuilder) OpBlockTime(blockTimeSeconds uint64) *ArtifactsBuilder {
	b.OpblockTime = blockTimeSeconds
	return b
}

// Artifacts is the result of a Build: a handle on the output directory that
// now contains the generated devnet files.
type Artifacts struct {
	Out *output
}

// Build generates all devnet artifacts into the configured output directory:
// the L1 CL config and premined genesis state, the L1 EL genesis (including
// the Optimism pre-state dump), validator keystores, and the L2 genesis and
// rollup config with timestamps/hashes rewritten to match the fresh L1 chain.
// The output directory is wiped first if it already exists.
func (b *ArtifactsBuilder) Build() (*Artifacts, error) {
	homeDir, err := GetHomeDir()
	if err != nil {
		return nil, err
	}
	if b.outputDir == "" {
		// Use the $HOMEDIR/devnet as the default output
		b.outputDir = filepath.Join(homeDir, "devnet")
	}

	out := &output{dst: b.outputDir, homeDir: homeDir}

	// check if the output directory exists
	if out.Exists("") {
		log.Printf("deleting existing output directory %s", b.outputDir)
		if err := out.Remove(""); err != nil {
			return nil, err
		}
	}

	if b.genesisDelay < MinimumGenesisDelay {
		log.Printf("genesis delay must be at least %d seconds, using %d", MinimumGenesisDelay, MinimumGenesisDelay)
		b.genesisDelay = MinimumGenesisDelay
	}

	// enable the latest fork in config.yaml or not (the far-future epoch value
	// is max-uint64, i.e. "never")
	var latestForkEpoch string
	if b.applyLatestL1Fork {
		latestForkEpoch = "0"
	} else {
		latestForkEpoch = "18446744073709551615"
	}
	clConfigContentStr := strings.Replace(string(clConfigContent), "{{.LatestForkEpoch}}", latestForkEpoch, 1)

	// load the config.yaml file
	clConfig, err := params.UnmarshalConfig([]byte(clConfigContentStr), nil)
	if err != nil {
		return nil, err
	}
	if err := params.SetActive(clConfig); err != nil {
		return nil, err
	}

	genesisTime := uint64(time.Now().Add(time.Duration(b.genesisDelay) * time.Second).Unix())
	config := params.BeaconConfig()

	gen := interop.GethTestnetGenesis(genesisTime, config)
	// HACK: fix this in prysm?
	gen.Config.DepositContractAddress = gethcommon.HexToAddress(config.DepositContractAddress)

	// add pre-funded accounts
	// NOTE(review): the balance string is parsed with base 16 here while the
	// L2 allocs below use the same digits prefixed with "0x" — confirm the two
	// are intentionally the same amount.
	prefundedBalance, _ := new(big.Int).SetString("10000000000000000000000", 16)

	for _, privStr := range prefundedAccounts {
		priv, err := getPrivKey(privStr)
		if err != nil {
			return nil, err
		}
		addr := ecrypto.PubkeyToAddress(priv.PublicKey)
		gen.Alloc[addr] = types.Account{
			Balance: prefundedBalance,
			Nonce:   1,
		}
	}

	// Apply Optimism pre-state: the embedded state.json carries a
	// base64+gzip-encoded genesis alloc dump that is merged into the L1 alloc.
	{
		var state struct {
			L1StateDump string `json:"l1StateDump"`
		}
		if err := json.Unmarshal(opState, &state); err != nil {
			return nil, fmt.Errorf("failed to unmarshal opState: %w", err)
		}

		decoded, err := base64.StdEncoding.DecodeString(state.L1StateDump)
		if err != nil {
			return nil, fmt.Errorf("failed to decode opState: %w", err)
		}

		// Create gzip reader from the base64 decoded data
		gr, err := gzip.NewReader(bytes.NewReader(decoded))
		if err != nil {
			return nil, fmt.Errorf("failed to create gzip reader: %w", err)
		}
		defer gr.Close()

		// Read and decode the contents
		contents, err := io.ReadAll(gr)
		if err != nil {
			return nil, fmt.Errorf("failed to read opState: %w", err)
		}

		var alloc types.GenesisAlloc
		if err := json.Unmarshal(contents, &alloc); err != nil {
			return nil, fmt.Errorf("failed to unmarshal opState: %w", err)
		}
		for addr, account := range alloc {
			gen.Alloc[addr] = account
		}
	}

	block := gen.ToBlock()
	log.Printf("Genesis block hash: %s", block.Hash())

	// Pick the CL state version to premine: Electra when the latest L1 fork is
	// active from genesis, Deneb otherwise.
	var v int
	if b.applyLatestL1Fork {
		v = version.Electra
	} else {
		v = version.Deneb
	}

	// 100 deterministic validator keys, starting at index 0.
	priv, pub, err := interop.DeterministicallyGenerateKeys(0, 100)
	if err != nil {
		return nil, err
	}

	depositData, roots, err := interop.DepositDataFromKeysWithExecCreds(priv, pub, 100)
	if err != nil {
		return nil, err
	}

	opts := make([]interop.PremineGenesisOpt, 0)
	opts = append(opts, interop.WithDepositData(depositData, roots))

	state, err := interop.NewPreminedGenesis(context.Background(), genesisTime, 0, 100, v, block, opts...)
	if err != nil {
		return nil, err
	}

	// Write the L1/CL artifact tree in one batch (see output.WriteFile for how
	// each value type is encoded).
	err = out.WriteBatch(map[string]interface{}{
		"testnet/config.yaml":                 func() ([]byte, error) { return convert(config) },
		"testnet/genesis.ssz":                 state,
		"genesis.json":                        gen,
		"jwtsecret":                           defaultJWTToken,
		"testnet/boot_enr.yaml":               "[]",
		"testnet/deploy_block.txt":            "0",
		"testnet/deposit_contract_block.txt":  "0",
		"testnet/genesis_validators_root.txt": hex.EncodeToString(state.GenesisValidatorsRoot()),
		"data_validator/":                     &lighthouseKeystore{privKeys: priv},
		"scripts/query.sh":                    queryReadyCheck,
	})
	if err != nil {
		return nil, err
	}

	{
		// We have to start slightly ahead of L1 genesis time
		opTimestamp := genesisTime + 2

		// If the latest fork is applied, convert the time to a fork time.
		// If the time is 0, apply on genesis, the fork time is zero.
		// if the time b is > 0, apply b * opBlockTimeSeconds to the genesis time.
		var forkTime *uint64
		if b.applyLatestL2Fork != nil {
			forkTime = new(uint64)

			if *b.applyLatestL2Fork != 0 {
				*forkTime = opTimestamp + b.OpblockTime*(*b.applyLatestL2Fork)
			} else {
				*forkTime = 0
			}
		}

		// override l2 genesis, make the timestamp start 2 seconds after the L1 genesis
		input := map[string]interface{}{
			"timestamp": hexutil.Uint64(opTimestamp).String(),
		}
		if forkTime != nil {
			// We need to enable prague on the EL to enable the engine v4 calls
			input["config"] = map[string]interface{}{
				"pragueTime":  *forkTime,
				"isthmusTime": *forkTime,
			}
		}

		// Update the allocs to include the same prefunded accounts as the L1 genesis.
		allocs := make(map[string]interface{})
		input["alloc"] = allocs
		for _, privStr := range prefundedAccounts {
			priv, err := getPrivKey(privStr)
			if err != nil {
				return nil, err
			}
			addr := ecrypto.PubkeyToAddress(priv.PublicKey)
			allocs[addr.String()] = map[string]interface{}{
				"balance": "0x10000000000000000000000",
				"nonce":   "0x1",
			}
		}

		newOpGenesis, err := overrideJSON(opGenesis, input)
		if err != nil {
			return nil, err
		}

		// the hash of the genesis has changed because of the timestamp so we need to account for that
		opGenesisBlock, err := toOpBlock(newOpGenesis)
		if err != nil {
			return nil, fmt.Errorf("failed to convert opGenesis to block: %w", err)
		}

		opGenesisHash := opGenesisBlock.Hash()

		// override rollup.json with the real values for the L1 chain and the correct timestamp
		rollupInput := map[string]interface{}{
			"genesis": map[string]interface{}{
				"l2_time": opTimestamp, // this one not in hex
				"l1": map[string]interface{}{
					"hash":   block.Hash().String(),
					"number": 0,
				},
				"l2": map[string]interface{}{
					"hash":   opGenesisHash.String(),
					"number": 0,
				},
			},
			"block_time": b.OpblockTime,
			"chain_op_config": map[string]interface{}{ // TODO: Read this from somewhere (genesis??)
				"eip1559Elasticity":        6,
				"eip1559Denominator":       50,
				"eip1559DenominatorCanyon": 250,
			},
		}
		if forkTime != nil {
			rollupInput["isthmus_time"] = *forkTime
		}

		newOpRollup, err := overrideJSON(opRollupConfig, rollupInput)
		if err != nil {
			return nil, err
		}

		if err := out.WriteFile("l2-genesis.json", newOpGenesis); err != nil {
			return nil, err
		}
		if err := out.WriteFile("rollup.json", newOpRollup); err != nil {
			return nil, err
		}
	}

	return &Artifacts{Out: out}, nil
}

// OpGenesisTmplInput is the template input for the OP genesis overrides.
// NOTE(review): this type does not appear to be referenced by the code visible
// here — possibly a leftover from an earlier template-based approach.
type OpGenesisTmplInput struct {
	Timestamp  uint64
	LatestFork *uint64
}

// overrideJSON unmarshals jsonData into a generic map, deep-merges overrides
// into it (see mergeMap) and re-marshals the result.
func overrideJSON(jsonData []byte, overrides map[string]interface{}) ([]byte, error) {
	// Parse original JSON into a map
	var original map[string]interface{}
	if err := json.Unmarshal(jsonData, &original); err != nil {
		return nil, fmt.Errorf("failed to unmarshal original JSON: %w", err)
	}

	// Recursively merge the overrides into the original
	mergeMap(original, overrides)

	// Marshal back to JSON
	result, err := json.Marshal(original)
	if err != nil {
		return nil, fmt.Errorf("failed to marshal modified JSON: %w", err)
	}

	return result, nil
}
388 | // For all other cases, override the value 389 | dst[key] = srcVal 390 | } 391 | } 392 | 393 | func getPrivKey(privStr string) (*ecdsa.PrivateKey, error) { 394 | privBuf, err := hex.DecodeString(strings.TrimPrefix(privStr, "0x")) 395 | if err != nil { 396 | return nil, err 397 | } 398 | 399 | priv, err := ecrypto.ToECDSA(privBuf) 400 | if err != nil { 401 | return nil, err 402 | } 403 | return priv, nil 404 | } 405 | 406 | func ConnectRaw(service, port, protocol, user string) string { 407 | return fmt.Sprintf(`{{Service "%s" "%s" "%s" "%s"}}`, service, port, protocol, user) 408 | } 409 | 410 | func Connect(service, port string) string { 411 | return ConnectRaw(service, port, "http", "") 412 | } 413 | 414 | type output struct { 415 | dst string 416 | 417 | homeDir string 418 | lock sync.Mutex 419 | 420 | enodeAddrSeq *big.Int 421 | } 422 | 423 | func (o *output) AbsoluteDstPath() (string, error) { 424 | return filepath.Abs(o.dst) 425 | } 426 | 427 | func (o *output) Exists(path string) bool { 428 | _, err := os.Stat(filepath.Join(o.dst)) 429 | return err == nil 430 | } 431 | 432 | func (o *output) Remove(path string) error { 433 | return os.RemoveAll(filepath.Join(o.dst, path)) 434 | } 435 | 436 | // CreateDir creates a new dir in the output folder and returns the 437 | // absolute file path 438 | func (o *output) CreateDir(path string) (string, error) { 439 | absPath, err := filepath.Abs(filepath.Join(o.dst, path)) 440 | if err != nil { 441 | return "", err 442 | } 443 | if err := os.MkdirAll(absPath, 0755); err != nil { 444 | return "", fmt.Errorf("failed to create directory: %w", err) 445 | } 446 | return absPath, nil 447 | } 448 | 449 | func (o *output) CopyFile(src string, dst string) error { 450 | // Open the source file 451 | sourceFile, err := os.Open(src) 452 | if err != nil { 453 | return fmt.Errorf("failed to open source file: %w", err) 454 | } 455 | defer sourceFile.Close() 456 | 457 | // Create the destination directory if it doesn't exist 458 | 
dstPath := filepath.Join(o.dst, dst) 459 | if err := os.MkdirAll(filepath.Dir(dstPath), 0755); err != nil { 460 | return fmt.Errorf("failed to create destination directory: %w", err) 461 | } 462 | 463 | // Create the destination file 464 | destFile, err := os.Create(dstPath) 465 | if err != nil { 466 | return fmt.Errorf("failed to create destination file: %w", err) 467 | } 468 | defer destFile.Close() 469 | 470 | // Copy the contents 471 | if _, err := io.Copy(destFile, sourceFile); err != nil { 472 | return fmt.Errorf("failed to copy file contents: %w", err) 473 | } 474 | 475 | // Copy file permissions from source to destination 476 | sourceInfo, err := os.Stat(src) 477 | if err != nil { 478 | return fmt.Errorf("failed to get source file info: %w", err) 479 | } 480 | 481 | if err := os.Chmod(dstPath, sourceInfo.Mode()); err != nil { 482 | return fmt.Errorf("failed to set destination file permissions: %w", err) 483 | } 484 | 485 | return nil 486 | } 487 | 488 | func (o *output) WriteBatch(data map[string]interface{}) error { 489 | for dst, data := range data { 490 | if err := o.WriteFile(dst, data); err != nil { 491 | return err 492 | } 493 | } 494 | return nil 495 | } 496 | 497 | func (o *output) LogOutput(name string) (*os.File, error) { 498 | // lock this because some services might be trying to access this in parallel 499 | o.lock.Lock() 500 | defer o.lock.Unlock() 501 | 502 | path := filepath.Join(o.dst, "logs", name+".log") 503 | 504 | if err := os.MkdirAll(filepath.Dir(path), 0700); err != nil { 505 | return nil, err 506 | } 507 | logOutput, err := os.Create(path) 508 | if err != nil { 509 | return nil, err 510 | } 511 | return logOutput, nil 512 | } 513 | 514 | func (o *output) WriteFile(dst string, data interface{}) error { 515 | dst = filepath.Join(o.dst, dst) 516 | 517 | var dataRaw []byte 518 | var err error 519 | 520 | if raw, ok := data.([]byte); ok { 521 | dataRaw = raw 522 | } else if raw, ok := data.(string); ok { 523 | dataRaw = []byte(raw) 524 | 
} else if ssz, ok := data.(sszObject); ok { 525 | if dataRaw, err = ssz.MarshalSSZ(); err != nil { 526 | return err 527 | } 528 | } else if encObj, ok := data.(encObject); ok { 529 | // create a new output for this sub-object and delegate the full encoding to it 530 | if err = encObj.Encode(&output{dst: dst}); err != nil { 531 | return err 532 | } 533 | return nil 534 | } else if encFn, ok := data.(func() ([]byte, error)); ok { 535 | if dataRaw, err = encFn(); err != nil { 536 | return err 537 | } 538 | } else { 539 | // figure out how to decode the object given the file extension 540 | ext := filepath.Ext(dst) 541 | if ext == ".json" { 542 | if dataRaw, err = json.MarshalIndent(data, "", "\t"); err != nil { 543 | return err 544 | } 545 | } else if ext == ".yaml" { 546 | if dataRaw, err = yaml.Marshal(data); err != nil { 547 | return err 548 | } 549 | } else { 550 | return fmt.Errorf("unsupported file extension: %s", ext) 551 | } 552 | } 553 | 554 | if err := os.MkdirAll(filepath.Dir(dst), 0755); err != nil { 555 | return err 556 | } 557 | if err := os.WriteFile(dst, dataRaw, 0644); err != nil { 558 | return err 559 | } 560 | return nil 561 | } 562 | 563 | var secret = "secret" 564 | 565 | type lighthouseKeystore struct { 566 | privKeys []common.SecretKey 567 | } 568 | 569 | func (l *lighthouseKeystore) Encode(o *output) error { 570 | for _, key := range l.privKeys { 571 | encryptor := keystorev4.New() 572 | cryptoFields, err := encryptor.Encrypt(key.Marshal(), secret) 573 | if err != nil { 574 | return err 575 | } 576 | 577 | id, _ := uuid.GenerateUUID() 578 | 579 | pubKeyHex := "0x" + hex.EncodeToString(key.PublicKey().Marshal()) 580 | item := map[string]interface{}{ 581 | "crypto": cryptoFields, 582 | "uuid": id, 583 | "pubkey": pubKeyHex[2:], // without 0x in the json file 584 | "version": 4, 585 | "description": "", 586 | } 587 | valJSON, err := json.MarshalIndent(item, "", "\t") 588 | if err != nil { 589 | return err 590 | } 591 | 592 | if err := 
o.WriteBatch(map[string]interface{}{ 593 | "validators/" + pubKeyHex + "/voting-keystore.json": valJSON, 594 | "secrets/" + pubKeyHex: secret, 595 | }); err != nil { 596 | return err 597 | } 598 | } 599 | 600 | return nil 601 | } 602 | 603 | type encObject interface { 604 | Encode(o *output) error 605 | } 606 | 607 | type sszObject interface { 608 | MarshalSSZ() ([]byte, error) 609 | } 610 | 611 | func GetHomeDir() (string, error) { 612 | homeDir, err := os.UserHomeDir() 613 | if err != nil { 614 | return "", fmt.Errorf("error getting user home directory: %w", err) 615 | } 616 | 617 | // Define the path for our custom home directory 618 | customHomeDir := filepath.Join(homeDir, ".playground") 619 | 620 | // Create output directory if it doesn't exist 621 | if err := os.MkdirAll(customHomeDir, 0755); err != nil { 622 | return "", fmt.Errorf("error creating output directory: %v", err) 623 | } 624 | 625 | return customHomeDir, nil 626 | } 627 | 628 | func convert(config *params.BeaconChainConfig) ([]byte, error) { 629 | val := reflect.ValueOf(config).Elem() 630 | 631 | vals := []string{} 632 | for i := 0; i < val.NumField(); i++ { 633 | // only encode the public fields with tag 'yaml' 634 | tag := val.Type().Field(i).Tag.Get("yaml") 635 | if tag == "" { 636 | continue 637 | } 638 | 639 | // decode the type of the value 640 | typ := val.Field(i).Type() 641 | 642 | var resTyp string 643 | if isByteArray(typ) || isByteSlice(typ) { 644 | resTyp = "0x" + hex.EncodeToString(val.Field(i).Bytes()) 645 | } else { 646 | // basic types 647 | switch typ.Kind() { 648 | case reflect.String: 649 | resTyp = val.Field(i).String() 650 | case reflect.Uint8, reflect.Uint64: 651 | resTyp = fmt.Sprintf("%d", val.Field(i).Uint()) 652 | case reflect.Int: 653 | resTyp = fmt.Sprintf("%d", val.Field(i).Int()) 654 | default: 655 | panic(fmt.Sprintf("BUG: unsupported type, tag '%s', err: '%s'", tag, val.Field(i).Kind())) 656 | } 657 | } 658 | 659 | vals = append(vals, fmt.Sprintf("%s: %s", tag, 
resTyp)) 660 | } 661 | 662 | return []byte(strings.Join(vals, "\n")), nil 663 | } 664 | 665 | func isByteArray(t reflect.Type) bool { 666 | return t.Kind() == reflect.Array && t.Elem().Kind() == reflect.Uint8 667 | } 668 | 669 | func isByteSlice(t reflect.Type) bool { 670 | return t.Kind() == reflect.Slice && t.Elem().Kind() == reflect.Uint8 671 | } 672 | 673 | var prefundedAccounts = []string{ 674 | "0xac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80", 675 | "0x59c6995e998f97a5a0044966f0945389dc9e86dae88c7a8412f4603b6b78690d", 676 | "0x5de4111afa1a4b94908f83103eb1f1706367c2e68ca870fc3fb9a804cdab365a", 677 | "0x7c852118294e51e653712a81e05800f419141751be58f605c371e15141b007a6", 678 | "0x47e179ec197488593b187f80a00eb0da91f1b9d0b13f8733639f19c30a34926a", 679 | "0x8b3a350cf5c34c9194ca85829a2df0ec3153be0318b5e2d3348e872092edffba", 680 | "0x92db14e403b83dfe3df233f83dfa3a0d7096f21ca9b0d6d6b8d88b2b4ec1564e", 681 | "0x4bbbf85ce3377467afe5d46f804f221813b2bb87f24d81f60f1fcdbf7cbf4356", 682 | "0xdbda1821b80551c9d65939329250298aa3472ba22feea921c0cf5d620ea67b97", 683 | "0x2a871d0798f97d79848a013d4936a73bf4cc922c825d33c1cf7073dff6d409c6", 684 | } 685 | 686 | func applyTemplate2(templateStr []byte, input interface{}) ([]byte, error) { 687 | tpl, err := template.New("").Parse(string(templateStr)) 688 | if err != nil { 689 | return nil, fmt.Errorf("failed to parse template: %w", err) 690 | } 691 | 692 | var out strings.Builder 693 | if err := tpl.Execute(&out, input); err != nil { 694 | return nil, fmt.Errorf("failed to execute template: %w", err) 695 | } 696 | 697 | return []byte(out.String()), nil 698 | } 699 | 700 | type EnodeAddr struct { 701 | PrivKey *ecdsa.PrivateKey 702 | Artifact string 703 | } 704 | 705 | func (e *EnodeAddr) ID() enode.ID { 706 | return enode.PubkeyToIDV4(&e.PrivKey.PublicKey) 707 | } 708 | 709 | func (o *output) GetEnodeAddr() *EnodeAddr { 710 | // TODO: This is a bit enshrined here 711 | if o.enodeAddrSeq == nil { 712 | o.enodeAddrSeq = 
big.NewInt(0) 713 | } 714 | 715 | // always start with 1 since 0 is not a valid private key for an enode address 716 | o.enodeAddrSeq.Add(o.enodeAddrSeq, big.NewInt(1)) 717 | privKeyBytes := gethcommon.LeftPadBytes(o.enodeAddrSeq.Bytes(), 32) 718 | 719 | privKey, err := ecrypto.ToECDSA(privKeyBytes) 720 | if err != nil { 721 | panic(fmt.Sprintf("BUG: failed to convert private key to ECDSA: %v", err)) 722 | } 723 | 724 | privKeyBytesHex := hex.EncodeToString(privKeyBytes) 725 | 726 | // write the key to an artifact file 727 | fileName := fmt.Sprintf("enode-key-%d.txt", o.enodeAddrSeq.Int64()) 728 | if err := o.WriteFile(fileName, privKeyBytesHex); err != nil { 729 | panic(fmt.Sprintf("BUG: failed to write enode key to artifact file: %v", err)) 730 | } 731 | 732 | return &EnodeAddr{PrivKey: privKey, Artifact: fileName} 733 | } 734 | -------------------------------------------------------------------------------- /playground/artifacts_test.go: -------------------------------------------------------------------------------- 1 | package playground 2 | 3 | import ( 4 | "os" 5 | "testing" 6 | ) 7 | 8 | func TestEnodeGeneration(t *testing.T) { 9 | // Test that the enodes generated by output are deterministic 10 | o1 := newTestOutput(t) 11 | o2 := newTestOutput(t) 12 | 13 | for i := 0; i < 10; i++ { 14 | if o1.GetEnodeAddr().ID() != o2.GetEnodeAddr().ID() { 15 | t.Fatalf("enode IDs are not the same") 16 | } 17 | } 18 | } 19 | 20 | func newTestOutput(t *testing.T) *output { 21 | dir, err := os.MkdirTemp("/tmp", "test-output") 22 | if err != nil { 23 | t.Fatalf("failed to create temporal folder: %v", err) 24 | } 25 | defer os.RemoveAll(dir) 26 | 27 | o := &output{ 28 | dst: dir, 29 | } 30 | return o 31 | } 32 | -------------------------------------------------------------------------------- /playground/catalog.go: -------------------------------------------------------------------------------- 1 | package playground 2 | 3 | var Components = []ServiceGen{} 4 | 5 | func 
register(component ServiceGen) { 6 | Components = append(Components, component) 7 | } 8 | 9 | func init() { 10 | register(&OpBatcher{}) 11 | register(&OpGeth{}) 12 | register(&OpNode{}) 13 | register(&RethEL{}) 14 | register(&LighthouseBeaconNode{}) 15 | register(&LighthouseValidator{}) 16 | register(&ClProxy{}) 17 | register(&MevBoostRelay{}) 18 | register(&RollupBoost{}) 19 | register(&OpReth{}) 20 | register(&BuilderHub{}) 21 | register(&BuilderHubPostgres{}) 22 | register(&BuilderHubMockProxy{}) 23 | register(&nullService{}) 24 | } 25 | 26 | func FindComponent(name string) ServiceGen { 27 | for _, component := range Components { 28 | if component.Name() == name { 29 | return component 30 | } 31 | } 32 | return nil 33 | } 34 | -------------------------------------------------------------------------------- /playground/components.go: -------------------------------------------------------------------------------- 1 | package playground 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "io" 7 | "strconv" 8 | "time" 9 | ) 10 | 11 | var defaultJWTToken = "04592280e1778419b7aa954d43871cb2cfb2ebda754fb735e8adeb293a88f9bf" 12 | 13 | type RollupBoost struct { 14 | ELNode string 15 | Builder string 16 | } 17 | 18 | func (r *RollupBoost) Run(service *Service, ctx *ExContext) { 19 | service. 20 | WithImage("docker.io/flashbots/rollup-boost"). 21 | WithTag("0.4rc1"). 
// Run configures the rollup-boost container: it sits between the sequencer's
// EL node (authrpc) and the external builder, authenticating both sides with
// the shared devnet JWT secret.
func (r *RollupBoost) Run(service *Service, ctx *ExContext) {
	service.
		WithImage("docker.io/flashbots/rollup-boost").
		WithTag("0.4rc1").
		WithArgs(
			"--rpc-port", `{{Port "authrpc" 8551}}`,
			"--l2-jwt-path", "/data/jwtsecret",
			"--l2-url", Connect(r.ELNode, "authrpc"),
			"--builder-jwt-path", "/data/jwtsecret",
			"--builder-url", r.Builder,
		).WithArtifact("/data/jwtsecret", "jwtsecret")
}

// Name returns the component's catalog name.
func (r *RollupBoost) Name() string {
	return "rollup-boost"
}

// OpBatcher submits L2 batches to L1. The node fields name the services it
// connects to; MaxChannelDuration of 0 selects the default of 2.
type OpBatcher struct {
	L1Node             string
	L2Node             string
	RollupNode         string
	MaxChannelDuration uint64
}

// Run configures the op-batcher container against the given L1/L2/rollup
// endpoints. The private key below is one of the devnet's well-known
// prefunded accounts.
func (o *OpBatcher) Run(service *Service, ctx *ExContext) {
	if o.MaxChannelDuration == 0 {
		o.MaxChannelDuration = 2
	}
	service.
		WithImage("us-docker.pkg.dev/oplabs-tools-artifacts/images/op-batcher").
		WithTag("v1.12.0-rc.1").
		WithEntrypoint("op-batcher").
		WithArgs(
			"--l1-eth-rpc", Connect(o.L1Node, "http"),
			"--l2-eth-rpc", Connect(o.L2Node, "http"),
			"--rollup-rpc", Connect(o.RollupNode, "http"),
			"--max-channel-duration="+strconv.FormatUint(o.MaxChannelDuration, 10),
			"--sub-safety-margin=4",
			"--poll-interval=1s",
			"--num-confirmations=1",
			"--private-key=0x2a871d0798f97d79848a013d4936a73bf4cc922c825d33c1cf7073dff6d409c6",
		)
}

// Name returns the component's catalog name.
func (o *OpBatcher) Name() string {
	return "op-batcher"
}

// OpNode is the OP-stack consensus/rollup node; the fields name the L1
// EL/beacon services and the L2 EL service it drives.
type OpNode struct {
	L1Node   string
	L1Beacon string
	L2Node   string
}

// Run configures the op-node container in sequencer mode, wiring it to the L1
// EL+beacon endpoints and the L2 engine (authrpc) with the shared JWT secret
// and the generated rollup.json.
func (o *OpNode) Run(service *Service, ctx *ExContext) {
	service.
		WithImage("us-docker.pkg.dev/oplabs-tools-artifacts/images/op-node").
		WithTag("v1.13.0-rc.1").
		WithEntrypoint("op-node").
		WithEnv("A", "B"). // this is just a placeholder to make sure env works since we e2e test with the recipes
		WithArgs(
			"--l1", Connect(o.L1Node, "http"),
			"--l1.beacon", Connect(o.L1Beacon, "http"),
			"--l1.epoch-poll-interval", "12s",
			"--l1.http-poll-interval", "6s",
			"--l2", Connect(o.L2Node, "authrpc"),
			"--l2.jwt-secret", "/data/jwtsecret",
			"--sequencer.enabled",
			"--sequencer.l1-confs", "0",
			"--verifier.l1-confs", "0",
			"--p2p.sequencer.key", "8b3a350cf5c34c9194ca85829a2df0ec3153be0318b5e2d3348e872092edffba",
			"--rollup.config", "/data/rollup.json",
			"--rpc.addr", "0.0.0.0",
			"--rpc.port", `{{Port "http" 8549}}`,
			"--p2p.listen.ip", "0.0.0.0",
			"--p2p.listen.tcp", `{{Port "p2p" 9003}}`,
			"--p2p.listen.udp", `{{PortUDP "p2p" 9003}}`,
			"--p2p.scoring.peers", "light",
			"--p2p.ban.peers", "true",
			"--metrics.enabled",
			"--metrics.addr", "0.0.0.0",
			"--metrics.port", `{{Port "metrics" 7300}}`,
			"--pprof.enabled",
			"--rpc.enable-admin",
			"--safedb.path", "/data_db",
		).
		WithArtifact("/data/jwtsecret", "jwtsecret").
		WithArtifact("/data/rollup.json", "rollup.json").
		WithVolume("data", "/data_db")
}
// this is just a placeholder to make sure env works since we e2e test with the recipes 78 | WithArgs( 79 | "--l1", Connect(o.L1Node, "http"), 80 | "--l1.beacon", Connect(o.L1Beacon, "http"), 81 | "--l1.epoch-poll-interval", "12s", 82 | "--l1.http-poll-interval", "6s", 83 | "--l2", Connect(o.L2Node, "authrpc"), 84 | "--l2.jwt-secret", "/data/jwtsecret", 85 | "--sequencer.enabled", 86 | "--sequencer.l1-confs", "0", 87 | "--verifier.l1-confs", "0", 88 | "--p2p.sequencer.key", "8b3a350cf5c34c9194ca85829a2df0ec3153be0318b5e2d3348e872092edffba", 89 | "--rollup.config", "/data/rollup.json", 90 | "--rpc.addr", "0.0.0.0", 91 | "--rpc.port", `{{Port "http" 8549}}`, 92 | "--p2p.listen.ip", "0.0.0.0", 93 | "--p2p.listen.tcp", `{{Port "p2p" 9003}}`, 94 | "--p2p.listen.udp", `{{PortUDP "p2p" 9003}}`, 95 | "--p2p.scoring.peers", "light", 96 | "--p2p.ban.peers", "true", 97 | "--metrics.enabled", 98 | "--metrics.addr", "0.0.0.0", 99 | "--metrics.port", `{{Port "metrics" 7300}}`, 100 | "--pprof.enabled", 101 | "--rpc.enable-admin", 102 | "--safedb.path", "/data_db", 103 | ). 104 | WithArtifact("/data/jwtsecret", "jwtsecret"). 105 | WithArtifact("/data/rollup.json", "rollup.json"). 106 | WithVolume("data", "/data_db") 107 | } 108 | 109 | func (o *OpNode) Name() string { 110 | return "op-node" 111 | } 112 | 113 | type OpGeth struct { 114 | // outputs 115 | Enode *EnodeAddr 116 | } 117 | 118 | func logLevelToGethVerbosity(logLevel LogLevel) string { 119 | switch logLevel { 120 | case LevelTrace: 121 | return "5" 122 | case LevelDebug: 123 | return "4" 124 | case LevelInfo: 125 | return "3" 126 | case LevelWarn: 127 | return "2" 128 | case LevelError: 129 | return "1" 130 | default: 131 | return "3" 132 | } 133 | } 134 | 135 | func (o *OpGeth) Run(service *Service, ctx *ExContext) { 136 | o.Enode = ctx.Output.GetEnodeAddr() 137 | 138 | service. 139 | WithImage("us-docker.pkg.dev/oplabs-tools-artifacts/images/op-geth"). 140 | WithTag("v1.101503.2-rc.5"). 141 | WithEntrypoint("/bin/sh"). 
142 | WithLabel("metrics_path", "/debug/metrics/prometheus"). 143 | WithArgs( 144 | "-c", 145 | "geth init --datadir /data_opgeth --state.scheme hash /data/l2-genesis.json && "+ 146 | "exec geth "+ 147 | "--datadir /data_opgeth "+ 148 | "--verbosity "+logLevelToGethVerbosity(ctx.LogLevel)+" "+ 149 | "--http "+ 150 | "--http.corsdomain \"*\" "+ 151 | "--http.vhosts \"*\" "+ 152 | "--http.addr 0.0.0.0 "+ 153 | "--http.port "+`{{Port "http" 8545}} `+ 154 | "--http.api web3,debug,eth,txpool,net,engine,miner "+ 155 | "--ws "+ 156 | "--ws.addr 0.0.0.0 "+ 157 | "--ws.port "+`{{Port "ws" 8546}} `+ 158 | "--ws.origins \"*\" "+ 159 | "--ws.api debug,eth,txpool,net,engine,miner "+ 160 | "--syncmode full "+ 161 | "--nodiscover "+ 162 | "--maxpeers 5 "+ 163 | "--rpc.allow-unprotected-txs "+ 164 | "--authrpc.addr 0.0.0.0 "+ 165 | "--authrpc.port "+`{{Port "authrpc" 8551}} `+ 166 | "--authrpc.vhosts \"*\" "+ 167 | "--authrpc.jwtsecret /data/jwtsecret "+ 168 | "--gcmode archive "+ 169 | "--state.scheme hash "+ 170 | "--port "+`{{Port "rpc" 30303}} `+ 171 | "--nodekey /data/p2p_key.txt "+ 172 | "--metrics "+ 173 | "--metrics.addr 0.0.0.0 "+ 174 | "--metrics.port "+`{{Port "metrics" 6061}}`, 175 | ). 176 | WithVolume("data", "/data_opgeth"). 177 | WithArtifact("/data/l2-genesis.json", "l2-genesis.json"). 178 | WithArtifact("/data/jwtsecret", "jwtsecret"). 
179 | WithArtifact("/data/p2p_key.txt", o.Enode.Artifact) 180 | } 181 | 182 | func (o *OpGeth) Name() string { 183 | return "op-geth" 184 | } 185 | 186 | var _ ServiceWatchdog = &OpGeth{} 187 | 188 | func (o *OpGeth) Watchdog(out io.Writer, instance *instance, ctx context.Context) error { 189 | gethURL := fmt.Sprintf("http://localhost:%d", instance.service.MustGetPort("http").HostPort) 190 | return watchChainHead(out, gethURL, 2*time.Second) 191 | } 192 | 193 | type RethEL struct { 194 | UseRethForValidation bool 195 | UseNativeReth bool 196 | } 197 | 198 | func (r *RethEL) ReleaseArtifact() *release { 199 | return &release{ 200 | Name: "reth", 201 | Org: "paradigmxyz", 202 | Version: "v1.3.1", 203 | Arch: func(goos, goarch string) string { 204 | if goos == "linux" { 205 | return "x86_64-unknown-linux-gnu" 206 | } else if goos == "darwin" && goarch == "arm64" { // Apple M1 207 | return "aarch64-apple-darwin" 208 | } else if goos == "darwin" && goarch == "amd64" { 209 | return "x86_64-apple-darwin" 210 | } 211 | return "" 212 | }, 213 | } 214 | } 215 | 216 | func logLevelToRethVerbosity(logLevel LogLevel) string { 217 | switch logLevel { 218 | case LevelTrace: 219 | return "-vvvvv" 220 | case LevelDebug: 221 | return "-vvvv" 222 | case LevelWarn: 223 | return "-vv" 224 | case LevelError: 225 | return "-v" 226 | case LevelInfo: 227 | fallthrough 228 | default: 229 | return "-vvv" 230 | } 231 | } 232 | 233 | func (r *RethEL) Run(svc *Service, ctx *ExContext) { 234 | // start the reth el client 235 | svc. 236 | WithImage("ghcr.io/paradigmxyz/reth"). 237 | WithTag("v1.3.1"). 238 | WithEntrypoint("/usr/local/bin/reth"). 
239 | WithArgs( 240 | "node", 241 | "--chain", "/data/genesis.json", 242 | "--datadir", "/data_reth", 243 | "--color", "never", 244 | "--ipcpath", "/data_reth/reth.ipc", 245 | "--addr", "127.0.0.1", 246 | "--port", `{{Port "rpc" 30303}}`, 247 | // "--disable-discovery", 248 | // http config 249 | "--http", 250 | "--http.addr", "0.0.0.0", 251 | "--http.api", "admin,eth,web3,net,rpc,mev,flashbots", 252 | "--http.port", `{{Port "http" 8545}}`, 253 | "--authrpc.port", `{{Port "authrpc" 8551}}`, 254 | "--authrpc.addr", "0.0.0.0", 255 | "--authrpc.jwtsecret", "/data/jwtsecret", 256 | "--metrics", `0.0.0.0:{{Port "metrics" 9090}}`, 257 | // For reth version 1.2.0 the "legacy" engine was removed, so we now require these arguments: 258 | "--engine.persistence-threshold", "0", "--engine.memory-block-buffer-target", "0", 259 | logLevelToRethVerbosity(ctx.LogLevel), 260 | ). 261 | WithArtifact("/data/genesis.json", "genesis.json"). 262 | WithArtifact("/data/jwtsecret", "jwtsecret"). 263 | WithVolume("data", "/data_reth") 264 | 265 | if r.UseNativeReth { 266 | // we need to use this otherwise the db cannot be binded 267 | svc.UseHostExecution() 268 | } 269 | } 270 | 271 | func (r *RethEL) Name() string { 272 | return "reth" 273 | } 274 | 275 | var _ ServiceWatchdog = &RethEL{} 276 | 277 | func (r *RethEL) Watchdog(out io.Writer, instance *instance, ctx context.Context) error { 278 | rethURL := fmt.Sprintf("http://localhost:%d", instance.service.MustGetPort("http").HostPort) 279 | return watchChainHead(out, rethURL, 12*time.Second) 280 | } 281 | 282 | type LighthouseBeaconNode struct { 283 | ExecutionNode string 284 | MevBoostNode string 285 | } 286 | 287 | func (l *LighthouseBeaconNode) Run(svc *Service, ctx *ExContext) { 288 | svc. 289 | WithImage("sigp/lighthouse"). 290 | WithTag("v7.0.0-beta.0"). 291 | WithEntrypoint("lighthouse"). 
292 | WithArgs( 293 | "bn", 294 | "--datadir", "/data_beacon", 295 | "--testnet-dir", "/data/testnet-dir", 296 | "--enable-private-discovery", 297 | "--disable-peer-scoring", 298 | "--staking", 299 | "--enr-address", "127.0.0.1", 300 | "--enr-udp-port", `{{PortUDP "p2p" 9000}}`, 301 | "--enr-tcp-port", `{{Port "p2p" 9000}}`, 302 | "--enr-quic-port", `{{Port "quic-p2p" 9100}}`, 303 | "--port", `{{Port "p2p" 9000}}`, 304 | "--quic-port", `{{Port "quic-p2p" 9100}}`, 305 | "--http", 306 | "--http-port", `{{Port "http" 3500}}`, 307 | "--http-address", "0.0.0.0", 308 | "--http-allow-origin", "*", 309 | "--disable-packet-filter", 310 | "--target-peers", "0", 311 | "--execution-endpoint", Connect(l.ExecutionNode, "authrpc"), 312 | "--execution-jwt", "/data/jwtsecret", 313 | "--always-prepare-payload", 314 | "--prepare-payload-lookahead", "8000", 315 | "--suggested-fee-recipient", "0x690B9A9E9aa1C9dB991C7721a92d351Db4FaC990", 316 | ). 317 | WithArtifact("/data/testnet-dir", "testnet"). 318 | WithArtifact("/data/jwtsecret", "jwtsecret"). 319 | WithVolume("data", "/data_beacon"). 320 | WithReady(ReadyCheck{ 321 | QueryURL: "http://localhost:3500/eth/v1/node/syncing", 322 | Interval: 1 * time.Second, 323 | Timeout: 30 * time.Second, 324 | Retries: 3, 325 | StartPeriod: 1 * time.Second, 326 | }) 327 | 328 | if l.MevBoostNode != "" { 329 | svc.WithArgs( 330 | "--builder", Connect(l.MevBoostNode, "http"), 331 | "--builder-fallback-epochs-since-finalization", "0", 332 | "--builder-fallback-disable-checks", 333 | ) 334 | } 335 | } 336 | 337 | func (l *LighthouseBeaconNode) Name() string { 338 | return "lighthouse-beacon-node" 339 | } 340 | 341 | type LighthouseValidator struct { 342 | BeaconNode string 343 | } 344 | 345 | func (l *LighthouseValidator) Run(service *Service, ctx *ExContext) { 346 | // start validator client 347 | service. 348 | WithImage("sigp/lighthouse"). 349 | WithTag("v7.0.0-beta.0"). 350 | WithEntrypoint("lighthouse"). 
351 | WithArgs( 352 | "vc", 353 | "--datadir", "/data/validator", 354 | "--testnet-dir", "/data/testnet-dir", 355 | "--init-slashing-protection", 356 | "--beacon-nodes", Connect(l.BeaconNode, "http"), 357 | "--suggested-fee-recipient", "0x690B9A9E9aa1C9dB991C7721a92d351Db4FaC990", 358 | "--builder-proposals", 359 | "--prefer-builder-proposals", 360 | ). 361 | WithArtifact("/data/validator", "data_validator"). 362 | WithArtifact("/data/testnet-dir", "testnet") 363 | } 364 | 365 | func (l *LighthouseValidator) Name() string { 366 | return "lighthouse-validator" 367 | } 368 | 369 | type ClProxy struct { 370 | PrimaryBuilder string 371 | SecondaryBuilder string 372 | } 373 | 374 | func (c *ClProxy) Run(service *Service, ctx *ExContext) { 375 | service. 376 | WithImage("docker.io/flashbots/playground-utils"). 377 | WithTag("latest"). 378 | WithEntrypoint("cl-proxy"). 379 | WithArgs( 380 | "--primary-builder", Connect(c.PrimaryBuilder, "authrpc"), 381 | "--secondary-builder", c.SecondaryBuilder, 382 | "--port", `{{Port "authrpc" 5656}}`, 383 | ) 384 | } 385 | 386 | func (c *ClProxy) Name() string { 387 | return "cl-proxy" 388 | } 389 | 390 | type MevBoostRelay struct { 391 | BeaconClient string 392 | ValidationServer string 393 | } 394 | 395 | func (m *MevBoostRelay) Run(service *Service, ctx *ExContext) { 396 | service. 397 | WithImage("docker.io/flashbots/playground-utils"). 398 | WithTag("latest"). 399 | WithEnv("ALLOW_SYNCING_BEACON_NODE", "1"). 400 | WithEntrypoint("mev-boost-relay"). 401 | DependsOnHealthy(m.BeaconClient). 
402 | WithArgs( 403 | "--api-listen-addr", "0.0.0.0", 404 | "--api-listen-port", `{{Port "http" 5555}}`, 405 | "--beacon-client-addr", Connect(m.BeaconClient, "http"), 406 | ) 407 | 408 | if m.ValidationServer != "" { 409 | service.WithArgs("--validation-server-addr", Connect(m.ValidationServer, "http")) 410 | } 411 | } 412 | 413 | func (m *MevBoostRelay) Name() string { 414 | return "mev-boost-relay" 415 | } 416 | 417 | var _ ServiceWatchdog = &MevBoostRelay{} 418 | 419 | func (m *MevBoostRelay) Watchdog(out io.Writer, instance *instance, ctx context.Context) error { 420 | beaconNodeURL := fmt.Sprintf("http://localhost:%d", instance.service.MustGetPort("http").HostPort) 421 | 422 | watchGroup := newWatchGroup() 423 | watchGroup.watch(func() error { 424 | return watchProposerPayloads(beaconNodeURL) 425 | }) 426 | watchGroup.watch(func() error { 427 | return validateProposerPayloads(out, beaconNodeURL) 428 | }) 429 | 430 | return watchGroup.wait() 431 | } 432 | 433 | type BuilderHubPostgres struct { 434 | } 435 | 436 | func (b *BuilderHubPostgres) Run(service *Service, ctx *ExContext) { 437 | service. 438 | WithImage("docker.io/flashbots/builder-hub-db"). 439 | WithTag("latest"). 440 | WithPort("postgres", 5432). 441 | WithEnv("POSTGRES_USER", "postgres"). 442 | WithEnv("POSTGRES_PASSWORD", "postgres"). 443 | WithEnv("POSTGRES_DB", "postgres"). 444 | WithReady(ReadyCheck{ 445 | Test: []string{"CMD-SHELL", "pg_isready -U postgres -d postgres"}, 446 | Interval: 1 * time.Second, 447 | Timeout: 30 * time.Second, 448 | Retries: 3, 449 | StartPeriod: 1 * time.Second, 450 | }) 451 | } 452 | 453 | func (b *BuilderHubPostgres) Name() string { 454 | return "builder-hub-postgres" 455 | } 456 | 457 | type BuilderHub struct { 458 | postgres string 459 | } 460 | 461 | func (b *BuilderHub) Run(service *Service, ctx *ExContext) { 462 | service. 463 | WithImage("docker.io/flashbots/builder-hub"). 464 | WithTag("latest"). 465 | WithEntrypoint("/app/builder-hub"). 
466 | WithEnv("POSTGRES_DSN", ConnectRaw(b.postgres, "postgres", "postgres", "postgres:postgres")+"/postgres?sslmode=disable"). 467 | WithEnv("LISTEN_ADDR", "0.0.0.0:"+`{{Port "http" 8080}}`). 468 | WithEnv("ADMIN_ADDR", "0.0.0.0:"+`{{Port "admin" 8081}}`). 469 | WithEnv("INTERNAL_ADDR", "0.0.0.0:"+`{{Port "internal" 8082}}`). 470 | WithEnv("METRICS_ADDR", "0.0.0.0:"+`{{Port "metrics" 8090}}`). 471 | DependsOnHealthy(b.postgres) 472 | } 473 | 474 | func (b *BuilderHub) Name() string { 475 | return "builder-hub" 476 | } 477 | 478 | type BuilderHubMockProxy struct { 479 | TargetService string 480 | } 481 | 482 | func (b *BuilderHubMockProxy) Run(service *Service, ctx *ExContext) { 483 | service. 484 | WithImage("docker.io/flashbots/builder-hub-mock-proxy"). 485 | WithTag("latest"). 486 | WithPort("http", 8888) 487 | 488 | if b.TargetService != "" { 489 | service.DependsOnHealthy(b.TargetService) 490 | } 491 | } 492 | 493 | func (b *BuilderHubMockProxy) Name() string { 494 | return "builder-hub-mock-proxy" 495 | } 496 | 497 | type OpReth struct { 498 | } 499 | 500 | func (o *OpReth) Run(service *Service, ctx *ExContext) { 501 | service.WithImage("ghcr.io/paradigmxyz/op-reth"). 502 | WithTag("nightly"). 503 | WithEntrypoint("op-reth"). 504 | WithArgs( 505 | "node", 506 | "--authrpc.port", `{{Port "authrpc" 8551}}`, 507 | "--authrpc.addr", "0.0.0.0", 508 | "--authrpc.jwtsecret", "/data/jwtsecret", 509 | "--http", 510 | "--http.addr", "0.0.0.0", 511 | "--http.port", `{{Port "http" 8545}}`, 512 | "--chain", "/data/l2-genesis.json", 513 | "--datadir", "/data_op_reth", 514 | "--disable-discovery", 515 | "--color", "never", 516 | "--metrics", `0.0.0.0:{{Port "metrics" 9090}}`, 517 | "--port", `{{Port "rpc" 30303}}`). 518 | WithArtifact("/data/jwtsecret", "jwtsecret"). 519 | WithArtifact("/data/l2-genesis.json", "l2-genesis.json"). 
520 | WithVolume("data", "/data_op_reth") 521 | } 522 | 523 | func (o *OpReth) Name() string { 524 | return "op-reth" 525 | } 526 | 527 | func (o *OpReth) ReleaseArtifact() *release { 528 | return &release{ 529 | Name: "op-reth", 530 | Repo: "reth", 531 | Org: "paradigmxyz", 532 | Version: "v1.3.12", 533 | Arch: func(goos, goarch string) string { 534 | if goos == "linux" { 535 | return "x86_64-unknown-linux-gnu" 536 | } else if goos == "darwin" && goarch == "arm64" { // Apple M1 537 | return "aarch64-apple-darwin" 538 | } else if goos == "darwin" && goarch == "amd64" { 539 | return "x86_64-apple-darwin" 540 | } 541 | return "" 542 | }, 543 | } 544 | } 545 | 546 | var _ ServiceWatchdog = &OpReth{} 547 | 548 | func (p *OpReth) Watchdog(out io.Writer, instance *instance, ctx context.Context) error { 549 | rethURL := fmt.Sprintf("http://localhost:%d", instance.service.MustGetPort("http").HostPort) 550 | return watchChainHead(out, rethURL, 2*time.Second) 551 | } 552 | 553 | type nullService struct { 554 | } 555 | 556 | func (n *nullService) Run(service *Service, ctx *ExContext) { 557 | } 558 | 559 | func (n *nullService) Name() string { 560 | return "null" 561 | } 562 | -------------------------------------------------------------------------------- /playground/config.yaml.tmpl: -------------------------------------------------------------------------------- 1 | CONFIG_NAME: devnet 2 | PRESET_BASE: mainnet 3 | 4 | # Genesis 5 | GENESIS_FORK_VERSION: 0x20000089 6 | 7 | # Altair 8 | ALTAIR_FORK_EPOCH: 0 9 | ALTAIR_FORK_VERSION: 0x20000090 10 | 11 | # Merge 12 | BELLATRIX_FORK_EPOCH: 0 13 | BELLATRIX_FORK_VERSION: 0x20000091 14 | TERMINAL_TOTAL_DIFFICULTY: 0 15 | 16 | # Capella 17 | CAPELLA_FORK_EPOCH: 0 18 | CAPELLA_FORK_VERSION: 0x20000092 19 | MAX_WITHDRAWALS_PER_PAYLOAD: 16 20 | 21 | # Deneb 22 | DENEB_FORK_EPOCH: 0 23 | DENEB_FORK_VERSION: 0x20000093 24 | 25 | # Electra (not enabled by default) 26 | ELECTRA_FORK_EPOCH: {{.LatestForkEpoch}} 27 | ELECTRA_FORK_VERSION: 
0x20000094 28 | 29 | # Fulu (not enabled at all yet) 30 | FULU_FORK_EPOCH: 18446744073709551615 31 | FULU_FORK_VERSION: 0x20000095 32 | 33 | # Time parameters 34 | SECONDS_PER_SLOT: 12 35 | 36 | # Deposit contract 37 | DEPOSIT_CONTRACT_ADDRESS: 0x4242424242424242424242424242424242424242 38 | DEPOSIT_CHAIN_ID: 1337 39 | DEPOSIT_NETWORK_ID: 1337 40 | -------------------------------------------------------------------------------- /playground/genesis_op.go: -------------------------------------------------------------------------------- 1 | package playground 2 | 3 | import ( 4 | "encoding/json" 5 | "fmt" 6 | "math/big" 7 | 8 | "github.com/ethereum/go-ethereum/common" 9 | "github.com/ethereum/go-ethereum/core" 10 | "github.com/ethereum/go-ethereum/core/rawdb" 11 | "github.com/ethereum/go-ethereum/core/state" 12 | "github.com/ethereum/go-ethereum/core/tracing" 13 | "github.com/ethereum/go-ethereum/core/types" 14 | "github.com/ethereum/go-ethereum/params" 15 | "github.com/ethereum/go-ethereum/trie" 16 | "github.com/ethereum/go-ethereum/triedb" 17 | "github.com/ethereum/go-ethereum/triedb/pathdb" 18 | "github.com/holiman/uint256" 19 | ) 20 | 21 | var ( 22 | // The L2 withdrawals contract predeploy address 23 | optimismL2ToL1MessagePasser = common.HexToAddress("0x4200000000000000000000000000000000000016") 24 | ) 25 | 26 | type OpChainConfig struct { 27 | *params.ChainConfig 28 | 29 | IsthmusTime *uint64 `json:"isthmusTime,omitempty"` // Isthmus switch time (nil = no fork, 0 = already on Optimism Isthmus) 30 | } 31 | 32 | // isTimestampForked returns whether a fork scheduled at timestamp s is active 33 | // at the given head timestamp. Whilst this method is the same as isBlockForked, 34 | // they are explicitly separate for clearer reading. 35 | func isTimestampForked(s *uint64, head uint64) bool { 36 | if s == nil { 37 | return false 38 | } 39 | return *s <= head 40 | } 41 | 42 | // IsOptimism returns whether the node is an optimism node or not. 
43 | func (c *OpChainConfig) IsOptimism() bool { 44 | return true 45 | } 46 | 47 | func (c *OpChainConfig) IsIsthmus(time uint64) bool { 48 | return isTimestampForked(c.IsthmusTime, time) 49 | } 50 | 51 | func (c *OpChainConfig) IsOptimismIsthmus(time uint64) bool { 52 | return c.IsOptimism() && c.IsIsthmus(time) 53 | } 54 | 55 | func (c *OpChainConfig) HasOptimismWithdrawalsRoot(blockTime uint64) bool { 56 | return c.IsOptimismIsthmus(blockTime) 57 | } 58 | 59 | // OpGenesis is the extension of the core.Genesis struct with additional fields 60 | // for the OP stack. There are problems doing json.Unmarshal if we embed the core.Genesis struct 61 | type OpGenesis struct { 62 | Config *OpChainConfig `json:"config"` 63 | } 64 | 65 | func toOpBlock(content []byte) (*types.Block, error) { 66 | var g core.Genesis 67 | if err := json.Unmarshal(content, &g); err != nil { 68 | return nil, err 69 | } 70 | var g1 OpGenesis 71 | if err := json.Unmarshal(content, &g1); err != nil { 72 | return nil, err 73 | } 74 | 75 | var stateRoot, storageRootMessagePasser common.Hash 76 | var err error 77 | if stateRoot, storageRootMessagePasser, err = hashAlloc(&g.Alloc, g.IsVerkle(), g1.Config.IsOptimismIsthmus(g.Timestamp)); err != nil { 78 | return nil, err 79 | } 80 | return g1.toBlockWithRoot(&g, stateRoot, storageRootMessagePasser), nil 81 | } 82 | 83 | // toBlockWithRoot constructs the genesis block with the given genesis state root. 
84 | func (g1 *OpGenesis) toBlockWithRoot(g *core.Genesis, stateRoot, storageRootMessagePasser common.Hash) *types.Block { 85 | opConfig := g1.Config 86 | 87 | head := &types.Header{ 88 | Number: new(big.Int).SetUint64(g.Number), 89 | Nonce: types.EncodeNonce(g.Nonce), 90 | Time: g.Timestamp, 91 | ParentHash: g.ParentHash, 92 | Extra: g.ExtraData, 93 | GasLimit: g.GasLimit, 94 | GasUsed: g.GasUsed, 95 | BaseFee: g.BaseFee, 96 | Difficulty: g.Difficulty, 97 | MixDigest: g.Mixhash, 98 | Coinbase: g.Coinbase, 99 | Root: stateRoot, 100 | } 101 | if g.GasLimit == 0 { 102 | head.GasLimit = params.GenesisGasLimit 103 | } 104 | if g.Difficulty == nil { 105 | if g.Config != nil && g.Config.Ethash == nil { 106 | head.Difficulty = big.NewInt(0) 107 | } else if g.Mixhash == (common.Hash{}) { 108 | head.Difficulty = params.GenesisDifficulty 109 | } 110 | } 111 | if g.Config != nil && g.Config.IsLondon(common.Big0) { 112 | if g.BaseFee != nil { 113 | head.BaseFee = g.BaseFee 114 | } else { 115 | head.BaseFee = new(big.Int).SetUint64(params.InitialBaseFee) 116 | } 117 | } 118 | var withdrawals []*types.Withdrawal 119 | if conf := g.Config; conf != nil { 120 | num := big.NewInt(int64(g.Number)) 121 | if conf.IsShanghai(num, g.Timestamp) { 122 | head.WithdrawalsHash = &types.EmptyWithdrawalsHash 123 | withdrawals = make([]*types.Withdrawal, 0) 124 | } 125 | if conf.IsCancun(num, g.Timestamp) { 126 | // EIP-4788: The parentBeaconBlockRoot of the genesis block is always 127 | // the zero hash. This is because the genesis block does not have a parent 128 | // by definition. 
129 | head.ParentBeaconRoot = new(common.Hash) 130 | // EIP-4844 fields 131 | head.ExcessBlobGas = g.ExcessBlobGas 132 | head.BlobGasUsed = g.BlobGasUsed 133 | if head.ExcessBlobGas == nil { 134 | head.ExcessBlobGas = new(uint64) 135 | } 136 | if head.BlobGasUsed == nil { 137 | head.BlobGasUsed = new(uint64) 138 | } 139 | } 140 | if conf.IsPrague(num, g.Timestamp) { 141 | head.RequestsHash = &types.EmptyRequestsHash 142 | } 143 | // If Isthmus is active at genesis, set the WithdrawalRoot to the storage root of the L2ToL1MessagePasser contract. 144 | if opConfig.IsOptimismIsthmus(g.Timestamp) { 145 | if storageRootMessagePasser == (common.Hash{}) { 146 | // if there was no MessagePasser contract storage, set the WithdrawalsHash to the empty hash 147 | storageRootMessagePasser = types.EmptyWithdrawalsHash 148 | } 149 | head.WithdrawalsHash = &storageRootMessagePasser 150 | } 151 | } 152 | 153 | // keep a copy of the withdrawals hash because it gets overwritten in the next step 154 | withdrawalsHash := common.Hash{} 155 | copy(withdrawalsHash[:], head.WithdrawalsHash[:]) 156 | 157 | block := types.NewBlock(head, &types.Body{Withdrawals: withdrawals}, nil, trie.NewStackTrie(nil)) 158 | 159 | { 160 | // add the Ishtmus changes 161 | header := block.Header() 162 | if opConfig.HasOptimismWithdrawalsRoot(header.Time) { 163 | if withdrawals == nil || len(withdrawals) > 0 { 164 | panic(fmt.Sprintf("expected non-nil empty withdrawals operation list in Isthmus")) 165 | } 166 | header.WithdrawalsHash = &withdrawalsHash 167 | } else if withdrawals == nil { 168 | // pre-Canyon 169 | header.WithdrawalsHash = nil 170 | } else if len(withdrawals) == 0 { 171 | header.WithdrawalsHash = &types.EmptyWithdrawalsHash 172 | } else { 173 | hash := types.DeriveSha(types.Withdrawals(withdrawals), trie.NewStackTrie(nil)) 174 | header.WithdrawalsHash = &hash 175 | } 176 | block = types.NewBlockWithHeader(header) 177 | } 178 | 179 | return block 180 | } 181 | 182 | // hashAlloc returns the 
following: 183 | // * computed state root according to the genesis specification. 184 | // * storage root of the L2ToL1MessagePasser contract. 185 | // * error if any, when committing the genesis state (if so, state root and storage root will be empty). 186 | func hashAlloc(ga *types.GenesisAlloc, isVerkle, isIsthmus bool) (common.Hash, common.Hash, error) { 187 | // If a genesis-time verkle trie is requested, create a trie config 188 | // with the verkle trie enabled so that the tree can be initialized 189 | // as such. 190 | var config *triedb.Config 191 | if isVerkle { 192 | config = &triedb.Config{ 193 | PathDB: pathdb.Defaults, 194 | IsVerkle: true, 195 | } 196 | } 197 | // Create an ephemeral in-memory database for computing hash, 198 | // all the derived states will be discarded to not pollute disk. 199 | emptyRoot := types.EmptyRootHash 200 | if isVerkle { 201 | emptyRoot = types.EmptyVerkleHash 202 | } 203 | db := rawdb.NewMemoryDatabase() 204 | statedb, err := state.New(emptyRoot, state.NewDatabase(triedb.NewDatabase(db, config), nil)) 205 | if err != nil { 206 | return common.Hash{}, common.Hash{}, err 207 | } 208 | for addr, account := range *ga { 209 | if account.Balance != nil { 210 | statedb.AddBalance(addr, uint256.MustFromBig(account.Balance), tracing.BalanceIncreaseGenesisBalance) 211 | } 212 | statedb.SetCode(addr, account.Code) 213 | statedb.SetNonce(addr, account.Nonce, tracing.NonceChangeGenesis) 214 | for key, value := range account.Storage { 215 | statedb.SetState(addr, key, value) 216 | } 217 | } 218 | 219 | stateRoot, err := statedb.Commit(0, false, false) 220 | if err != nil { 221 | return common.Hash{}, common.Hash{}, err 222 | } 223 | // get the storage root of the L2ToL1MessagePasser contract 224 | var storageRootMessagePasser common.Hash 225 | if isIsthmus { 226 | storageRootMessagePasser = statedb.GetStorageRoot(optimismL2ToL1MessagePasser) 227 | } 228 | 229 | return stateRoot, storageRootMessagePasser, nil 230 | } 231 | 
-------------------------------------------------------------------------------- /playground/genesis_op_test.go: -------------------------------------------------------------------------------- 1 | package playground 2 | 3 | import ( 4 | "encoding/json" 5 | "os" 6 | "testing" 7 | 8 | "github.com/ethereum/go-ethereum/common" 9 | ) 10 | 11 | func TestOpGenesisIshtmus(t *testing.T) { 12 | data, err := os.ReadFile("./testcases/l2_genesis_ishtmus.json") 13 | if err != nil { 14 | t.Fatalf("failed to read file: %v", err) 15 | } 16 | var opGenesisObj OpGenesis 17 | if err := json.Unmarshal(data, &opGenesisObj); err != nil { 18 | t.Fatalf("failed to unmarshal genesis: %v", err) 19 | } 20 | 21 | opBlock, err := toOpBlock(data) 22 | if err != nil { 23 | t.Fatalf("failed to convert to op block: %v", err) 24 | } 25 | 26 | expected := common.HexToHash("0x6c2f6ce3e748bd0b0717a48e5e3d223258a7d0135bc95f758fc90f6e44813ab9") 27 | if opBlock.Hash() != expected { 28 | t.Fatalf("expected hash %s, got %s", expected.Hex(), opBlock.Hash().Hex()) 29 | } 30 | } 31 | -------------------------------------------------------------------------------- /playground/inspect.go: -------------------------------------------------------------------------------- 1 | package playground 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "io" 7 | "os" 8 | 9 | "github.com/docker/docker/api/types/container" 10 | "github.com/docker/docker/api/types/filters" 11 | "github.com/docker/docker/client" 12 | ) 13 | 14 | // Inspect incldues the logic for the inspect command 15 | func Inspect(ctx context.Context, serviceName, portName string) error { 16 | client, err := newDockerClient() 17 | if err != nil { 18 | return fmt.Errorf("failed to create docker client: %w", err) 19 | } 20 | 21 | serviceID, portNum, err := retrieveContainerDetails(client, serviceName, portName) 22 | if err != nil { 23 | return fmt.Errorf("failed to retrieve container details: %w", err) 24 | } 25 | 26 | return runTcpFlow(ctx, client, serviceID, 
portNum) 27 | } 28 | 29 | func retrieveContainerDetails(client *client.Client, serviceName, portName string) (string, string, error) { 30 | // Get the service by name 31 | containers, err := client.ContainerList(context.Background(), container.ListOptions{ 32 | Filters: filters.NewArgs(filters.Arg("label", "service="+serviceName)), 33 | All: true, 34 | }) 35 | if err != nil { 36 | return "", "", fmt.Errorf("error getting container list: %w", err) 37 | } 38 | 39 | size := len(containers) 40 | if size == 0 { 41 | return "", "", fmt.Errorf("no containers found for service %s", serviceName) 42 | } else if size > 1 { 43 | return "", "", fmt.Errorf("multiple containers found for service %s", serviceName) 44 | } 45 | 46 | container := containers[0] 47 | 48 | // Get the container details to find the port mapping in the labels as port. 49 | containerDetails, err := client.ContainerInspect(context.Background(), container.ID) 50 | if err != nil { 51 | return "", "", fmt.Errorf("error inspecting container %s: %w", container.ID, err) 52 | } 53 | 54 | // Check if the port name is in the labels 55 | portLabel := fmt.Sprintf("port.%s", portName) 56 | portNum, ok := containerDetails.Config.Labels[portLabel] 57 | if !ok { 58 | return "", "", fmt.Errorf("port %s not found in container %s", portName, container.ID) 59 | } 60 | 61 | return container.ID, portNum, nil 62 | } 63 | 64 | func runTcpFlow(ctx context.Context, client *client.Client, containerID, portName string) error { 65 | // Create container config for tcpflow 66 | config := &container.Config{ 67 | Image: "appropriate/tcpflow:latest", 68 | Cmd: []string{"-c", "-p", "-i", "eth0", "port", portName}, 69 | Tty: true, 70 | AttachStdout: true, 71 | AttachStderr: true, 72 | } 73 | 74 | // Host config with network mode and capabilities 75 | hostConfig := &container.HostConfig{ 76 | NetworkMode: container.NetworkMode("container:" + containerID), 77 | CapAdd: []string{"NET_ADMIN"}, 78 | } 79 | 80 | // Create the container 81 | resp, 
err := client.ContainerCreate(ctx, config, hostConfig, nil, nil, "") 82 | if err != nil { 83 | return fmt.Errorf("failed to create container: %w", err) 84 | } 85 | 86 | // Start the container 87 | if err := client.ContainerStart(ctx, resp.ID, container.StartOptions{}); err != nil { 88 | return fmt.Errorf("failed to start container: %w", err) 89 | } 90 | 91 | // Get container logs and stream them 92 | logOptions := container.LogsOptions{ 93 | ShowStdout: true, 94 | ShowStderr: true, 95 | Follow: true, 96 | Timestamps: false, 97 | } 98 | 99 | logs, err := client.ContainerLogs(ctx, resp.ID, logOptions) 100 | if err != nil { 101 | return fmt.Errorf("failed to get container logs: %w", err) 102 | } 103 | defer logs.Close() 104 | 105 | // Start copying logs to stdout 106 | go func() { 107 | _, err := io.Copy(os.Stdout, logs) 108 | if err != nil && err != io.EOF { 109 | fmt.Fprintf(os.Stderr, "Error copying logs: %v\n", err) 110 | } 111 | }() 112 | 113 | // Wait for interrupt signal 114 | <-ctx.Done() 115 | 116 | // Cleanup: stop and remove the container 117 | timeout := 5 118 | if err := client.ContainerStop(context.Background(), resp.ID, container.StopOptions{Timeout: &timeout}); err != nil { 119 | fmt.Fprintf(os.Stderr, "Error stopping container: %v\n", err) 120 | } 121 | 122 | if err := client.ContainerRemove(context.Background(), resp.ID, container.RemoveOptions{Force: true}); err != nil { 123 | fmt.Fprintf(os.Stderr, "Error removing container: %v\n", err) 124 | } 125 | 126 | return nil 127 | } 128 | -------------------------------------------------------------------------------- /playground/local_runner.go: -------------------------------------------------------------------------------- 1 | package playground 2 | 3 | import ( 4 | "bytes" 5 | "context" 6 | "fmt" 7 | "net" 8 | "os" 9 | "os/exec" 10 | "path/filepath" 11 | "runtime" 12 | "strings" 13 | "sync" 14 | "text/template" 15 | "time" 16 | 17 | "github.com/charmbracelet/bubbles/spinner" 18 | 
"github.com/charmbracelet/lipgloss" 19 | "github.com/docker/docker/api/types/container" 20 | "github.com/docker/docker/api/types/events" 21 | "github.com/docker/docker/api/types/filters" 22 | "github.com/docker/docker/client" 23 | "github.com/docker/docker/pkg/stdcopy" 24 | "github.com/ethereum/go-ethereum/log" 25 | "github.com/google/uuid" 26 | "gopkg.in/yaml.v2" 27 | ) 28 | 29 | const defaultNetworkName = "ethplayground" 30 | 31 | // LocalRunner is a component that runs the services from the manifest on the local host machine. 32 | // By default, it uses docker and docker compose to run all the services. 33 | // But, some services (if they are configured to do so) can be run on the host machine instead. 34 | // When running inside docker, each service will use the port numbers they define in the component description. 35 | // Besides, they will also bind to an available public port on the host machine. 36 | // If the service runs on the host, it will use the host port numbers instead directly. 37 | type LocalRunner struct { 38 | out *output 39 | manifest *Manifest 40 | client *client.Client 41 | 42 | // reservedPorts is a map of port numbers reserved for each service to avoid conflicts 43 | // since we reserve ports for all the services before they are used 44 | reservedPorts map[int]bool 45 | 46 | // overrides is a map of service name to the path of the executable to run 47 | // on the host machine instead of a container. 
48 | overrides map[string]string 49 | 50 | // handles stores the references to the processes that are running on host machine 51 | // they are executed sequentially so we do not need to lock the handles 52 | handles []*exec.Cmd 53 | 54 | // exitError signals when one of the services fails 55 | exitErr chan error 56 | 57 | // signals whether we are running in interactive mode 58 | interactive bool 59 | 60 | // TODO: Merge instance with tasks 61 | instances []*instance 62 | 63 | // tasks tracks the status of each service 64 | tasksMtx sync.Mutex 65 | tasks map[string]*task 66 | taskUpdateCh chan struct{} 67 | 68 | // wether to bind the ports to the local interface 69 | bindHostPortsLocally bool 70 | 71 | // sessionID is a random sequence that is used to identify the session 72 | // it is used to identify the containers in the cleanup process 73 | sessionID string 74 | 75 | // networkName is the name of the network to use for the services 76 | networkName string 77 | 78 | // labels is the list of labels to apply to each resource being created 79 | labels map[string]string 80 | 81 | // logInternally outputs the logs of the service to the artifacts folder 82 | logInternally bool 83 | } 84 | 85 | type task struct { 86 | status string 87 | ready bool 88 | logs *os.File 89 | } 90 | 91 | var ( 92 | taskStatusPending = "pending" 93 | taskStatusStarted = "started" 94 | taskStatusDie = "die" 95 | taskStatusHealthy = "healthy" 96 | ) 97 | 98 | type taskUI struct { 99 | tasks map[string]string 100 | spinners map[string]spinner.Model 101 | style lipgloss.Style 102 | } 103 | 104 | func newDockerClient() (*client.Client, error) { 105 | client, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation()) 106 | if err != nil { 107 | return nil, fmt.Errorf("failed to create docker client: %w", err) 108 | } 109 | return client, nil 110 | } 111 | 112 | // TODO: add a runner config struct 113 | func NewLocalRunner(out *output, manifest *Manifest, overrides 
map[string]string, interactive bool, bindHostPortsLocally bool, networkName string, labels map[string]string, logInternally bool) (*LocalRunner, error) { 114 | client, err := newDockerClient() 115 | if err != nil { 116 | return nil, fmt.Errorf("failed to create docker client: %w", err) 117 | } 118 | 119 | // merge the overrides with the manifest overrides 120 | if overrides == nil { 121 | overrides = make(map[string]string) 122 | } 123 | for k, v := range manifest.overrides { 124 | overrides[k] = v 125 | } 126 | 127 | // Create the concrete instances to run 128 | instances := []*instance{} 129 | for _, service := range manifest.Services { 130 | component := FindComponent(service.ComponentName) 131 | if component == nil { 132 | return nil, fmt.Errorf("component not found '%s'", service.ComponentName) 133 | } 134 | instance := &instance{ 135 | service: service, 136 | component: component, 137 | } 138 | if logInternally { 139 | log_output, err := out.LogOutput(service.Name) 140 | if err != nil { 141 | return nil, fmt.Errorf("error getting log output: %w", err) 142 | } 143 | instance.logs = &serviceLogs{ 144 | logRef: log_output, 145 | path: log_output.Name(), 146 | } 147 | } 148 | instances = append(instances, instance) 149 | } 150 | 151 | // download any local release artifacts for the services that require them 152 | // TODO: it feels a bit weird to have all this logic on the new command. We should split it later on. 153 | for _, instance := range instances { 154 | ss := instance.service 155 | if ss.Labels[useHostExecutionLabel] == "true" { 156 | // If the service wants to run on the host, it must implement the ReleaseService interface 157 | // which provides functions to download the release artifact. 
158 | releaseService, ok := instance.component.(ReleaseService) 159 | if !ok { 160 | return nil, fmt.Errorf("service '%s' must implement the ReleaseService interface", ss.Name) 161 | } 162 | releaseArtifact := releaseService.ReleaseArtifact() 163 | bin, err := DownloadRelease(out.homeDir, releaseArtifact) 164 | if err != nil { 165 | return nil, fmt.Errorf("failed to download release artifact for service '%s': %w", ss.Name, err) 166 | } 167 | overrides[ss.Name] = bin 168 | } 169 | } 170 | 171 | // Now, the override can either be one of two things (we are overloading the override map): 172 | // - docker image: In that case, change the manifest and remove from override map 173 | // - a path to an executable: In that case, we need to run it on the host machine 174 | // and use the override map <- We only check this case, and if it is not a path, we assume 175 | // it is a docker image. If it is not a docker image either, the error will be catched during the execution 176 | for k, v := range overrides { 177 | if _, err := os.Stat(v); err != nil { 178 | // this is a path to an executable, remove it from the overrides since we 179 | // assume it s a docker image and add it to manifest 180 | parts := strings.Split(v, ":") 181 | if len(parts) != 2 { 182 | return nil, fmt.Errorf("invalid override docker image %s, expected image:tag", v) 183 | } 184 | 185 | srv := manifest.MustGetService(k) 186 | srv.Image = parts[0] 187 | srv.Tag = parts[1] 188 | 189 | delete(overrides, k) 190 | continue 191 | } 192 | } 193 | 194 | tasks := map[string]*task{} 195 | for _, svc := range manifest.Services { 196 | tasks[svc.Name] = &task{ 197 | status: taskStatusPending, 198 | logs: nil, 199 | } 200 | } 201 | 202 | if networkName == "" { 203 | networkName = defaultNetworkName 204 | } 205 | d := &LocalRunner{ 206 | out: out, 207 | manifest: manifest, 208 | client: client, 209 | reservedPorts: map[int]bool{}, 210 | overrides: overrides, 211 | handles: []*exec.Cmd{}, 212 | tasks: tasks, 213 | 
taskUpdateCh: make(chan struct{}), 214 | exitErr: make(chan error, 2), 215 | bindHostPortsLocally: bindHostPortsLocally, 216 | sessionID: uuid.New().String(), 217 | networkName: networkName, 218 | instances: instances, 219 | labels: labels, 220 | logInternally: logInternally, 221 | } 222 | 223 | if interactive { 224 | go d.printStatus() 225 | 226 | select { 227 | case d.taskUpdateCh <- struct{}{}: 228 | default: 229 | } 230 | } 231 | 232 | return d, nil 233 | } 234 | 235 | func (d *LocalRunner) Instances() []*instance { 236 | return d.instances 237 | } 238 | 239 | func (d *LocalRunner) printStatus() { 240 | fmt.Print("\033[s") 241 | lineOffset := 0 242 | 243 | // Get ordered service names from manifest 244 | orderedServices := make([]string, 0, len(d.manifest.Services)) 245 | for _, svc := range d.manifest.Services { 246 | orderedServices = append(orderedServices, svc.Name) 247 | } 248 | 249 | // Initialize UI state 250 | ui := taskUI{ 251 | tasks: make(map[string]string), 252 | spinners: make(map[string]spinner.Model), 253 | style: lipgloss.NewStyle(), 254 | } 255 | 256 | // Initialize spinners for each service 257 | for _, name := range orderedServices { 258 | sp := spinner.New() 259 | sp.Spinner = spinner.Dot 260 | ui.spinners[name] = sp 261 | } 262 | 263 | for { 264 | select { 265 | case <-d.taskUpdateCh: 266 | d.tasksMtx.Lock() 267 | 268 | // Clear the previous lines and move cursor up 269 | if lineOffset > 0 { 270 | fmt.Printf("\033[%dA", lineOffset) 271 | fmt.Print("\033[J") 272 | } 273 | 274 | lineOffset = 0 275 | // Use ordered services instead of ranging over map 276 | for _, name := range orderedServices { 277 | status := d.tasks[name].status 278 | var statusLine string 279 | 280 | switch status { 281 | case taskStatusStarted: 282 | sp := ui.spinners[name] 283 | sp.Tick() 284 | ui.spinners[name] = sp 285 | statusLine = ui.style.Foreground(lipgloss.Color("2")).Render(fmt.Sprintf("%s [%s] Running", sp.View(), name)) 286 | case taskStatusDie: 287 | 
statusLine = ui.style.Foreground(lipgloss.Color("1")).Render(fmt.Sprintf("✗ [%s] Failed", name)) 288 | case taskStatusPending: 289 | sp := ui.spinners[name] 290 | sp.Tick() 291 | ui.spinners[name] = sp 292 | statusLine = ui.style.Foreground(lipgloss.Color("3")).Render(fmt.Sprintf("%s [%s] Pending", sp.View(), name)) 293 | } 294 | 295 | fmt.Println(statusLine) 296 | lineOffset++ 297 | } 298 | 299 | d.tasksMtx.Unlock() 300 | } 301 | } 302 | } 303 | 304 | func (d *LocalRunner) AreReady() bool { 305 | d.tasksMtx.Lock() 306 | defer d.tasksMtx.Unlock() 307 | 308 | for name, task := range d.tasks { 309 | // ensure the task is not a host service 310 | if d.isHostService(name) { 311 | continue 312 | } 313 | 314 | // first ensure the task has started 315 | if task.status != taskStatusStarted { 316 | return false 317 | } 318 | 319 | // then ensure it is ready if it has a ready function 320 | svc := d.getService(name) 321 | if svc.ReadyCheck != nil { 322 | if !task.ready { 323 | return false 324 | } 325 | } 326 | } 327 | return true 328 | } 329 | 330 | func (d *LocalRunner) WaitForReady(ctx context.Context, timeout time.Duration) error { 331 | for { 332 | select { 333 | case <-ctx.Done(): 334 | return ctx.Err() 335 | 336 | case <-time.After(1 * time.Second): 337 | if d.AreReady() { 338 | return nil 339 | } 340 | 341 | case err := <-d.exitErr: 342 | return err 343 | } 344 | } 345 | } 346 | 347 | func (d *LocalRunner) updateTaskStatus(name string, status string) { 348 | d.tasksMtx.Lock() 349 | defer d.tasksMtx.Unlock() 350 | if status == taskStatusHealthy { 351 | d.tasks[name].ready = true 352 | } else { 353 | d.tasks[name].status = status 354 | } 355 | 356 | if status == taskStatusDie { 357 | d.exitErr <- fmt.Errorf("container %s failed", name) 358 | } 359 | 360 | select { 361 | case d.taskUpdateCh <- struct{}{}: 362 | default: 363 | } 364 | } 365 | 366 | func (d *LocalRunner) ExitErr() <-chan error { 367 | return d.exitErr 368 | } 369 | 370 | func (d *LocalRunner) Stop() error 
{ 371 | // only stop the containers that belong to this session 372 | containers, err := d.client.ContainerList(context.Background(), container.ListOptions{ 373 | Filters: filters.NewArgs(filters.Arg("label", fmt.Sprintf("playground.session=%s", d.sessionID))), 374 | }) 375 | if err != nil { 376 | return fmt.Errorf("error getting container list: %w", err) 377 | } 378 | 379 | var wg sync.WaitGroup 380 | wg.Add(len(containers)) 381 | 382 | var errCh chan error 383 | errCh = make(chan error, len(containers)) 384 | 385 | for _, cont := range containers { 386 | go func(contID string) { 387 | defer wg.Done() 388 | if err := d.client.ContainerRemove(context.Background(), contID, container.RemoveOptions{ 389 | RemoveVolumes: true, 390 | RemoveLinks: false, 391 | Force: true, 392 | }); err != nil { 393 | errCh <- fmt.Errorf("error removing container: %w", err) 394 | } 395 | }(cont.ID) 396 | } 397 | 398 | wg.Wait() 399 | 400 | // stop all the handles 401 | for _, handle := range d.handles { 402 | handle.Process.Kill() 403 | } 404 | 405 | close(errCh) 406 | 407 | for err := range errCh { 408 | if err != nil { 409 | return err 410 | } 411 | } 412 | 413 | return nil 414 | } 415 | 416 | // reservePort finds the first available port from the startPort and reserves it 417 | // Note that we have to keep track of the port in 'reservedPorts' because 418 | // the port allocation happens before the services uses it and binds to it. 
419 | func (d *LocalRunner) reservePort(startPort int, protocol string) int { 420 | for i := startPort; i < startPort+1000; i++ { 421 | if _, ok := d.reservedPorts[i]; ok { 422 | continue 423 | } 424 | 425 | bindAddr := "0.0.0.0" 426 | if d.bindHostPortsLocally { 427 | bindAddr = "127.0.0.1" 428 | } 429 | 430 | if protocol == ProtocolUDP { 431 | listener, err := net.ListenUDP("udp", &net.UDPAddr{ 432 | Port: i, 433 | IP: net.ParseIP(bindAddr), 434 | }) 435 | if err != nil { 436 | continue 437 | } 438 | listener.Close() 439 | } else if protocol == ProtocolTCP { 440 | listener, err := net.Listen(protocol, fmt.Sprintf("%s:%d", bindAddr, i)) 441 | if err != nil { 442 | continue 443 | } 444 | listener.Close() 445 | } else { 446 | panic(fmt.Sprintf("invalid protocol: %s", protocol)) 447 | } 448 | 449 | d.reservedPorts[i] = true 450 | return i 451 | } 452 | panic("BUG: could not reserve a port") 453 | } 454 | 455 | func (d *LocalRunner) getService(name string) *Service { 456 | for _, svc := range d.manifest.Services { 457 | if svc.Name == name { 458 | return svc 459 | } 460 | } 461 | return nil 462 | } 463 | 464 | // applyTemplate resolves the templates from the manifest (Dir, Port, Connect) into 465 | // the actual values for this specific docker execution. 
466 | func (d *LocalRunner) applyTemplate(s *Service) ([]string, map[string]string, error) { 467 | var input map[string]interface{} 468 | 469 | resolvePort := func(name string, defaultPort int, protocol string) int { 470 | // For {{Port "name" "defaultPort"}}: 471 | // - Service runs on host: return the host port 472 | // - Service runs inside docker: return the docker port 473 | if d.isHostService(s.Name) { 474 | return s.MustGetPort(name).HostPort 475 | } 476 | return defaultPort 477 | } 478 | 479 | funcs := template.FuncMap{ 480 | "Service": func(name string, portLabel, protocol, user string) string { 481 | // For {{Service "name" "portLabel"}}: 482 | // - Service runs on host: 483 | // A: target is inside docker: access with localhost:hostPort 484 | // B: target is on the host: access with localhost:hostPort 485 | // - Service runs inside docker: 486 | // C: target is inside docker: access it with DNS service:port 487 | // D: target is on the host: access it with host.docker.internal:hostPort 488 | 489 | // find the service and the port that it resolves for that label 490 | svc := d.manifest.MustGetService(name) 491 | port := svc.MustGetPort(portLabel) 492 | 493 | if d.isHostService(s.Name) { 494 | // A and B 495 | return printAddr(protocol, "localhost", port.HostPort, user) 496 | } else { 497 | if d.isHostService(svc.Name) { 498 | // D 499 | return printAddr(protocol, "host.docker.internal", port.HostPort, user) 500 | } 501 | // C 502 | return printAddr(protocol, svc.Name, port.Port, user) 503 | } 504 | }, 505 | "Port": func(name string, defaultPort int) int { 506 | return resolvePort(name, defaultPort, ProtocolTCP) 507 | }, 508 | "PortUDP": func(name string, defaultPort int) int { 509 | return resolvePort(name, defaultPort, ProtocolUDP) 510 | }, 511 | } 512 | 513 | runTemplate := func(arg string) (string, error) { 514 | tpl, err := template.New("").Funcs(funcs).Parse(arg) 515 | if err != nil { 516 | return "", err 517 | } 518 | 519 | var out strings.Builder 
520 | if err := tpl.Execute(&out, input); err != nil { 521 | return "", err 522 | } 523 | 524 | return out.String(), nil 525 | } 526 | 527 | // apply the templates to the arguments 528 | var argsResult []string 529 | for _, arg := range s.Args { 530 | newArg, err := runTemplate(arg) 531 | if err != nil { 532 | return nil, nil, err 533 | } 534 | argsResult = append(argsResult, newArg) 535 | } 536 | 537 | // apply the templates to the environment variables 538 | envs := map[string]string{} 539 | for k, v := range s.Env { 540 | newV, err := runTemplate(v) 541 | if err != nil { 542 | return nil, nil, err 543 | } 544 | envs[k] = newV 545 | } 546 | 547 | return argsResult, envs, nil 548 | } 549 | 550 | func printAddr(protocol, serviceName string, port int, user string) string { 551 | var protocolPrefix string 552 | if protocol != "" { 553 | protocolPrefix = protocol + "://" 554 | } 555 | 556 | if user != "" { 557 | return fmt.Sprintf("%s%s@%s:%s", protocolPrefix, user, serviceName, serviceName) 558 | } 559 | 560 | return fmt.Sprintf("%s%s:%d", protocolPrefix, serviceName, port) 561 | } 562 | 563 | func (d *LocalRunner) validateImageExists(image string) error { 564 | // check locally 565 | _, err := d.client.ImageInspect(context.Background(), image) 566 | if err == nil { 567 | return nil 568 | } 569 | if !client.IsErrNotFound(err) { 570 | return err 571 | } 572 | 573 | // check remotely 574 | if _, err = d.client.DistributionInspect(context.Background(), image, ""); err == nil { 575 | return nil 576 | } 577 | if !client.IsErrNotFound(err) { 578 | return err 579 | } 580 | 581 | return fmt.Errorf("image %s not found", image) 582 | } 583 | 584 | func (d *LocalRunner) toDockerComposeService(s *Service) (map[string]interface{}, error) { 585 | // apply the template again on the arguments to figure out the connections 586 | // at this point all of them are valid, we just have to resolve them again. We assume for now 587 | // everyone is going to be on docker at the same network. 
588 | args, envs, err := d.applyTemplate(s) 589 | if err != nil { 590 | return nil, fmt.Errorf("failed to apply template, err: %w", err) 591 | } 592 | 593 | // The containers have access to the full set of artifacts on the /artifacts folder 594 | // so, we have to bind it as a volume on the container. 595 | outputFolder, err := d.out.AbsoluteDstPath() 596 | if err != nil { 597 | return nil, fmt.Errorf("failed to get absolute path for output folder: %w", err) 598 | } 599 | 600 | // Validate that the image exists 601 | imageName := fmt.Sprintf("%s:%s", s.Image, s.Tag) 602 | if err := d.validateImageExists(imageName); err != nil { 603 | return nil, fmt.Errorf("failed to validate image %s: %w", imageName, err) 604 | } 605 | 606 | labels := map[string]string{ 607 | // It is important to use the playground label to identify the containers 608 | // during the cleanup process 609 | "playground": "true", 610 | "playground.session": d.sessionID, 611 | "service": s.Name, 612 | } 613 | 614 | // apply the user defined labels 615 | for k, v := range d.labels { 616 | labels[k] = v 617 | } 618 | 619 | // add the local ports exposed by the service as labels 620 | // we have to do this for now since we do not store the manifest in JSON yet. 
621 | // Otherwise, we could use that directly 622 | for _, port := range s.Ports { 623 | labels[fmt.Sprintf("port.%s", port.Name)] = fmt.Sprintf("%d", port.Port) 624 | } 625 | 626 | // Use files mapped to figure out which files from the artifacts is using the service 627 | volumes := map[string]string{ 628 | outputFolder: "/artifacts", // placeholder 629 | } 630 | for k, v := range s.FilesMapped { 631 | volumes[filepath.Join(outputFolder, v)] = k 632 | } 633 | 634 | // create the bind volumes 635 | for localPath, volumeName := range s.VolumesMapped { 636 | volumeDirAbsPath, err := d.createVolume(s.Name, volumeName) 637 | if err != nil { 638 | return nil, err 639 | } 640 | volumes[volumeDirAbsPath] = localPath 641 | } 642 | 643 | volumesInLine := []string{} 644 | for k, v := range volumes { 645 | volumesInLine = append(volumesInLine, fmt.Sprintf("%s:%s", k, v)) 646 | } 647 | 648 | // add the ports to the labels as well 649 | service := map[string]interface{}{ 650 | "image": imageName, 651 | "command": args, 652 | // Add volume mount for the output directory 653 | "volumes": volumesInLine, 654 | // Add the ethereum network 655 | "networks": []string{d.networkName}, 656 | "labels": labels, 657 | } 658 | 659 | if len(envs) > 0 { 660 | service["environment"] = envs 661 | } 662 | 663 | if s.ReadyCheck != nil { 664 | var test []string 665 | if s.ReadyCheck.QueryURL != "" { 666 | // This is pretty much hardcoded for now. 
667 | test = []string{"CMD-SHELL", "chmod +x /artifacts/scripts/query.sh && /artifacts/scripts/query.sh " + s.ReadyCheck.QueryURL} 668 | } else { 669 | test = s.ReadyCheck.Test 670 | } 671 | 672 | service["healthcheck"] = map[string]interface{}{ 673 | "test": test, 674 | "interval": s.ReadyCheck.Interval.String(), 675 | "timeout": s.ReadyCheck.Timeout.String(), 676 | "retries": s.ReadyCheck.Retries, 677 | "start_period": s.ReadyCheck.StartPeriod.String(), 678 | } 679 | } 680 | 681 | if s.DependsOn != nil { 682 | depends := map[string]interface{}{} 683 | 684 | for _, d := range s.DependsOn { 685 | if d.Condition == "" { 686 | depends[d.Name] = struct{}{} 687 | } else { 688 | depends[d.Name] = map[string]interface{}{ 689 | "condition": d.Condition, 690 | } 691 | } 692 | } 693 | service["depends_on"] = depends 694 | } 695 | 696 | if runtime.GOOS == "linux" { 697 | // We rely on host.docker.internal as the DNS address for the host inside 698 | // the container. But, this is only available on Macos and Windows. 699 | // On Linux, you can use the IP address 172.17.0.1 to access the host. 700 | // Thus, if we are running on Linux, we need to add an extra host entry. 
701 | service["extra_hosts"] = map[string]string{ 702 | "host.docker.internal": "172.17.0.1", 703 | } 704 | } 705 | 706 | if s.Entrypoint != "" { 707 | service["entrypoint"] = s.Entrypoint 708 | } 709 | 710 | if len(s.Ports) > 0 { 711 | ports := []string{} 712 | for _, p := range s.Ports { 713 | protocol := "" 714 | if p.Protocol == ProtocolUDP { 715 | protocol = "/udp" 716 | } 717 | 718 | if d.bindHostPortsLocally { 719 | ports = append(ports, fmt.Sprintf("127.0.0.1:%d:%d%s", p.HostPort, p.Port, protocol)) 720 | } else { 721 | ports = append(ports, fmt.Sprintf("%d:%d%s", p.HostPort, p.Port, protocol)) 722 | } 723 | } 724 | service["ports"] = ports 725 | } 726 | 727 | return service, nil 728 | } 729 | 730 | func (d *LocalRunner) isHostService(name string) bool { 731 | _, ok := d.overrides[name] 732 | return ok 733 | } 734 | 735 | func (d *LocalRunner) generateDockerCompose() ([]byte, error) { 736 | compose := map[string]interface{}{ 737 | // We create a new network to be used by all the services so that 738 | // we can do DNS discovery between them. 739 | "networks": map[string]interface{}{ 740 | d.networkName: map[string]interface{}{ 741 | "name": d.networkName, 742 | }, 743 | }, 744 | } 745 | 746 | services := map[string]interface{}{} 747 | 748 | // for each service, reserve a port on the host machine. We use this ports 749 | // both to have access to the services from localhost but also to do communication 750 | // between services running inside docker and the ones running on the host machine. 
751 | for _, svc := range d.manifest.Services { 752 | for _, port := range svc.Ports { 753 | port.HostPort = d.reservePort(port.Port, port.Protocol) 754 | } 755 | } 756 | 757 | for _, svc := range d.manifest.Services { 758 | if d.isHostService(svc.Name) { 759 | // skip services that are going to be launched on host 760 | continue 761 | } 762 | var err error 763 | if services[svc.Name], err = d.toDockerComposeService(svc); err != nil { 764 | return nil, fmt.Errorf("failed to convert service %s to docker compose service: %w", svc.Name, err) 765 | } 766 | } 767 | 768 | compose["services"] = services 769 | yamlData, err := yaml.Marshal(compose) 770 | if err != nil { 771 | return nil, fmt.Errorf("failed to marshal docker compose: %w", err) 772 | } 773 | 774 | return yamlData, nil 775 | } 776 | 777 | func (d *LocalRunner) createVolume(service, volumeName string) (string, error) { 778 | // create the volume in the output folder 779 | volumeDirAbsPath, err := d.out.CreateDir(fmt.Sprintf("volume-%s-%s", service, volumeName)) 780 | if err != nil { 781 | return "", fmt.Errorf("failed to create volume dir %s: %w", volumeName, err) 782 | } 783 | return volumeDirAbsPath, nil 784 | } 785 | 786 | // runOnHost runs the service on the host machine 787 | func (d *LocalRunner) runOnHost(ss *Service) error { 788 | // TODO: Use env vars in host processes 789 | args, _, err := d.applyTemplate(ss) 790 | if err != nil { 791 | return fmt.Errorf("failed to apply template, err: %w", err) 792 | } 793 | 794 | // Create the volumes for this service 795 | volumesMapped := map[string]string{} 796 | for pathInDocker, volumeName := range ss.VolumesMapped { 797 | volumeDirAbsPath, err := d.createVolume(ss.Name, volumeName) 798 | if err != nil { 799 | return err 800 | } 801 | volumesMapped[pathInDocker] = volumeDirAbsPath 802 | } 803 | 804 | // We have to replace the names of the files it is using as artifacts for the full names 805 | // Just a string replacement should be enough 806 | for i, arg := 
range args { 807 | // If any of the args contains any of the files mapped, we need to replace it 808 | for pathInDocker, artifactName := range ss.FilesMapped { 809 | if strings.Contains(arg, pathInDocker) { 810 | args[i] = strings.ReplaceAll(arg, pathInDocker, filepath.Join(d.out.dst, artifactName)) 811 | } 812 | } 813 | // If any of the args contains any of the volumes mapped, we need to create 814 | // the volume and replace it 815 | for pathInDocker, volumeAbsPath := range volumesMapped { 816 | if strings.Contains(arg, pathInDocker) { 817 | args[i] = strings.ReplaceAll(arg, pathInDocker, volumeAbsPath) 818 | } 819 | } 820 | } 821 | 822 | execPath := d.overrides[ss.Name] 823 | cmd := exec.Command(execPath, args...) 824 | 825 | logOutput, err := d.out.LogOutput(ss.Name) 826 | if err != nil { 827 | // this should not happen, log it 828 | logOutput = os.Stdout 829 | } 830 | 831 | // Output the command itself to the log output for debugging purposes 832 | fmt.Fprint(logOutput, strings.Join(args, " ")+"\n\n") 833 | 834 | cmd.Stdout = logOutput 835 | cmd.Stderr = logOutput 836 | 837 | go func() { 838 | if err := cmd.Run(); err != nil { 839 | d.exitErr <- fmt.Errorf("error running host service %s: %w", ss.Name, err) 840 | } 841 | }() 842 | 843 | // we do not need to lock this array because we run the host services sequentially 844 | d.handles = append(d.handles, cmd) 845 | return nil 846 | } 847 | 848 | // trackLogs tracks the logs of a container and writes them to the log output 849 | func (d *LocalRunner) trackLogs(serviceName string, containerID string) error { 850 | d.tasksMtx.Lock() 851 | log_output := d.tasks[serviceName].logs 852 | d.tasksMtx.Unlock() 853 | 854 | if log_output == nil { 855 | panic("BUG: log output not found for service " + serviceName) 856 | } 857 | 858 | logs, err := d.client.ContainerLogs(context.Background(), containerID, container.LogsOptions{ 859 | ShowStdout: true, 860 | ShowStderr: true, 861 | Follow: true, 862 | }) 863 | if err != nil { 
864 | return fmt.Errorf("error getting container logs: %w", err) 865 | } 866 | 867 | if _, err := stdcopy.StdCopy(log_output, log_output, logs); err != nil { 868 | return fmt.Errorf("error copying logs: %w", err) 869 | } 870 | 871 | return nil 872 | } 873 | 874 | func (d *LocalRunner) trackContainerStatusAndLogs() { 875 | eventCh, errCh := d.client.Events(context.Background(), events.ListOptions{ 876 | Filters: filters.NewArgs(filters.Arg("label", fmt.Sprintf("playground.session=%s", d.sessionID))), 877 | }) 878 | 879 | for { 880 | select { 881 | case event := <-eventCh: 882 | name := event.Actor.Attributes["com.docker.compose.service"] 883 | 884 | switch event.Action { 885 | case events.ActionStart: 886 | d.updateTaskStatus(name, taskStatusStarted) 887 | 888 | if d.logInternally { 889 | // the container has started, we can track the logs now 890 | go func() { 891 | if err := d.trackLogs(name, event.Actor.ID); err != nil { 892 | log.Warn("error tracking logs", "error", err) 893 | } 894 | }() 895 | } 896 | case events.ActionDie: 897 | d.updateTaskStatus(name, taskStatusDie) 898 | log.Info("container died", "name", name) 899 | 900 | case events.ActionHealthStatusHealthy: 901 | d.updateTaskStatus(name, taskStatusHealthy) 902 | log.Info("container is healthy", "name", name) 903 | } 904 | 905 | case err := <-errCh: 906 | log.Warn("error tracking events", "error", err) 907 | } 908 | } 909 | } 910 | 911 | func CreatePrometheusServices(manifest *Manifest, out *output) error { 912 | // Read all the components to be deployed and find all the ports with name 'metrics' 913 | // to create the prometheus scrapper config 914 | var scrapeConfigs []map[string]interface{} 915 | 916 | // global scrape config 917 | scrapeConfigs = append(scrapeConfigs, map[string]interface{}{ 918 | "job_name": "external", 919 | "metrics_path": "/metrics", 920 | "static_configs": []map[string]interface{}{ 921 | { 922 | "targets": []string{"host.docker.internal:5555"}, 923 | }, 924 | }, 925 | }) 926 | 
927 | for _, c := range manifest.Services { 928 | for _, port := range c.Ports { 929 | if port.Name == "metrics" { 930 | metricsPath := "/metrics" 931 | if overrideMetricsPath, ok := c.Labels["metrics_path"]; ok { 932 | metricsPath = overrideMetricsPath 933 | } 934 | 935 | scrapeConfig := map[string]interface{}{ 936 | "job_name": c.Name, 937 | "metrics_path": metricsPath, 938 | "static_configs": []map[string]interface{}{ 939 | { 940 | "targets": []string{fmt.Sprintf("%s:%d", c.Name, port.Port)}, 941 | }, 942 | }, 943 | } 944 | scrapeConfigs = append(scrapeConfigs, scrapeConfig) 945 | } 946 | } 947 | } 948 | 949 | promConfig := map[string]interface{}{ 950 | "global": map[string]interface{}{ 951 | "scrape_interval": "1s", 952 | "evaluation_interval": "1s", 953 | }, 954 | "scrape_configs": scrapeConfigs, 955 | } 956 | 957 | if err := out.WriteFile("prometheus.yaml", promConfig); err != nil { 958 | return fmt.Errorf("failed to write prometheus.yml: %w", err) 959 | } 960 | 961 | // add to the manifest the prometheus service 962 | // This is a bit of a hack. 963 | srv := manifest.NewService("prometheus"). 964 | WithImage("prom/prometheus"). 965 | WithTag("latest"). 966 | WithArgs("--config.file", "/data/prometheus.yaml"). 967 | WithPort("metrics", 9090, "tcp"). 
968 | WithArtifact("/data/prometheus.yaml", "prometheus.yaml") 969 | srv.ComponentName = "null" // For now, later on we can create a Prometheus component 970 | manifest.Services = append(manifest.Services, srv) 971 | 972 | return nil 973 | } 974 | 975 | func (d *LocalRunner) Run() error { 976 | go d.trackContainerStatusAndLogs() 977 | 978 | yamlData, err := d.generateDockerCompose() 979 | if err != nil { 980 | return fmt.Errorf("failed to generate docker-compose.yaml: %w", err) 981 | } 982 | 983 | if err := d.out.WriteFile("docker-compose.yaml", yamlData); err != nil { 984 | return fmt.Errorf("failed to write docker-compose.yaml: %w", err) 985 | } 986 | 987 | // generate the output log file for each service so that it is available after Run is done 988 | for _, instance := range d.instances { 989 | if instance.logs != nil { 990 | d.tasks[instance.service.Name].logs = instance.logs.logRef 991 | } 992 | } 993 | 994 | // First start the services that are running in docker-compose 995 | cmd := exec.Command("docker", "compose", "-f", d.out.dst+"/docker-compose.yaml", "up", "-d") 996 | 997 | var errOut bytes.Buffer 998 | cmd.Stderr = &errOut 999 | 1000 | if err := cmd.Run(); err != nil { 1001 | return fmt.Errorf("failed to run docker-compose: %w, err: %s", err, errOut.String()) 1002 | } 1003 | 1004 | // Second, start the services that are running on the host machine 1005 | errCh := make(chan error) 1006 | go func() { 1007 | for _, svc := range d.manifest.Services { 1008 | if d.isHostService(svc.Name) { 1009 | if err := d.runOnHost(svc); err != nil { 1010 | errCh <- err 1011 | } 1012 | } 1013 | } 1014 | close(errCh) 1015 | }() 1016 | 1017 | for err := range errCh { 1018 | if err != nil { 1019 | return err 1020 | } 1021 | } 1022 | return nil 1023 | } 1024 | -------------------------------------------------------------------------------- /playground/manifest.go: -------------------------------------------------------------------------------- 1 | package playground 2 | 3 | 
import ( 4 | "encoding/json" 5 | "fmt" 6 | "os" 7 | "path/filepath" 8 | "strings" 9 | "text/template" 10 | "time" 11 | 12 | flag "github.com/spf13/pflag" 13 | ) 14 | 15 | const useHostExecutionLabel = "use-host-execution" 16 | 17 | type Recipe interface { 18 | Name() string 19 | Description() string 20 | Flags() *flag.FlagSet 21 | Artifacts() *ArtifactsBuilder 22 | Apply(ctx *ExContext, artifacts *Artifacts) *Manifest 23 | Output(manifest *Manifest) map[string]interface{} 24 | } 25 | 26 | // Manifest describes a list of services and their dependencies 27 | type Manifest struct { 28 | ctx *ExContext 29 | 30 | // list of Services 31 | Services []*Service `json:"services"` 32 | 33 | // overrides is a map of service name to the path of the executable to run 34 | // on the host machine instead of a container. 35 | overrides map[string]string 36 | 37 | out *output 38 | } 39 | 40 | func NewManifest(ctx *ExContext, out *output) *Manifest { 41 | ctx.Output = out 42 | return &Manifest{ctx: ctx, out: out, overrides: make(map[string]string)} 43 | } 44 | 45 | type LogLevel string 46 | 47 | var ( 48 | LevelDebug LogLevel = "debug" 49 | LevelInfo LogLevel = "info" 50 | LevelWarn LogLevel = "warn" 51 | LevelError LogLevel = "error" 52 | LevelTrace LogLevel = "trace" 53 | ) 54 | 55 | func (l *LogLevel) Unmarshal(s string) error { 56 | switch s { 57 | case "debug": 58 | *l = LevelDebug 59 | case "info": 60 | *l = LevelInfo 61 | case "trace": 62 | *l = LevelTrace 63 | case "warn": 64 | *l = LevelWarn 65 | case "error": 66 | *l = LevelError 67 | default: 68 | return fmt.Errorf("invalid log level: %s", s) 69 | } 70 | return nil 71 | } 72 | 73 | // Execution context 74 | type ExContext struct { 75 | LogLevel LogLevel 76 | 77 | // This dependency is not ideal. Doing it so that I do not 78 | // have to modify the serviceDesc interface to give services 79 | // access to the output. 
80 | Output *output 81 | } 82 | 83 | type ServiceGen interface { 84 | Run(service *Service, ctx *ExContext) 85 | Name() string 86 | } 87 | 88 | type ServiceReady interface { 89 | Ready(instance *instance) error 90 | } 91 | 92 | // ReleaseService is a service that can also be runned as an artifact in the host machine 93 | type ReleaseService interface { 94 | ReleaseArtifact() *release 95 | } 96 | 97 | func (s *Manifest) AddService(name string, srv ServiceGen) { 98 | service := s.NewService(name) 99 | service.ComponentName = srv.Name() 100 | srv.Run(service, s.ctx) 101 | 102 | s.Services = append(s.Services, service) 103 | } 104 | 105 | func (s *Manifest) MustGetService(name string) *Service { 106 | service, ok := s.GetService(name) 107 | if !ok { 108 | panic(fmt.Sprintf("service %s not found", name)) 109 | } 110 | return service 111 | } 112 | 113 | func (s *Manifest) GetService(name string) (*Service, bool) { 114 | for _, ss := range s.Services { 115 | if ss.Name == name { 116 | return ss, true 117 | } 118 | } 119 | return nil, false 120 | } 121 | 122 | // Validate validates the manifest 123 | // - checks if all the port dependencies are met from the service description 124 | // - downloads any local release artifacts for the services that require host execution 125 | func (s *Manifest) Validate() error { 126 | for _, ss := range s.Services { 127 | // validate node port references 128 | for _, nodeRef := range ss.NodeRefs { 129 | targetService, ok := s.GetService(nodeRef.Service) 130 | if !ok { 131 | return fmt.Errorf("service %s depends on service %s, but it is not defined", ss.Name, nodeRef.Service) 132 | } 133 | 134 | if _, ok := targetService.GetPort(nodeRef.PortLabel); !ok { 135 | return fmt.Errorf("service %s depends on service %s, but it does not expose port %s", ss.Name, nodeRef.Service, nodeRef.PortLabel) 136 | } 137 | } 138 | 139 | // validate depends_on statements 140 | for _, dep := range ss.DependsOn { 141 | service, ok := s.GetService(dep.Name) 142 | 
if !ok { 143 | return fmt.Errorf("service %s depends on service %s, but it is not defined", ss.Name, dep.Name) 144 | } 145 | 146 | if dep.Condition == DependsOnConditionHealthy { 147 | // if we depedn on the service to be healthy, it must have a ready check 148 | if service.ReadyCheck == nil { 149 | return fmt.Errorf("service %s depends on service %s, but it does not have a ready check", ss.Name, dep.Name) 150 | } 151 | } 152 | } 153 | } 154 | 155 | // validate that the mounts are correct 156 | for _, ss := range s.Services { 157 | for _, fileNameRef := range ss.FilesMapped { 158 | fileLoc := filepath.Join(s.out.dst, fileNameRef) 159 | 160 | if _, err := os.Stat(fileLoc); err != nil { 161 | if os.IsNotExist(err) { 162 | return fmt.Errorf("service %s includes an unknown file %s does not exist", ss.Name, fileLoc) 163 | } 164 | return fmt.Errorf("failed to stat file %s: %w", fileLoc, err) 165 | } 166 | } 167 | } 168 | 169 | // validate that the mounts are correct 170 | for _, ss := range s.Services { 171 | for _, fileNameRef := range ss.FilesMapped { 172 | fileLoc := filepath.Join(s.out.dst, fileNameRef) 173 | 174 | if _, err := os.Stat(fileLoc); err != nil { 175 | if os.IsNotExist(err) { 176 | return fmt.Errorf("service %s includes an unknown file %s does not exist", ss.Name, fileLoc) 177 | } 178 | return fmt.Errorf("failed to stat file %s: %w", fileLoc, err) 179 | } 180 | } 181 | } 182 | 183 | return nil 184 | } 185 | 186 | const ( 187 | ProtocolUDP = "udp" 188 | ProtocolTCP = "tcp" 189 | ) 190 | 191 | // Port describes a port that a service exposes 192 | type Port struct { 193 | // Name is the name of the port 194 | Name string `json:"name"` 195 | 196 | // Port is the port number 197 | Port int `json:"port"` 198 | 199 | // Protocol (tcp or udp) 200 | Protocol string 201 | 202 | // HostPort is the port number assigned on the host machine for this 203 | // container port. 
It is populated by the local runner 204 | // TODO: We might want to move this to the runner itself. 205 | HostPort int 206 | } 207 | 208 | // NodeRef describes a reference from one service to another 209 | type NodeRef struct { 210 | Service string `json:"service"` 211 | PortLabel string `json:"port_label"` 212 | Protocol string `json:"protocol"` 213 | User string `json:"user"` 214 | } 215 | 216 | // serviceLogs is a service to access the logs of the running service 217 | type serviceLogs struct { 218 | logRef *os.File 219 | path string 220 | } 221 | 222 | func (s *serviceLogs) readLogs() (string, error) { 223 | content, err := os.ReadFile(s.path) 224 | if err != nil { 225 | return "", fmt.Errorf("failed to read logs: %w", err) 226 | } 227 | return string(content), nil 228 | } 229 | 230 | func (s *serviceLogs) FindLog(pattern string) (string, error) { 231 | logs, err := s.readLogs() 232 | if err != nil { 233 | return "", fmt.Errorf("failed to read logs: %w", err) 234 | } 235 | 236 | lines := strings.Split(logs, "\n") 237 | for _, line := range lines { 238 | if strings.Contains(line, pattern) { 239 | return line, nil 240 | } 241 | } 242 | return "", fmt.Errorf("log pattern %s not found", pattern) 243 | } 244 | 245 | type Service struct { 246 | Name string `json:"name"` 247 | Args []string `json:"args"` 248 | 249 | Labels map[string]string `json:"labels,omitempty"` 250 | 251 | // list of environment variables to set for the service 252 | Env map[string]string `json:"env,omitempty"` 253 | 254 | ReadyCheck *ReadyCheck `json:"ready_check,omitempty"` 255 | 256 | DependsOn []DependsOn `json:"depends_on,omitempty"` 257 | 258 | Ports []*Port `json:"ports,omitempty"` 259 | NodeRefs []*NodeRef `json:"node_refs,omitempty"` 260 | 261 | FilesMapped map[string]string `json:"files_mapped,omitempty"` 262 | VolumesMapped map[string]string `json:"volumes_mapped,omitempty"` 263 | 264 | ComponentName string `json:"component_name,omitempty"` 265 | 266 | Tag string `json:"tag,omitempty"` 
267 | Image string `json:"image,omitempty"` 268 | Entrypoint string `json:"entrypoint,omitempty"` 269 | } 270 | 271 | type instance struct { 272 | service *Service 273 | 274 | logs *serviceLogs 275 | component ServiceGen 276 | } 277 | 278 | type DependsOnCondition string 279 | 280 | const ( 281 | DependsOnConditionRunning DependsOnCondition = "service_started" 282 | DependsOnConditionHealthy DependsOnCondition = "service_healthy" 283 | ) 284 | 285 | type DependsOn struct { 286 | Name string 287 | Condition DependsOnCondition 288 | } 289 | 290 | func (s *Service) GetPorts() []*Port { 291 | return s.Ports 292 | } 293 | 294 | func (s *Service) MustGetPort(name string) *Port { 295 | port, ok := s.GetPort(name) 296 | if !ok { 297 | panic(fmt.Sprintf("port %s not found", name)) 298 | } 299 | return port 300 | } 301 | 302 | func (s *Service) GetPort(name string) (*Port, bool) { 303 | for _, p := range s.Ports { 304 | if p.Name == name { 305 | return p, true 306 | } 307 | } 308 | return nil, false 309 | } 310 | 311 | func (s *Service) UseHostExecution() *Service { 312 | s.WithLabel(useHostExecutionLabel, "true") 313 | return s 314 | } 315 | 316 | func (s *Service) WithEnv(key, value string) *Service { 317 | if s.Env == nil { 318 | s.Env = make(map[string]string) 319 | } 320 | s.applyTemplate(value) 321 | s.Env[key] = value 322 | return s 323 | } 324 | 325 | func (s *Service) WithLabel(key, value string) *Service { 326 | if s.Labels == nil { 327 | s.Labels = make(map[string]string) 328 | } 329 | s.Labels[key] = value 330 | return s 331 | } 332 | 333 | func (s *Manifest) NewService(name string) *Service { 334 | return &Service{Name: name, Args: []string{}, Ports: []*Port{}, NodeRefs: []*NodeRef{}} 335 | } 336 | 337 | func (s *Service) WithImage(image string) *Service { 338 | s.Image = image 339 | return s 340 | } 341 | 342 | func (s *Service) WithEntrypoint(entrypoint string) *Service { 343 | s.Entrypoint = entrypoint 344 | return s 345 | } 346 | 347 | func (s *Service) 
WithTag(tag string) *Service { 348 | s.Tag = tag 349 | return s 350 | } 351 | 352 | func (s *Service) WithPort(name string, portNumber int, protocolVar ...string) *Service { 353 | protocol := ProtocolTCP 354 | if len(protocolVar) > 0 { 355 | if protocolVar[0] != ProtocolTCP && protocolVar[0] != ProtocolUDP { 356 | panic(fmt.Sprintf("protocol %s not supported", protocolVar[0])) 357 | } 358 | protocol = protocolVar[0] 359 | } 360 | 361 | // add the port if not already present with the same name. 362 | // if preset with the same name, they must have same port number 363 | for _, p := range s.Ports { 364 | if p.Name == name { 365 | if p.Port != portNumber { 366 | panic(fmt.Sprintf("port %s already defined with different port number", name)) 367 | } 368 | if p.Protocol != protocol { 369 | // If they have different protocols they are different ports 370 | continue 371 | } 372 | return s 373 | } 374 | } 375 | s.Ports = append(s.Ports, &Port{Name: name, Port: portNumber, Protocol: protocol}) 376 | return s 377 | } 378 | 379 | func (s *Service) applyTemplate(arg string) { 380 | var port []Port 381 | var nodeRef []NodeRef 382 | _, port, nodeRef = applyTemplate(arg) 383 | for _, p := range port { 384 | s.WithPort(p.Name, p.Port, p.Protocol) 385 | } 386 | for _, n := range nodeRef { 387 | s.NodeRefs = append(s.NodeRefs, &n) 388 | } 389 | } 390 | 391 | func (s *Service) WithArgs(args ...string) *Service { 392 | for _, arg := range args { 393 | s.applyTemplate(arg) 394 | } 395 | s.Args = append(s.Args, args...) 
// applyTemplate parses templateStr with the playground template functions
// (Service, Port, PortUDP) and executes it once. This is a first,
// reference-collecting pass: each function records the port/service reference
// it saw and re-emits itself literally, so the returned string still contains
// the template calls for a later resolution pass at runtime. It returns the
// (pass-through) rendered string plus the collected Port and NodeRef lists.
// It panics on a malformed template, which is treated as a programming bug.
func applyTemplate(templateStr string) (string, []Port, []NodeRef) {
	// TODO: Can we remove the return argument string?

	// use template substitution to load constants
	// pass-through the Dir template because it has to be resolved at the runtime
	input := map[string]interface{}{}

	var portRef []Port
	var nodeRef []NodeRef
	// there can be multiple port and node references because in the case of
	// op-geth we pass a whole string as nested command args

	funcs := template.FuncMap{
		"Service": func(name string, portLabel, protocol, user string) string {
			if name == "" {
				panic("BUG: service name cannot be empty")
			}
			if portLabel == "" {
				panic("BUG: port label cannot be empty")
			}

			// for the first pass of service we do not do anything, keep it as it is for the followup pass
			// here we only keep the references to the services to be checked if they are valid and can be resolved
			// later on for the runtime we will do the resolve stage.
			// TODO: this will get easier when we move away from templates and use interface and structs.
			nodeRef = append(nodeRef, NodeRef{Service: name, PortLabel: portLabel, Protocol: protocol, User: user})
			return fmt.Sprintf(`{{Service "%s" "%s" "%s" "%s"}}`, name, portLabel, protocol, user)
		},
		"Port": func(name string, defaultPort int) string {
			// record a TCP port reference and re-emit the call for the runtime pass
			portRef = append(portRef, Port{Name: name, Port: defaultPort, Protocol: ProtocolTCP})
			return fmt.Sprintf(`{{Port "%s" %d}}`, name, defaultPort)
		},
		"PortUDP": func(name string, defaultPort int) string {
			// record a UDP port reference and re-emit the call for the runtime pass
			portRef = append(portRef, Port{Name: name, Port: defaultPort, Protocol: ProtocolUDP})
			return fmt.Sprintf(`{{PortUDP "%s" %d}}`, name, defaultPort)
		},
	}

	tpl, err := template.New("").Funcs(funcs).Parse(templateStr)
	if err != nil {
		panic(fmt.Sprintf("BUG: failed to parse template, err: %s", err))
	}

	var out strings.Builder
	if err := tpl.Execute(&out, input); err != nil {
		panic(fmt.Sprintf("BUG: failed to execute template, err: %s", err))
	}
	res := out.String()

	// escape quotes
	// NOTE(review): as written this ReplaceAll is a no-op (old and new strings
	// are both a plain double quote); it looks like it was meant to undo an
	// escaping such as &#34; -> " — confirm the intended transformation.
	res = strings.ReplaceAll(res, `"`, `"`)

	return res, portRef, nodeRef
}
strings.ReplaceAll(ss.Name, "-", "_") 517 | b.WriteString(fmt.Sprintf(" %s [label=\"%s%s\"];\n", nodeName, ss.Name, portLabel)) 518 | } 519 | 520 | b.WriteString("\n") 521 | 522 | // Add edges (connections between services) 523 | for _, ss := range s.Services { 524 | sourceNode := strings.ReplaceAll(ss.Name, "-", "_") 525 | for _, ref := range ss.NodeRefs { 526 | targetNode := strings.ReplaceAll(ref.Service, "-", "_") 527 | b.WriteString(fmt.Sprintf(" %s -> %s [label=\"%s\"];\n", 528 | sourceNode, 529 | targetNode, 530 | ref.PortLabel, 531 | )) 532 | } 533 | } 534 | 535 | // Add edges for dependws_on 536 | for _, ss := range s.Services { 537 | for _, dep := range ss.DependsOn { 538 | sourceNode := strings.ReplaceAll(ss.Name, "-", "_") 539 | targetNode := strings.ReplaceAll(dep.Name, "-", "_") 540 | b.WriteString(fmt.Sprintf(" %s -> %s [style=dashed, color=gray, constraint=true, label=\"depends_on\"];\n", 541 | sourceNode, 542 | targetNode, 543 | )) 544 | } 545 | } 546 | 547 | b.WriteString("}\n") 548 | return b.String() 549 | } 550 | 551 | func saveDotGraph(svcManager *Manifest, out *output) error { 552 | dotGraph := svcManager.GenerateDotGraph() 553 | return out.WriteFile("services.dot", dotGraph) 554 | } 555 | 556 | func (m *Manifest) SaveJson() error { 557 | return m.out.WriteFile("manifest.json", m) 558 | } 559 | 560 | func ReadManifest(outputFolder string) (*Manifest, error) { 561 | // read outputFolder/manifest.json file 562 | manifestFile := filepath.Join(outputFolder, "manifest.json") 563 | if _, err := os.Stat(manifestFile); os.IsNotExist(err) { 564 | return nil, fmt.Errorf("manifest file %s does not exist", manifestFile) 565 | } 566 | manifest, err := os.ReadFile(manifestFile) 567 | if err != nil { 568 | return nil, fmt.Errorf("failed to read manifest file %s: %w", manifestFile, err) 569 | } 570 | 571 | // parse the manifest file 572 | var manifestData Manifest 573 | if err := json.Unmarshal(manifest, &manifestData); err != nil { 574 | return nil, 
fmt.Errorf("failed to parse manifest file %s: %w", manifestFile, err) 575 | } 576 | 577 | // set the output folder 578 | manifestData.out = &output{ 579 | dst: outputFolder, 580 | } 581 | return &manifestData, nil 582 | } 583 | -------------------------------------------------------------------------------- /playground/manifest_test.go: -------------------------------------------------------------------------------- 1 | package playground 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/stretchr/testify/assert" 7 | ) 8 | 9 | func TestNodeRefString(t *testing.T) { 10 | var testCases = []struct { 11 | protocol string 12 | service string 13 | port int 14 | user string 15 | expected string 16 | }{ 17 | { 18 | protocol: "", 19 | service: "test", 20 | port: 80, 21 | user: "", 22 | expected: "test:80", 23 | }, 24 | { 25 | protocol: "", 26 | service: "test", 27 | port: 80, 28 | user: "test", 29 | expected: "test@test:test", 30 | }, 31 | { 32 | protocol: "http", 33 | service: "test", 34 | port: 80, 35 | user: "", 36 | expected: "http://test:80", 37 | }, 38 | { 39 | protocol: "http", 40 | service: "test", 41 | port: 80, 42 | user: "test", 43 | expected: "http://test@test:test", 44 | }, 45 | { 46 | protocol: "enode", 47 | service: "test", 48 | port: 80, 49 | user: "", 50 | expected: "enode://test:80", 51 | }, 52 | } 53 | 54 | for _, testCase := range testCases { 55 | result := printAddr(testCase.protocol, testCase.service, testCase.port, testCase.user) 56 | if result != testCase.expected { 57 | t.Errorf("expected %s, got %s", testCase.expected, result) 58 | } 59 | } 60 | } 61 | 62 | func TestManifestWriteRead(t *testing.T) { 63 | out := newTestOutput(t) 64 | 65 | recipe := &L1Recipe{} 66 | 67 | builder := recipe.Artifacts() 68 | builder.OutputDir(out.dst) 69 | 70 | artifacts, err := builder.Build() 71 | assert.NoError(t, err) 72 | 73 | manifest := recipe.Apply(&ExContext{}, artifacts) 74 | assert.NoError(t, manifest.SaveJson()) 75 | 76 | manifest2, err := 
ReadManifest(out.dst) 77 | assert.NoError(t, err) 78 | 79 | for _, svc := range manifest.Services { 80 | svc2 := manifest2.MustGetService(svc.Name) 81 | assert.Equal(t, svc.Name, svc2.Name) 82 | assert.Equal(t, svc.Args, svc2.Args) 83 | assert.Equal(t, svc.Env, svc2.Env) 84 | assert.Equal(t, svc.Labels, svc2.Labels) 85 | assert.Equal(t, svc.VolumesMapped, svc2.VolumesMapped) 86 | } 87 | } 88 | -------------------------------------------------------------------------------- /playground/recipe_buildernet.go: -------------------------------------------------------------------------------- 1 | package playground 2 | 3 | import ( 4 | "fmt" 5 | 6 | flag "github.com/spf13/pflag" 7 | ) 8 | 9 | var _ Recipe = &BuilderNetRecipe{} 10 | 11 | // BuilderNetRecipe is a recipe that extends the L1 recipe to include builder-hub 12 | type BuilderNetRecipe struct { 13 | // Embed the L1Recipe to reuse its functionality 14 | l1Recipe L1Recipe 15 | 16 | // Add mock proxy for testing 17 | includeMockProxy bool 18 | } 19 | 20 | func (b *BuilderNetRecipe) Name() string { 21 | return "buildernet" 22 | } 23 | 24 | func (b *BuilderNetRecipe) Description() string { 25 | return "Deploy a full L1 stack with mev-boost and builder-hub" 26 | } 27 | 28 | func (b *BuilderNetRecipe) Flags() *flag.FlagSet { 29 | // Reuse the L1Recipe flags 30 | flags := b.l1Recipe.Flags() 31 | 32 | // Add a flag to enable/disable the mock proxy 33 | flags.BoolVar(&b.includeMockProxy, "mock-proxy", false, "include a mock proxy for builder-hub with attestation headers") 34 | 35 | return flags 36 | } 37 | 38 | func (b *BuilderNetRecipe) Artifacts() *ArtifactsBuilder { 39 | // Reuse the L1Recipe artifacts builder 40 | return b.l1Recipe.Artifacts() 41 | } 42 | 43 | func (b *BuilderNetRecipe) Apply(ctx *ExContext, artifacts *Artifacts) *Manifest { 44 | // Start with the L1Recipe manifest 45 | svcManager := b.l1Recipe.Apply(ctx, artifacts) 46 | 47 | // Add builder-hub-postgres service (now includes migrations) 48 | 
svcManager.AddService("builder-hub-postgres", &BuilderHubPostgres{}) 49 | 50 | // Add the builder-hub service 51 | svcManager.AddService("builder-hub", &BuilderHub{ 52 | postgres: "builder-hub-postgres", 53 | }) 54 | 55 | // Optionally add mock proxy for testing 56 | if b.includeMockProxy { 57 | svcManager.AddService("builder-hub-proxy", &BuilderHubMockProxy{ 58 | TargetService: "builder-hub", 59 | }) 60 | } 61 | 62 | return svcManager 63 | } 64 | 65 | func (b *BuilderNetRecipe) Output(manifest *Manifest) map[string]interface{} { 66 | // Start with the L1Recipe output 67 | output := b.l1Recipe.Output(manifest) 68 | 69 | // Add builder-hub service info 70 | builderHubService, ok := manifest.GetService("builder-hub") 71 | if ok { 72 | http := builderHubService.MustGetPort("http") 73 | admin := builderHubService.MustGetPort("admin") 74 | internal := builderHubService.MustGetPort("internal") 75 | 76 | output["builder-hub-http"] = fmt.Sprintf("http://localhost:%d", http.HostPort) 77 | output["builder-hub-admin"] = fmt.Sprintf("http://localhost:%d", admin.HostPort) 78 | output["builder-hub-internal"] = fmt.Sprintf("http://localhost:%d", internal.HostPort) 79 | } 80 | 81 | if b.includeMockProxy { 82 | proxyService, ok := manifest.GetService("builder-hub-proxy") 83 | if ok { 84 | http := proxyService.MustGetPort("http") 85 | output["builder-hub-proxy"] = fmt.Sprintf("http://localhost:%d", http.HostPort) 86 | } 87 | } 88 | 89 | return output 90 | } 91 | -------------------------------------------------------------------------------- /playground/recipe_l1.go: -------------------------------------------------------------------------------- 1 | package playground 2 | 3 | import ( 4 | "fmt" 5 | 6 | flag "github.com/spf13/pflag" 7 | ) 8 | 9 | var _ Recipe = &L1Recipe{} 10 | 11 | type L1Recipe struct { 12 | // latestFork enables the use of the latest fork at startup 13 | latestFork bool 14 | 15 | // useRethForValidation signals mev-boost to use the Reth EL node for block 
validation 16 | useRethForValidation bool 17 | 18 | // secondaryELPort enables the use of a secondary EL connected to the validator beacon node 19 | // It is enabled through the use of the cl-proxy service 20 | secondaryELPort uint64 21 | 22 | // if useNativeReth is set to true, the Reth EL execution client for the validator beacon node 23 | // will run on the host machine. This is useful if you want to bind to the Reth database and you 24 | // are running a host machine (i.e Mac) that is differerent from the docker one (Linux) 25 | useNativeReth bool 26 | } 27 | 28 | func (l *L1Recipe) Name() string { 29 | return "l1" 30 | } 31 | 32 | func (l *L1Recipe) Description() string { 33 | return "Deploy a full L1 stack with mev-boost" 34 | } 35 | 36 | func (l *L1Recipe) Flags() *flag.FlagSet { 37 | flags := flag.NewFlagSet("l1", flag.ContinueOnError) 38 | flags.BoolVar(&l.latestFork, "latest-fork", false, "use the latest fork") 39 | flags.BoolVar(&l.useRethForValidation, "use-reth-for-validation", false, "use reth for validation") 40 | flags.Uint64Var(&l.secondaryELPort, "secondary-el", 0, "port to use for the secondary builder") 41 | flags.BoolVar(&l.useNativeReth, "use-native-reth", false, "use the native reth binary") 42 | return flags 43 | } 44 | 45 | func (l *L1Recipe) Artifacts() *ArtifactsBuilder { 46 | builder := NewArtifactsBuilder() 47 | builder.ApplyLatestL1Fork(l.latestFork) 48 | 49 | return builder 50 | } 51 | 52 | func (l *L1Recipe) Apply(ctx *ExContext, artifacts *Artifacts) *Manifest { 53 | svcManager := NewManifest(ctx, artifacts.Out) 54 | 55 | svcManager.AddService("el", &RethEL{ 56 | UseRethForValidation: l.useRethForValidation, 57 | UseNativeReth: l.useNativeReth, 58 | }) 59 | 60 | var elService string 61 | if l.secondaryELPort != 0 { 62 | // we are going to use the cl-proxy service to connect the beacon node to two builders 63 | // one the 'el' builder and another one the remote one 64 | elService = "cl-proxy" 65 | svcManager.AddService("cl-proxy", 
&ClProxy{ 66 | PrimaryBuilder: "el", 67 | SecondaryBuilder: fmt.Sprintf("http://localhost:%d", l.secondaryELPort), 68 | }) 69 | } else { 70 | elService = "el" 71 | } 72 | 73 | svcManager.AddService("beacon", &LighthouseBeaconNode{ 74 | ExecutionNode: elService, 75 | MevBoostNode: "mev-boost", 76 | }) 77 | svcManager.AddService("validator", &LighthouseValidator{ 78 | BeaconNode: "beacon", 79 | }) 80 | 81 | mevBoostValidationServer := "" 82 | if l.useRethForValidation { 83 | mevBoostValidationServer = "el" 84 | } 85 | svcManager.AddService("mev-boost", &MevBoostRelay{ 86 | BeaconClient: "beacon", 87 | ValidationServer: mevBoostValidationServer, 88 | }) 89 | return svcManager 90 | } 91 | 92 | func (l *L1Recipe) Output(manifest *Manifest) map[string]interface{} { 93 | return map[string]interface{}{} 94 | } 95 | -------------------------------------------------------------------------------- /playground/recipe_opstack.go: -------------------------------------------------------------------------------- 1 | package playground 2 | 3 | import ( 4 | flag "github.com/spf13/pflag" 5 | ) 6 | 7 | var _ Recipe = &OpRecipe{} 8 | 9 | // OpRecipe is a recipe that deploys an OP stack 10 | type OpRecipe struct { 11 | // externalBuilder is the URL of the external builder to use. If enabled, the recipe deploys 12 | // rollup-boost on the sequencer and uses this URL as the external builder. 
13 | externalBuilder string 14 | 15 | // whether to enable the latest fork isthmus and when 16 | enableLatestFork *uint64 17 | 18 | // blockTime is the block time to use for the rollup 19 | // (default is 2 seconds) 20 | blockTime uint64 21 | 22 | // batcherMaxChannelDuration is the maximum channel duration to use for the batcher 23 | // (default is 2 seconds) 24 | batcherMaxChannelDuration uint64 25 | } 26 | 27 | func (o *OpRecipe) Name() string { 28 | return "opstack" 29 | } 30 | 31 | func (o *OpRecipe) Description() string { 32 | return "Deploy an OP stack" 33 | } 34 | 35 | func (o *OpRecipe) Flags() *flag.FlagSet { 36 | flags := flag.NewFlagSet("opstack", flag.ContinueOnError) 37 | flags.StringVar(&o.externalBuilder, "external-builder", "", "External builder URL") 38 | flags.Var(&nullableUint64Value{&o.enableLatestFork}, "enable-latest-fork", "Enable latest fork isthmus (nil or empty = disabled, otherwise enabled at specified block)") 39 | flags.Uint64Var(&o.blockTime, "block-time", defaultOpBlockTimeSeconds, "Block time to use for the rollup") 40 | flags.Uint64Var(&o.batcherMaxChannelDuration, "batcher-max-channel-duration", 2, "Maximum channel duration to use for the batcher") 41 | return flags 42 | } 43 | 44 | func (o *OpRecipe) Artifacts() *ArtifactsBuilder { 45 | builder := NewArtifactsBuilder() 46 | builder.ApplyLatestL2Fork(o.enableLatestFork) 47 | builder.OpBlockTime(o.blockTime) 48 | return builder 49 | } 50 | 51 | func (o *OpRecipe) Apply(ctx *ExContext, artifacts *Artifacts) *Manifest { 52 | svcManager := NewManifest(ctx, artifacts.Out) 53 | svcManager.AddService("el", &RethEL{}) 54 | svcManager.AddService("beacon", &LighthouseBeaconNode{ 55 | ExecutionNode: "el", 56 | }) 57 | svcManager.AddService("validator", &LighthouseValidator{ 58 | BeaconNode: "beacon", 59 | }) 60 | 61 | externalBuilderRef := o.externalBuilder 62 | if o.externalBuilder == "op-reth" { 63 | // Add a new op-reth service and connect it to Rollup-boost 64 | 
svcManager.AddService("op-reth", &OpReth{}) 65 | 66 | externalBuilderRef = Connect("op-reth", "authrpc") 67 | } 68 | 69 | elNode := "op-geth" 70 | if o.externalBuilder != "" { 71 | elNode = "rollup-boost" 72 | 73 | svcManager.AddService("rollup-boost", &RollupBoost{ 74 | ELNode: "op-geth", 75 | Builder: externalBuilderRef, 76 | }) 77 | } 78 | svcManager.AddService("op-node", &OpNode{ 79 | L1Node: "el", 80 | L1Beacon: "beacon", 81 | L2Node: elNode, 82 | }) 83 | svcManager.AddService("op-geth", &OpGeth{}) 84 | svcManager.AddService("op-batcher", &OpBatcher{ 85 | L1Node: "el", 86 | L2Node: "op-geth", 87 | RollupNode: "op-node", 88 | MaxChannelDuration: o.batcherMaxChannelDuration, 89 | }) 90 | return svcManager 91 | } 92 | 93 | func (o *OpRecipe) Output(manifest *Manifest) map[string]interface{} { 94 | /* 95 | opGeth := manifest.MustGetService("op-geth").component.(*OpGeth) 96 | if opGeth.Enode != "" { 97 | // Only output if enode was set 98 | return map[string]interface{}{ 99 | "op-geth-enode": opGeth.Enode, 100 | } 101 | } 102 | */ 103 | return map[string]interface{}{} 104 | } 105 | -------------------------------------------------------------------------------- /playground/releases.go: -------------------------------------------------------------------------------- 1 | package playground 2 | 3 | import ( 4 | "archive/tar" 5 | "compress/gzip" 6 | "fmt" 7 | "io" 8 | "log" 9 | "net/http" 10 | "os" 11 | "os/exec" 12 | "path/filepath" 13 | "runtime" 14 | ) 15 | 16 | type release struct { 17 | Name string 18 | Repo string 19 | Org string 20 | Version string 21 | Arch func(string, string) string 22 | } 23 | 24 | func DownloadRelease(outputFolder string, artifact *release) (string, error) { 25 | goos := runtime.GOOS 26 | goarch := runtime.GOARCH 27 | 28 | outPath := filepath.Join(outputFolder, artifact.Name+"-"+artifact.Version) 29 | _, err := os.Stat(outPath) 30 | if err != nil && !os.IsNotExist(err) { 31 | return "", fmt.Errorf("error checking file existence: %v", err) 
32 | } 33 | if err == nil { 34 | return outPath, nil 35 | } 36 | 37 | // create the output folder if it doesn't exist yet 38 | if err := os.MkdirAll(outputFolder, 0755); err != nil { 39 | return "", fmt.Errorf("error creating output folder: %v", err) 40 | } 41 | 42 | archVersion := artifact.Arch(goos, goarch) 43 | if archVersion == "" { 44 | // Case 2. The architecture is not supported. 45 | log.Printf("unsupported OS/Arch: %s/%s\n", goos, goarch) 46 | if _, err := exec.LookPath(artifact.Name); err != nil { 47 | return "", fmt.Errorf("error looking up binary in PATH: %v", err) 48 | } else { 49 | outPath = artifact.Name 50 | log.Printf("Using %s from PATH\n", artifact.Name) 51 | } 52 | } else { 53 | // Case 3. Download the binary from the release page 54 | repo := artifact.Repo 55 | if repo == "" { 56 | repo = artifact.Name 57 | } 58 | releasesURL := fmt.Sprintf("https://github.com/%s/%s/releases/download/%s/%s-%s-%s.tar.gz", artifact.Org, repo, artifact.Version, artifact.Name, artifact.Version, archVersion) 59 | log.Printf("Downloading %s: %s\n", outPath, releasesURL) 60 | 61 | if err := downloadArtifact(releasesURL, artifact.Name, outPath); err != nil { 62 | return "", fmt.Errorf("error downloading artifact: %v", err) 63 | } 64 | } 65 | 66 | return outPath, nil 67 | } 68 | 69 | func downloadArtifact(url string, expectedFile string, outPath string) error { 70 | // Download the file 71 | resp, err := http.Get(url) 72 | if err != nil { 73 | return fmt.Errorf("error downloading file: %v", err) 74 | } 75 | defer resp.Body.Close() 76 | 77 | // Create a gzip reader 78 | gzipReader, err := gzip.NewReader(resp.Body) 79 | if err != nil { 80 | return fmt.Errorf("error creating gzip reader: %v", err) 81 | } 82 | defer gzipReader.Close() 83 | 84 | // Create a tar reader 85 | tarReader := tar.NewReader(gzipReader) 86 | 87 | // Extract the file 88 | var found bool 89 | for { 90 | header, err := tarReader.Next() 91 | if err == io.EOF { 92 | break 93 | } 94 | if err != nil { 95 | 
return fmt.Errorf("error reading tar: %v", err) 96 | } 97 | 98 | if header.Typeflag == tar.TypeReg { 99 | if header.Name != expectedFile { 100 | return fmt.Errorf("unexpected file in archive: %s", header.Name) 101 | } 102 | outFile, err := os.Create(outPath) 103 | if err != nil { 104 | return fmt.Errorf("error creating output file: %v", err) 105 | } 106 | defer outFile.Close() 107 | 108 | if _, err := io.Copy(outFile, tarReader); err != nil { 109 | return fmt.Errorf("error writing output file: %v", err) 110 | } 111 | 112 | // change permissions 113 | if err := os.Chmod(outPath, 0755); err != nil { 114 | return fmt.Errorf("error changing permissions: %v", err) 115 | } 116 | found = true 117 | break // Assuming there's only one file per repo 118 | } 119 | } 120 | 121 | if !found { 122 | return fmt.Errorf("file not found in archive: %s", expectedFile) 123 | } 124 | return nil 125 | } 126 | -------------------------------------------------------------------------------- /playground/utils.go: -------------------------------------------------------------------------------- 1 | package playground 2 | 3 | import ( 4 | "fmt" 5 | "strconv" 6 | "strings" 7 | ) 8 | 9 | type nullableUint64Value struct { 10 | ptr **uint64 11 | } 12 | 13 | func (n nullableUint64Value) String() string { 14 | if *n.ptr == nil { 15 | return "nil" 16 | } 17 | return fmt.Sprintf("%d", **n.ptr) 18 | } 19 | 20 | func (n nullableUint64Value) Set(s string) error { 21 | if s == "" || s == "nil" { 22 | *n.ptr = nil 23 | return nil 24 | } 25 | 26 | val, err := strconv.ParseUint(s, 10, 64) 27 | if err != nil { 28 | return err 29 | } 30 | *n.ptr = &val 31 | return nil 32 | } 33 | 34 | func (n nullableUint64Value) Type() string { 35 | return "uint64" 36 | } 37 | 38 | func (n nullableUint64Value) GetNoOptDefVal() string { 39 | return "0" 40 | } 41 | 42 | type MapStringFlag map[string]string 43 | 44 | func (n *MapStringFlag) String() string { 45 | parts := []string{} 46 | for k, v := range *n { 47 | parts = 
append(parts, k+"="+v) 48 | 	} 49 | 	return "(" + strings.Join(parts, ",") + ")" 50 | } 51 | 52 | func (n *MapStringFlag) Type() string { 53 | 	return "map(string, string)" 54 | } 55 | 56 | func (n *MapStringFlag) Set(s string) error { 57 | 	parts := strings.Split(s, "=") 58 | 	if len(parts) != 2 { 59 | 		return fmt.Errorf("expected k=v for flag") 60 | 	} 61 | 62 | 	k := parts[0] 63 | 	v := parts[1] 64 | 65 | 	if *n == nil { 66 | 		(*n) = map[string]string{} 67 | 	} 68 | 	(*n)[k] = v 69 | 	return nil 70 | } 71 | -------------------------------------------------------------------------------- /playground/utils/README.md: -------------------------------------------------------------------------------- 1 | Reset the state 2 | rm state.json 3 | echo '{"version": 1}' > state.json 4 | 5 | op-deployer apply --workdir . --deployment-target genesis 6 | 7 | op-deployer inspect genesis --workdir . --outfile ./genesis.json 13 8 | 9 | op-deployer inspect rollup --workdir . --outfile ./rollup.json 13 10 | -------------------------------------------------------------------------------- /playground/utils/intent.toml: -------------------------------------------------------------------------------- 1 | configType = "custom" 2 | l1ChainID = 1337 3 | fundDevAccounts = false 4 | useInterop = false 5 | l1ContractsLocator = "https://storage.googleapis.com/oplabs-contract-artifacts/artifacts-v1-c193a1863182092bc6cb723e523e8313a0f4b6e9c9636513927f1db74c047c15.tar.gz" 6 | l2ContractsLocator = "https://storage.googleapis.com/oplabs-contract-artifacts/artifacts-v1-c193a1863182092bc6cb723e523e8313a0f4b6e9c9636513927f1db74c047c15.tar.gz" 7 | 8 | [superchainRoles] 9 | proxyAdminOwner = "0xafF0CA253b97e54440965855cec0A8a2E2399896" 10 | protocolVersionsOwner = "0xafF0CA253b97e54440965855cec0A8a2E2399896" 11 | guardian = "0xafF0CA253b97e54440965855cec0A8a2E2399896" 12 | 13 | [[chains]] 14 | id = "0x000000000000000000000000000000000000000000000000000000000000000d" 15 | baseFeeVaultRecipient = 
"0xafF0CA253b97e54440965855cec0A8a2E2399896" 16 | l1FeeVaultRecipient = "0xafF0CA253b97e54440965855cec0A8a2E2399896" 17 | sequencerFeeVaultRecipient = "0xafF0CA253b97e54440965855cec0A8a2E2399896" 18 | eip1559DenominatorCanyon = 250 19 | eip1559Denominator = 50 20 | eip1559Elasticity = 6 21 | [chains.roles] 22 | l1ProxyAdminOwner = "0xafF0CA253b97e54440965855cec0A8a2E2399896" 23 | l2ProxyAdminOwner = "0xafF0CA253b97e54440965855cec0A8a2E2399896" 24 | systemConfigOwner = "0xafF0CA253b97e54440965855cec0A8a2E2399896" 25 | unsafeBlockSigner = "0xafF0CA253b97e54440965855cec0A8a2E2399896" 26 | batcher = "0xafF0CA253b97e54440965855cec0A8a2E2399896" 27 | proposer = "0xafF0CA253b97e54440965855cec0A8a2E2399896" 28 | challenger = "0xafF0CA253b97e54440965855cec0A8a2E2399896" 29 | -------------------------------------------------------------------------------- /playground/utils/query.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | # Try curl first 4 | if curl -s -w "\n%{http_code}" "$1" >/dev/null 2>&1; then 5 | curl -s "$1" 6 | exit 0 7 | fi 8 | 9 | # If curl fails, try wget 10 | if wget -qO- "$1" >/dev/null 2>&1; then 11 | wget -qO- "$1" 12 | exit 0 13 | fi 14 | 15 | # No client found, try to install curl 16 | if [ -f "/etc/alpine-release" ]; then 17 | apk add --no-cache curl >/dev/null 2>&1 || exit 1 18 | elif [ -f "/etc/debian_version" ]; then 19 | apt-get update >/dev/null 2>&1 && apt-get install -y curl >/dev/null 2>&1 || exit 1 20 | else 21 | echo "No package manager found and no HTTP client available" >&2 22 | exit 1 23 | fi 24 | 25 | # Try curl again after installation 26 | if curl -s "$1"; then 27 | exit 0 28 | else 29 | echo "Failed to make request even after installing curl" >&2 30 | exit 1 31 | fi 32 | -------------------------------------------------------------------------------- /playground/utils/rollup.json: -------------------------------------------------------------------------------- 1 | { 2 | 
"genesis": { 3 | "l1": { 4 | "hash": "0x357882d73a3e8075ae1c28ea05d750b1454fb97516e5466e2900981602052393", 5 | "number": 0 6 | }, 7 | "l2": { 8 | "hash": "0x69b450902eab4d2bd20651e4f152c78921dc645cdfe11cfd374447856b152711", 9 | "number": 0 10 | }, 11 | "l2_time": 1741345873, 12 | "system_config": { 13 | "batcherAddr": "0xaff0ca253b97e54440965855cec0a8a2e2399896", 14 | "overhead": "0x0000000000000000000000000000000000000000000000000000000000000000", 15 | "scalar": "0x010000000000000000000000000000000000000000000000000c5fc500000558", 16 | "gasLimit": 60000000, 17 | "eip1559Params": "0x0000000000000000" 18 | } 19 | }, 20 | "block_time": 2, 21 | "max_sequencer_drift": 600, 22 | "seq_window_size": 3600, 23 | "channel_timeout": 300, 24 | "l1_chain_id": 1337, 25 | "l2_chain_id": 13, 26 | "regolith_time": 0, 27 | "canyon_time": 0, 28 | "delta_time": 0, 29 | "ecotone_time": 0, 30 | "fjord_time": 0, 31 | "granite_time": 0, 32 | "holocene_time": 0, 33 | "batch_inbox_address": "0x00d7b6990105719101dabeb77144f2a3385c8033", 34 | "deposit_contract_address": "0xc4d2986564c1907750c968ec5c973c497db78f42", 35 | "l1_system_config_address": "0x12d727cc0eebcc43410d4b39ed67749254c57301", 36 | "protocol_versions_address": "0x9f80021446ff81cdd41208eef8cbbb312148a4d6" 37 | } -------------------------------------------------------------------------------- /playground/watchdog.go: -------------------------------------------------------------------------------- 1 | package playground 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "io" 7 | ) 8 | 9 | type ServiceWatchdog interface { 10 | Watchdog(out io.Writer, instance *instance, ctx context.Context) error 11 | } 12 | 13 | func RunWatchdog(out *output, instances []*instance) error { 14 | watchdogErr := make(chan error, len(instances)) 15 | 16 | output, err := out.LogOutput("watchdog") 17 | if err != nil { 18 | return fmt.Errorf("failed to create log output: %w", err) 19 | } 20 | 21 | for _, s := range instances { 22 | if watchdogFn, ok := 
s.component.(ServiceWatchdog); ok { 23 | go func() { 24 | if err := watchdogFn.Watchdog(output, s, context.Background()); err != nil { 25 | watchdogErr <- fmt.Errorf("service %s watchdog failed: %w", s.service.Name, err) 26 | } 27 | }() 28 | } 29 | } 30 | 31 | // If any of the watchdogs fail, we return the error 32 | if err := <-watchdogErr; err != nil { 33 | return fmt.Errorf("failed to run watchdog: %w", err) 34 | } 35 | return nil 36 | } 37 | 38 | func CompleteReady(instances []*instance) error { 39 | for _, s := range instances { 40 | if readyFn, ok := s.component.(ServiceReady); ok { 41 | if err := readyFn.Ready(s); err != nil { 42 | return err 43 | } 44 | } 45 | } 46 | return nil 47 | } 48 | -------------------------------------------------------------------------------- /playground/watchers.go: -------------------------------------------------------------------------------- 1 | package playground 2 | 3 | import ( 4 | "context" 5 | "encoding/json" 6 | "fmt" 7 | "io" 8 | "net/http" 9 | "time" 10 | 11 | "github.com/ethereum/go-ethereum/ethclient" 12 | "github.com/ethereum/go-ethereum/rpc" 13 | "github.com/flashbots/mev-boost-relay/beaconclient" 14 | mevRCommon "github.com/flashbots/mev-boost-relay/common" 15 | ) 16 | 17 | func waitForChainAlive(ctx context.Context, logOutput io.Writer, beaconNodeURL string, timeout time.Duration) error { 18 | // Test that blocks are being produced 19 | log := mevRCommon.LogSetup(false, "info").WithField("context", "waitForChainAlive") 20 | log.Logger.Out = logOutput 21 | 22 | clt := beaconclient.NewProdBeaconInstance(log, beaconNodeURL, beaconNodeURL) 23 | 24 | // Subscribe to head events right away even if the connection has not been established yet 25 | // That is handled internally in the function already. 26 | // Otherwise, if we connect only when the first head slot happens we might miss some initial slots. 
27 | ch := make(chan beaconclient.PayloadAttributesEvent) 28 | go clt.SubscribeToPayloadAttributesEvents(ch) 29 | 30 | { 31 | // If the chain has not started yet, wait for it to start. 32 | // Otherwise, the subscription will not return any data. 33 | bClient := beaconclient.NewMultiBeaconClient(log, []beaconclient.IBeaconInstance{ 34 | clt, 35 | }) 36 | 37 | isReady := func() bool { 38 | sync, err := bClient.BestSyncStatus() 39 | if err != nil { 40 | return false 41 | } 42 | return sync.HeadSlot >= 1 43 | } 44 | 45 | if !isReady() { 46 | syncTimeoutCh := time.After(timeout) 47 | for { 48 | if isReady() { 49 | break 50 | } 51 | select { 52 | case <-syncTimeoutCh: 53 | return fmt.Errorf("beacon client failed to start") 54 | case <-ctx.Done(): 55 | return fmt.Errorf("timeout waiting for chain to start") 56 | default: 57 | time.Sleep(1 * time.Second) 58 | } 59 | } 60 | } 61 | } 62 | 63 | return nil 64 | } 65 | 66 | // validateProposerPayloads validates that payload attribute events are being broadcasted by the beacon node 67 | // in the correct order without any missing slots. 68 | func validateProposerPayloads(logOutput io.Writer, beaconNodeURL string) error { 69 | // Test that blocks are being produced 70 | log := mevRCommon.LogSetup(false, "info").WithField("context", "validateProposerPayloads") 71 | log.Logger.Out = logOutput 72 | 73 | clt := beaconclient.NewProdBeaconInstance(log, beaconNodeURL, beaconNodeURL) 74 | 75 | // We run this after 'waitForChainAlive' to ensure that the beacon node is ready to receive payloads. 76 | ch := make(chan beaconclient.PayloadAttributesEvent) 77 | go clt.SubscribeToPayloadAttributesEvents(ch) 78 | 79 | log.Infof("Chain is alive. 
Subscribing to head events") 80 | 81 | var lastSlot uint64 82 | for { 83 | select { 84 | case head := <-ch: 85 | log.Infof("Slot: %d Parent block number: %d", head.Data.ProposalSlot, head.Data.ParentBlockNumber) 86 | 87 | // If we are being notified of a new slot, validate that the slots are contiguous 88 | // Note that lighthouse might send multiple updates for the same slot. 89 | if lastSlot != 0 && lastSlot != head.Data.ProposalSlot && lastSlot+1 != head.Data.ProposalSlot { 90 | return fmt.Errorf("slot mismatch, expected %d, got %d", lastSlot+1, head.Data.ProposalSlot) 91 | } 92 | // if the network did not miss any initial slots, lighthouse will send payload attribute updates 93 | // of the form: (slot = slot, parent block number = slot - 2), (slot, slot - 1). 94 | // The -2 is in case we want to handle reorgs in the chain. 95 | // We need to validate that at least the difference between the parent block number and the slot is 2. 96 | if head.Data.ProposalSlot-head.Data.ParentBlockNumber > 2 { 97 | return fmt.Errorf("parent block too big %d", head.Data.ParentBlockNumber) 98 | } 99 | 100 | lastSlot = head.Data.ProposalSlot 101 | case <-time.After(20 * time.Second): 102 | return fmt.Errorf("timeout waiting for block") 103 | } 104 | } 105 | } 106 | 107 | func watchProposerPayloads(beaconNodeURL string) error { 108 | getProposerPayloadDelivered := func() ([]*mevRCommon.BidTraceV2JSON, error) { 109 | resp, err := http.Get(fmt.Sprintf("%s/relay/v1/data/bidtraces/proposer_payload_delivered", beaconNodeURL)) 110 | if err != nil { 111 | return nil, err 112 | } 113 | defer resp.Body.Close() 114 | 115 | data, err := io.ReadAll(resp.Body) 116 | if err != nil { 117 | return nil, err 118 | } 119 | 120 | var payloadDeliveredList []*mevRCommon.BidTraceV2JSON 121 | if err := json.Unmarshal(data, &payloadDeliveredList); err != nil { 122 | return nil, err 123 | } 124 | return payloadDeliveredList, nil 125 | } 126 | 127 | // Wait for at least 10 seconds for Mev-boost to start 128 | 
timerC := time.After(10 * time.Second) 129 | LOOP: 130 | 	for { 131 | 		select { 132 | 		case <-timerC: 133 | 			break LOOP // must be a labeled break: a bare 'break' only exits the select, so the 10s grace period would never end the wait loop 134 | 		case <-time.After(2 * time.Second): 135 | 			if _, err := getProposerPayloadDelivered(); err == nil { 136 | 				break LOOP 137 | 			} 138 | 		} 139 | 	} 140 | 141 | 	// This is not the most efficient solution since we are querying the endpoint for the full list of payloads 142 | 	// every 2 seconds. It should be fine for the kind of workloads expected to run. 143 | 144 | 	lastSlot := uint64(0) 145 | 146 | 	for { 147 | 		time.Sleep(2 * time.Second) 148 | 149 | 		vals, err := getProposerPayloadDelivered() 150 | 		if err != nil { 151 | 			fmt.Println("Error getting proposer payloads:", err) 152 | 			continue 153 | 		} 154 | 155 | 		for _, val := range vals { 156 | 			if val.Slot <= lastSlot { 157 | 				continue 158 | 			} 159 | 160 | 			fmt.Printf("Block Proposed: Slot: %d, Builder: %s, Block: %d\n", val.Slot, val.BuilderPubkey, val.BlockNumber) 161 | 			lastSlot = val.Slot 162 | 		} 163 | 	} 164 | } 165 | 166 | // watchChainHead watches the chain head and ensures that it is advancing 167 | func watchChainHead(logOutput io.Writer, elURL string, blockTime time.Duration) error { 168 | 	log := mevRCommon.LogSetup(false, "info").WithField("context", "watchChainHead").WithField("el", elURL) 169 | 	log.Logger.Out = logOutput 170 | 171 | 	// add some wiggle room to block time 172 | 	blockTime = blockTime + 1*time.Second 173 | 174 | 	rpcClient, err := rpc.Dial(elURL) 175 | 	if err != nil { 176 | 		return err 177 | 	} 178 | 179 | 	var latestBlock *uint64 180 | 	clt := ethclient.NewClient(rpcClient) 181 | 182 | 	timeout := time.NewTimer(blockTime) 183 | 	defer timeout.Stop() 184 | 185 | 	for { 186 | 		select { 187 | 		case <-time.After(500 * time.Millisecond): 188 | 			num, err := clt.BlockNumber(context.Background()) 189 | 			if err != nil { 190 | 				return err 191 | 			} 192 | 			if latestBlock != nil && num <= *latestBlock { 193 | 				continue 194 | 			} 195 | 			log.Infof("Chain head: %d", num) 196 | 			latestBlock = &num 197 | 198 | 			// Reset timeout 
since we saw a new block 199 | if !timeout.Stop() { 200 | <-timeout.C 201 | } 202 | timeout.Reset(blockTime) 203 | 204 | case <-timeout.C: 205 | return fmt.Errorf("chain head for %s not advancing", elURL) 206 | } 207 | } 208 | } 209 | 210 | type watchGroup struct { 211 | errCh chan error 212 | } 213 | 214 | func newWatchGroup() *watchGroup { 215 | return &watchGroup{ 216 | errCh: make(chan error, 1), 217 | } 218 | } 219 | 220 | func (wg *watchGroup) watch(watch func() error) { 221 | go func() { 222 | wg.errCh <- watch() 223 | }() 224 | } 225 | 226 | func (wg *watchGroup) wait() error { 227 | return <-wg.errCh 228 | } 229 | -------------------------------------------------------------------------------- /scripts/ci-build-playground-utils.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | docker build -t docker.io/flashbots/playground-utils:latest -f Dockerfile . 4 | -------------------------------------------------------------------------------- /scripts/ci-copy-playground-logs.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | if [ "$#" -ne 2 ]; then 4 | echo "Usage: $0 source_folder destination_folder" 5 | exit 1 6 | fi 7 | 8 | source_dir="$1" 9 | dest_dir="$2" 10 | 11 | # Check if source directory exists 12 | if [ ! 
-d "$source_dir" ]; then 13 | echo "Error: Source directory '$source_dir' does not exist" 14 | exit 1 15 | fi 16 | 17 | # Create destination directory if it doesn't exist 18 | mkdir -p "$dest_dir" 19 | 20 | # First, copy everything 21 | cp -r "$source_dir"/* "$dest_dir"/ 2>/dev/null || true 22 | 23 | # Remove any data_* directories from the destination 24 | find "$dest_dir" -type d -name "data_*" -exec rm -rf {} + 25 | 26 | echo "Copied contents from '$source_dir' to '$dest_dir' (excluding data_* folders)" 27 | -------------------------------------------------------------------------------- /scripts/ci-setup-docker-compose.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Add Docker's official GPG key: 4 | sudo apt-get update 5 | sudo apt-get install ca-certificates curl 6 | sudo install -m 0755 -d /etc/apt/keyrings 7 | sudo curl -fsSL https://download.docker.com/linux/ubuntu/gpg -o /etc/apt/keyrings/docker.asc 8 | sudo chmod a+r /etc/apt/keyrings/docker.asc 9 | 10 | # Add the repository to Apt sources: 11 | echo \ 12 | "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.asc] https://download.docker.com/linux/ubuntu \ 13 | $(. /etc/os-release && echo "${UBUNTU_CODENAME:-$VERSION_CODENAME}") stable" | \ 14 | sudo tee /etc/apt/sources.list.d/docker.list > /dev/null 15 | sudo apt-get update 16 | 17 | docker version 18 | sudo apt-get update 19 | docker compose version 20 | --------------------------------------------------------------------------------