├── grafana ├── grafana.ini ├── dashboards.yml ├── datasource.yml ├── dashboards │ ├── logs_dashboard.json │ └── single_node_dashboard.json └── alerts │ └── alerts.yml ├── jwt └── jwt.hex ├── DVNode.png ├── nimbus ├── Dockerfile └── run.sh ├── .gitignore ├── .github ├── workflows │ ├── test.yml │ ├── label-issues.yml │ └── dispath-update.yml ├── renovate.json └── ISSUE_TEMPLATE │ ├── feature_request.md │ └── bug_report.md ├── tempo └── tempo.yaml ├── logging.yml ├── prometheus ├── run.sh └── prometheus.yml.example ├── promtail ├── run.sh └── config.yml.example ├── relay └── docker-compose.yml ├── loki └── loki.yml ├── commit-boost ├── config.toml.sample.holesky ├── config.toml.sample.hoodi └── config.toml.sample.mainnet ├── compose-debug.yml ├── lodestar └── run.sh ├── prysm └── run.sh ├── compose-mev.yml ├── docker-compose.override.yml.sample ├── compose-el.yml ├── compose-vc.yml ├── compose-cl.yml ├── docker-compose.yml ├── .env.sample.hoodi ├── .env.sample.holesky ├── .env.sample.mainnet └── README.md /grafana/grafana.ini: -------------------------------------------------------------------------------- 1 | [auth.anonymous] 2 | enabled = true 3 | org_role = Admin -------------------------------------------------------------------------------- /jwt/jwt.hex: -------------------------------------------------------------------------------- 1 | 7074a5bf6bd6dae368fa598249d57edfcbccc67a1205b2c8d5d2fe7b800663aa -------------------------------------------------------------------------------- /DVNode.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ObolNetwork/charon-distributed-validator-node/HEAD/DVNode.png -------------------------------------------------------------------------------- /grafana/dashboards.yml: -------------------------------------------------------------------------------- 1 | apiVersion: 1 2 | 3 | providers: 4 | - name: dashboards 5 | type: file 6 | updateIntervalSeconds: 30 7 | 
options: 8 | path: /etc/dashboards 9 | -------------------------------------------------------------------------------- /nimbus/Dockerfile: -------------------------------------------------------------------------------- 1 | ARG VERSION 2 | 3 | FROM statusim/nimbus-eth2:${VERSION} AS nimbusbn 4 | 5 | FROM statusim/nimbus-validator-client:${VERSION} 6 | 7 | COPY --from=nimbusbn /home/user/nimbus_beacon_node /home/user/nimbus_beacon_node 8 | 9 | ENTRYPOINT ["/home/user/data/run.sh"] 10 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .env 2 | .env.charon.more 3 | docker-compose.override.yml 4 | charon-enr-private-key 5 | validator_keys/ 6 | keystore-* 7 | deposit-data.json 8 | cluster-definition.json 9 | cluster-lock.json 10 | .DS_Store 11 | data/ 12 | .idea 13 | .charon 14 | prometheus/prometheus.yml 15 | commit-boost/config.toml 16 | -------------------------------------------------------------------------------- /.github/workflows/test.yml: -------------------------------------------------------------------------------- 1 | name: tests 2 | on: 3 | pull_request: 4 | push: 5 | branches: 6 | - main 7 | jobs: 8 | create_containers: 9 | # Ensures default fresh checkout can create containers. 
10 | runs-on: ubuntu-latest 11 | steps: 12 | - uses: actions/checkout@v6 13 | - run: docker compose up --no-start 14 | - run: docker compose -f docker-compose.yml -f compose-debug.yml up --no-start 15 | -------------------------------------------------------------------------------- /tempo/tempo.yaml: -------------------------------------------------------------------------------- 1 | server: 2 | http_listen_port: 3200 3 | grpc_listen_port: 9095 4 | 5 | distributor: 6 | receivers: 7 | otlp: 8 | protocols: 9 | grpc: 10 | endpoint: 0.0.0.0:4317 11 | 12 | ingester: 13 | trace_idle_period: 10s 14 | max_block_bytes: 1_048_576 15 | max_block_duration: 5m 16 | 17 | compactor: 18 | compaction: 19 | block_retention: 24h 20 | 21 | storage: 22 | trace: 23 | backend: local 24 | local: 25 | path: /opt/tempo/traces 26 | -------------------------------------------------------------------------------- /logging.yml: -------------------------------------------------------------------------------- 1 | services: 2 | 3 | promtail: 4 | image: grafana/promtail:${PROMTAIL_VERSION:-2.8.2} 5 | environment: 6 | CHARON_LOKI_ADDRESSES: ${CHARON_LOKI_ADDRESSES} 7 | CLUSTER_NAME: ${CLUSTER_NAME} 8 | CLUSTER_PEER: ${CLUSTER_PEER} 9 | command: -config.file=/etc/promtail/config.yml 10 | volumes: 11 | - ./promtail:/etc/promtail 12 | - /var/run/docker.sock:/var/run/docker.sock 13 | networks: [dvnode] 14 | entrypoint: /etc/promtail/run.sh 15 | restart: unless-stopped 16 | 17 | networks: 18 | dvnode: 19 | -------------------------------------------------------------------------------- /prometheus/run.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | if [ -z "$SERVICE_OWNER" ] 4 | then 5 | echo "\$SERVICE_OWNER variable is empty" >&2 6 | exit 1 7 | fi 8 | 9 | if [ -z "$PROM_REMOTE_WRITE_TOKEN" ] 10 | then 11 | echo "\$PROM_REMOTE_WRITE_TOKEN variable is empty" >&2 12 | exit 1 13 | fi 14 | 15 | sed -e 
"s|\$PROM_REMOTE_WRITE_TOKEN|${PROM_REMOTE_WRITE_TOKEN}|g" \ 16 | -e "s|\$SERVICE_OWNER|${SERVICE_OWNER}|g" \ 17 | /etc/prometheus/prometheus.yml.example > /etc/prometheus/prometheus.yml 18 | 19 | /bin/prometheus \ 20 | --config.file=/etc/prometheus/prometheus.yml 21 | -------------------------------------------------------------------------------- /.github/workflows/label-issues.yml: -------------------------------------------------------------------------------- 1 | name: Label issues 2 | on: 3 | issues: 4 | types: 5 | - reopened 6 | - opened 7 | jobs: 8 | label_issues: 9 | runs-on: ubuntu-latest 10 | permissions: 11 | issues: write 12 | steps: 13 | - uses: actions/github-script@v8 14 | with: 15 | script: | 16 | github.rest.issues.addLabels({ 17 | issue_number: context.issue.number, 18 | owner: context.repo.owner, 19 | repo: context.repo.repo, 20 | labels: ["protocol"] 21 | }) 22 | -------------------------------------------------------------------------------- /.github/renovate.json: -------------------------------------------------------------------------------- 1 | { 2 | "$schema": "https://docs.renovatebot.com/renovate-schema.json", 3 | "extends": [ 4 | "config:recommended" 5 | ], 6 | "enabledManagers": [ 7 | "github-actions" 8 | ], 9 | "packageRules": [ 10 | { 11 | "matchManagers": [ 12 | "github-actions" 13 | ], 14 | "matchDepTypes": [ 15 | "github-actions" 16 | ], 17 | "matchFileNames": [ 18 | ".github/workflows/**" 19 | ], 20 | "schedule": [ 21 | "every weekend" 22 | ], 23 | "labels": [ 24 | "renovate/github-actions" 25 | ], 26 | "groupName": "GitHub Actions updates" 27 | } 28 | ] 29 | } 30 | -------------------------------------------------------------------------------- /promtail/run.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | if [ -z "$CHARON_LOKI_ADDRESSES" ]; then 4 | echo "Error: \$CHARON_LOKI_ADDRESSES variable is empty" >&2 5 | exit 1 6 | fi 7 | 8 | if [ -z "$CLUSTER_NAME" ]; then 9 | 
echo "Error: \$CLUSTER_NAME variable is empty" >&2 10 | exit 1 11 | fi 12 | 13 | if [ -z "$CLUSTER_PEER" ]; then 14 | echo "Error: \$CLUSTER_PEER variable is empty" >&2 15 | exit 1 16 | fi 17 | 18 | # Process the template file once 19 | sed -e "s|\$CHARON_LOKI_ADDRESSES|${CHARON_LOKI_ADDRESSES}|g" \ 20 | -e "s|\$CLUSTER_NAME|${CLUSTER_NAME}|g" \ 21 | -e "s|\$CLUSTER_PEER|${CLUSTER_PEER}|g" \ 22 | /etc/promtail/config.yml.example > /etc/promtail/config.yml 23 | 24 | # Start Promtail with the generated config 25 | /usr/bin/promtail \ 26 | -config.file=/etc/promtail/config.yml 27 | -------------------------------------------------------------------------------- /relay/docker-compose.yml: -------------------------------------------------------------------------------- 1 | services: 2 | # _ _ _ 3 | # _ _ ___ | | __ _ | || | 4 | # | '_|/ -_)| |/ _` | \_. | 5 | # |_| \___||_|\__/_| |__/ 6 | # 7 | relay: 8 | # Pegged charon version (update this for each release). 9 | image: obolnetwork/charon:${CHARON_VERSION:-v1.8.0} 10 | environment: 11 | CHARON_P2P_TCP_ADDRESS: 0.0.0.0:3610 12 | CHARON_HTTP_ADDRESS: 0.0.0.0:3640 13 | CHARON_LOG_LEVEL: debug 14 | CHARON_P2P_EXTERNAL_HOSTNAME: replace.with.public.ip.or.hostname 15 | ports: 16 | - 3610:3610/tcp 17 | - 3640:3640/tcp 18 | command: relay 19 | volumes: 20 | - .charon:/opt/charon/.charon # Relay charon-enr-private-key generated and persisted across restarts in this folder 21 | restart: on-failure 22 | -------------------------------------------------------------------------------- /grafana/datasource.yml: -------------------------------------------------------------------------------- 1 | apiVersion: 1 2 | 3 | deleteDatasources: 4 | - name: Prometheus 5 | orgId: 1 6 | - name: Loki 7 | orgId: 1 8 | 9 | 10 | datasources: 11 | - name: Prometheus 12 | type: prometheus 13 | uid: prometheus 14 | orgId: 1 15 | url: http://prometheus:9090 16 | basicAuth: false 17 | isDefault: true 18 | jsonData: 19 | graphiteVersion: "1.1" 20 | 
tlsAuth: false 21 | tlsAuthWithCACert: false 22 | version: 1 23 | editable: true 24 | 25 | - name: Loki 26 | type: loki 27 | uid: loki 28 | orgId: 1 29 | url: http://loki:3100 30 | basicAuth: false 31 | isDefault: false 32 | version: 1 33 | editable: true 34 | 35 | - name: Tempo 36 | type: tempo 37 | uid: tempo 38 | orgId: 1 39 | url: http://tempo:3200 40 | basicAuth: false 41 | isDefault: false 42 | version: 1 43 | editable: true -------------------------------------------------------------------------------- /loki/loki.yml: -------------------------------------------------------------------------------- 1 | auth_enabled: false 2 | 3 | server: 4 | http_listen_port: 3100 5 | 6 | ingester: 7 | lifecycler: 8 | address: 127.0.0.1 9 | ring: 10 | kvstore: 11 | store: inmemory 12 | replication_factor: 1 13 | final_sleep: 0s 14 | wal: 15 | enabled: false 16 | chunk_idle_period: 5m 17 | chunk_retain_period: 30s 18 | max_transfer_retries: 0 19 | 20 | schema_config: 21 | configs: 22 | - from: 2022-01-01 23 | store: boltdb 24 | object_store: filesystem 25 | schema: v11 26 | index: 27 | prefix: index_ 28 | period: 168h 29 | 30 | storage_config: 31 | boltdb: 32 | directory: /opt/loki/index 33 | 34 | filesystem: 35 | directory: /opt/loki/chunks 36 | 37 | limits_config: 38 | enforce_metric_name: false 39 | reject_old_samples: true 40 | reject_old_samples_max_age: 168h 41 | 42 | chunk_store_config: 43 | max_look_back_period: 0s 44 | 45 | table_manager: 46 | retention_deletes_enabled: false 47 | retention_period: 0s 48 | -------------------------------------------------------------------------------- /commit-boost/config.toml.sample.holesky: -------------------------------------------------------------------------------- 1 | chain = "Holesky" 2 | 3 | [pbs] 4 | port = 18550 5 | host = "0.0.0.0" 6 | #timeout_get_header_ms 7 | #timeout_get_payload_ms 8 | #timeout_register_validator_ms 9 | 10 | [metrics] 11 | enabled = true 12 | host = "0.0.0.0" 13 | 14 | [[relays]] 15 | id = 
"boost-relay-holesky.flashbots.net" 16 | url = "https://0xafa4c6985aa049fb79dd37010438cfebeb0f2bd42b115b89dd678dab0670c1de38da0c4e9138c9290a398ecd9a0b3110@boost-relay-holesky.flashbots.net" 17 | 18 | [[relays]] 19 | id = "holesky.titanrelay.xyz" 20 | url = "https://0xaa58208899c6105603b74396734a6263cc7d947f444f396a90f7b7d3e65d102aec7e5e5291b27e08d02c50a050825c2f@holesky.titanrelay.xyz" 21 | 22 | [[relays]] 23 | id = "relay-stag.ultrasound.money" 24 | url = "https://0xb1559beef7b5ba3127485bbbb090362d9f497ba64e177ee2c8e7db74746306efad687f2cf8574e38d70067d40ef136dc@relay-stag.ultrasound.money" 25 | 26 | [[relays]] 27 | id = "holesky.aestus.live" 28 | url = "https://0xab78bf8c781c58078c3beb5710c57940874dd96aef2835e7742c866b4c7c0406754376c2c8285a36c630346aa5c5f833@holesky.aestus.live" -------------------------------------------------------------------------------- /prometheus/prometheus.yml.example: -------------------------------------------------------------------------------- 1 | global: 2 | scrape_interval: 30s # Set the scrape interval to every 30 seconds. 3 | evaluation_interval: 30s # Evaluate rules every 30 seconds. 4 | external_labels: 5 | service_owner: $SERVICE_OWNER # replace this with your Operator name you want to be identified by, it helps us route alerts and metrics to your notification channels easily 6 | 7 | remote_write: 8 | - url: https://vm.monitoring.gcp.obol.tech/write 9 | authorization: 10 | credentials: $PROM_REMOTE_WRITE_TOKEN 11 | write_relabel_configs: 12 | - source_labels: [job] 13 | regex: "charon" 14 | action: keep # Keeps charon metrics and drop metrics from other containers. 
15 | 16 | scrape_configs: 17 | - job_name: "nethermind" 18 | static_configs: 19 | - targets: ["nethermind:8008"] 20 | - job_name: "lighthouse" 21 | static_configs: 22 | - targets: ["lighthouse:5054"] 23 | - job_name: "charon" 24 | static_configs: 25 | - targets: ["charon:3620"] 26 | - job_name: "lodestar" 27 | static_configs: 28 | - targets: ["lodestar:5064"] 29 | # Debug 30 | - job_name: "node-exporter" 31 | static_configs: 32 | - targets: ["node-exporter:9100"] 33 | - job_name: "cadvisor" 34 | static_configs: 35 | - targets: ["cadvisor:8080"] 36 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/feature_request.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: "\U0001F680 Feature or Improvement Ticket" 3 | about: Create a new feature or suggest an improvement 4 | labels: Enhancement 5 | --- 6 | 7 | # 🎯 Problem to be solved 8 | 9 | 10 | 11 | # 🧪 Tests 12 | 13 | 14 | 15 | - [ ] Works in local docker-compose 16 | - [ ] Has a attested on a testnet at least once 17 | 18 | # 👐 Additional acceptance criteria 19 | 20 | 21 | 22 | # ❌ Out of Scope 23 | 24 | 25 | 26 | 33 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/bug_report.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: "\U0001F41E Bug report" 3 | about: Report a bug or problem with running this repo 4 | labels: Bug 5 | --- 6 | 14 | 15 | # 🐞 Bug Report 16 | 17 | ### Description 18 | 19 | A clear and concise description of the problem... 20 | 21 | ## 🔬 Minimal Reproduction 22 | 23 | 26 | 27 | ## 🔥 Error 28 | 29 |
30 |
31 |
32 |
33 |
34 |
35 |
36 | ## 🌍 Your Environment
37 |
38 | **Operating System:**
39 |
40 |
41 |
42 |
43 |
44 |
45 |
46 | **What version of Charon are you running? (Which release)**
47 |
48 |
49 |
50 |
51 |
52 |
53 |
54 | **Anything else relevant (validator index / public key)?**
55 |
56 |
--------------------------------------------------------------------------------
/.github/workflows/dispath-update.yml:
--------------------------------------------------------------------------------
name: Dispatch Update Version

on:
  repository_dispatch:
    types: [update-version]

jobs:
  update-version:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout this repository
        uses: actions/checkout@v6

      - name: Extract tag name
        run: echo "TAG_NAME=${{ github.event.client_payload.tag }}" >> $GITHUB_ENV

      # Rewrites the pegged charon image tag in both compose files
      # (previous step name incorrectly said "Ansible configuration").
      - name: Update version in compose files
        run: |
          TAG_NAME="$(echo "${TAG_NAME}" | sed 's/^v//')"

          sed -i -E 's|(image: obolnetwork/charon:\$\{CHARON_VERSION:-)v\.?[0-9]+\.[0-9]+\.[0-9]+[a-zA-Z0-9\-]*}|\1v'"${TAG_NAME}"'}|' docker-compose.yml
          sed -i -E 's|(image: obolnetwork/charon:\$\{CHARON_VERSION:-)v\.?[0-9]+\.[0-9]+\.[0-9]+[a-zA-Z0-9\-]*}|\1v'"${TAG_NAME}"'}|' relay/docker-compose.yml

      - name: Create Pull Request
        uses: peter-evans/create-pull-request@v8
        with:
          token: ${{ secrets.GITHUB_TOKEN }}
          base: main
          branch: update-version-${{ env.TAG_NAME }}
          title: "Update version to ${{ env.TAG_NAME }}"
          body: "Automatically generated PR to update version to ${{ env.TAG_NAME }}"
          commit-message: "Update version to ${{ env.TAG_NAME }}"
          # create-pull-request exposes no `author-name`/`author-email` inputs;
          # the supported input is `author` in "Name <email>" format.
          author: "obol-platform <platform@obol.tech>"
--------------------------------------------------------------------------------
/commit-boost/config.toml.sample.hoodi:
--------------------------------------------------------------------------------
1 | chain = "Hoodi"
2 |
3 | [pbs]
4 | port = 18550
5 | host = "0.0.0.0"
6 | #timeout_get_header_ms
7 | #timeout_get_payload_ms
8 | #timeout_register_validator_ms
9 |
10 | [metrics]
11 | enabled = true
12 | host = "0.0.0.0"
13 |
14 | [[relays]]
15 | id = "hoodi.aestus.live"
16 | url = "https://0x98f0ef62f00780cf8eb06701a7d22725b9437d4768bb19b363e882ae87129945ec206ec2dc16933f31d983f8225772b6@hoodi.aestus.live"
17 |
18 | [[relays]]
19 | id = "hoodi.titanrelay.xyz"
20 | url = "https://0xaa58208899c6105603b74396734a6263cc7d947f444f396a90f7b7d3e65d102aec7e5e5291b27e08d02c50a050825c2f@hoodi.titanrelay.xyz"
21 |
22 | [[relays]]
23 | id = "relay-hoodi.ultrasound.money"
24 | url = "https://0xb1559beef7b5ba3127485bbbb090362d9f497ba64e177ee2c8e7db74746306efad687f2cf8574e38d70067d40ef136dc@relay-hoodi.ultrasound.money"
25 |
26 | [[relays]]
27 | id = "boost-relay-hoodi.flashbots.net"
28 | url = "https://0xafa4c6985aa049fb79dd37010438cfebeb0f2bd42b115b89dd678dab0670c1de38da0c4e9138c9290a398ecd9a0b3110@boost-relay-hoodi.flashbots.net"
29 |
30 | [[relays]]
31 | id = "hoodi-builder-proxy-alpha.interstate.so"
32 | url = "https://0x9110847c15a7f5c80a9fdd5db989a614cc01104e53bd8c252b6f46a4842c7fdef6b9593336035b5094878deff386804c@hoodi-builder-proxy-alpha.interstate.so"
33 |
34 | [[relays]]
35 | id = "hoodi-relay.ethgas.com"
36 | url = "https://0xb20c3fe59db9c3655088839ef3d972878d182eb745afd8abb1dd2abf6c14f93cd5934ed4446a5fe1ba039e2bc0cf1011@hoodi-relay.ethgas.com"
--------------------------------------------------------------------------------
/nimbus/run.sh:
--------------------------------------------------------------------------------
#!/usr/bin/env bash

# Imports charon validator keys into a fresh nimbus data directory, then
# starts the nimbus validator client in distributed (DVT) mode.

# Cleanup nimbus directories if they already exist.
rm -rf /home/user/data

# Refer: https://nimbus.guide/keys.html
# Running a nimbus VC involves two steps which need to run in order:
# 1. Importing the validator keys
# 2. And then actually running the VC
tmpkeys="/home/validator_keys/tmpkeys"
mkdir -p "${tmpkeys}"

for f in /home/validator_keys/keystore-*.json; do
  echo "Importing key ${f}"

  # Read password from the matching keystore-*.txt into $password.
  password=$(<"${f//json/txt}")

  # Copy keystore file to tmpkeys/ so only this one key is imported this pass.
  cp "${f}" "${tmpkeys}"

  # Import keystore with the password.
  echo "$password" |
    /home/user/nimbus_beacon_node deposits import \
      --data-dir=/home/user/data \
      /home/validator_keys/tmpkeys

  # Delete the copied tmpkeys/keystore-*.json file.
  # Fix: the previous revision ran `rm "${tmpkeys}/$(unknown)"`, which invokes
  # a nonexistent `unknown` command and never removes the copied keystore;
  # use the captured basename instead.
  filename="$(basename "${f}")"
  rm "${tmpkeys}/${filename}"
done

# Delete the tmpkeys/ directory since it's no longer needed.
rm -r "${tmpkeys}"

echo "Imported all keys"

# Now run nimbus VC.
exec /home/user/nimbus_validator_client \
  --data-dir=/home/user/data \
  --beacon-node="${BEACON_NODE_ADDRESS}" \
  --doppelganger-detection=false \
  --metrics \
  --metrics-address=0.0.0.0 \
  --payload-builder="${BUILDER_API_ENABLED}" \
  --distributed
--------------------------------------------------------------------------------
/promtail/config.yml.example:
--------------------------------------------------------------------------------
1 | server:
2 | http_listen_port: 9080
3 | grpc_listen_port: 0
4 |
5 | positions:
6 | filename: /tmp/positions.yaml
7 |
8 | clients:
9 | - url: $CHARON_LOKI_ADDRESSES
10 |
11 | scrape_configs:
12 | - job_name: docker
13 | docker_sd_configs:
14 | - host: unix:///var/run/docker.sock
15 | relabel_configs:
16 | - source_labels:
17 | [__meta_docker_container_label_promtail_monitored]
18 | regex: "true"
19 | action: keep
20 | - source_labels: ['__meta_docker_container_name']
21 | regex: '/(.*)'
22 | replacement: '$1'
23 | target_label: 'container'
24 | - source_labels: ['container']
25 | regex: '.*charon.*'
26 | replacement: 'charon'
27 | target_label: 'job'
28 | - source_labels: ['container']
29 | regex: '.*nethermind.*'
30 | replacement: 'nethermind'
31 | target_label: 'job'
32 | - source_labels: ['container']
33 | regex: '.*lodestar.*'
34 | replacement: 'lodestar'
35 | target_label: 'job'
36 | - source_labels: ['container']
37 | regex: '.*lighthouse.*'
38 | replacement: 'lighthouse'
39 | target_label: 'job'
40 | - source_labels: ['container']
41 | regex: '.*mev-boost.*'
42 | replacement: 'mev-boost'
43 | target_label: 'job'
44 | - target_label: 'cluster_name'
45 | replacement: $CLUSTER_NAME
46 | - target_label: 'cluster_peer'
47 | replacement: $CLUSTER_PEER
48 | pipeline_stages:
49 | - docker: {}
50 |
--------------------------------------------------------------------------------
/commit-boost/config.toml.sample.mainnet:
--------------------------------------------------------------------------------
1 | chain = "Mainnet"
2 |
3 | [pbs]
4 | port = 18550
5 | host = "0.0.0.0"
6 | #timeout_get_header_ms
7 | #timeout_get_payload_ms
8 | #timeout_register_validator_ms
9 |
10 | [metrics]
11 | enabled = true
12 | host = "0.0.0.0"
13 |
14 | [[relays]]
15 | id = "aestus.live"
16 | url = "https://0xa15b52576bcbf1072f4a011c0f99f9fb6c66f3e1ff321f11f461d15e31b1cb359caa092c71bbded0bae5b5ea401aab7e@aestus.live"
17 |
18 | [[relays]]
19 | id = "agnostic-relay.net"
20 | url = "https://0xa7ab7a996c8584251c8f925da3170bdfd6ebc75d50f5ddc4050a6fdc77f2a3b5fce2cc750d0865e05d7228af97d69561@agnostic-relay.net"
21 |
22 | [[relays]]
23 | id = "bloxroute.max-profit.blxrbdn.com"
24 | url = "https://0x8b5d2e73e2a3a55c6c87b8b6eb92e0149a125c852751db1422fa951e42a09b82c142c3ea98d0d9930b056a3bc9896b8f@bloxroute.max-profit.blxrbdn.com"
25 |
26 | [[relays]]
27 | id = "bloxroute.regulated.blxrbdn.com"
28 | url = "https://0xb0b07cd0abef743db4260b0ed50619cf6ad4d82064cb4fbec9d3ec530f7c5e6793d9f286c4e082c0244ffb9f2658fe88@bloxroute.regulated.blxrbdn.com"
29 |
30 | [[relays]]
31 | id = "boost-relay.flashbots.net"
32 | url = "https://0xac6e77dfe25ecd6110b8e780608cce0dab71fdd5ebea22a16c0205200f2f8e2e3ad3b71d3499c54ad14d6c21b41a37ae@boost-relay.flashbots.net"
33 |
34 | [[relays]]
35 | id = "relay.ultrasound.money"
36 | url = "https://0xa1559ace749633b997cb3fdacffb890aeebdb0f5a3b6aaa7eeeaf1a38af0a8fe88b9e4b1f61f236d2e64d95733327a62@relay.ultrasound.money"
37 |
38 | [[relays]]
39 | id = "global.titanrelay.xyz"
40 | url = "https://0x8c4ed5e24fe5c6ae21018437bde147693f68cda427cd1122cf20819c30eda7ed74f72dece09bb313f2a1855595ab677d@global.titanrelay.xyz"
--------------------------------------------------------------------------------
/compose-debug.yml:
--------------------------------------------------------------------------------
1 | # Docker-compose file to aid developers in debugging. This is not required for "normal" users. See the "Docker power users" section of the README.md for more info.
2 |
3 | # Override any defaults specified by `${FOO:-bar}` in `.env` with `FOO=qux`.
4 | # See .env.sample "Debug Config" section
5 |
6 | services:
7 | cadvisor:
8 | image: gcr.io/cadvisor/cadvisor:${CADVISOR_VERSION:-v0.47.0}
9 | command: --raw_cgroup_prefix_whitelist=/docker/ --disable_metrics=hugetlb
10 | privileged: true
11 | volumes:
12 | - "/:/rootfs:ro"
13 | - "/var/run:/var/run:ro"
14 | - "/sys:/sys:ro"
15 | - "/var/lib/docker/:/var/lib/docker:ro"
16 | - "/dev/disk/:/dev/disk:ro"
17 | devices:
18 | - "/dev/kmsg:/dev/kmsg"
19 | restart: unless-stopped
20 |
21 | node-exporter:
22 | image: bitnamilegacy/node-exporter:${NODE_EXPORTER_VERSION:-1.6.0}
23 |
24 | tempo:
25 | image: grafana/tempo:${TEMPO_VERSION:-2.7.1}
26 | user: ":"
27 | command: -config.file=/etc/tempo/tempo.yaml
28 | volumes:
29 | - ./tempo:/etc/tempo
30 | - ./data/tempo:/opt/tempo
31 | restart: unless-stopped
32 |
33 | loki:
34 | image: grafana/loki:${LOKI_VERSION:-2.8.2}
35 | user: ":"
36 | command: -config.file=/etc/loki/loki.yml
37 | volumes:
38 | - ./loki/loki.yml:/etc/loki/loki.yml
39 | - ./data/loki:/opt/loki
40 | restart: unless-stopped
41 |
42 | charon:
43 | environment:
44 | - CHARON_OTLP_ADDRESS=tempo:4317
45 | - CHARON_OTLP_SERVICE_NAME=charon
46 | - CHARON_LOKI_ADDRESSES=${CHARON_LOKI_ADDRESSES:-http://loki:3100/loki/api/v1/push}
47 | - CHARON_LOKI_SERVICE=charon
48 |
49 | networks:
50 | default:
51 | name: ${CHARON_DOCKER_NETWORK:-charon-distributed-validator-node_dvnode}
52 | external: true
53 |
--------------------------------------------------------------------------------
/lodestar/run.sh:
--------------------------------------------------------------------------------
#!/bin/sh

# Import charon validator keys into lodestar's persisted keystore layout,
# then launch the lodestar validator client in distributed (DVT) mode.

# Remove the existing keystores to avoid keystore locking issues.
rm -rf /opt/data/cache /opt/data/secrets /opt/data/keystores

DATA_DIR="/opt/data"
KEYSTORES_DIR="${DATA_DIR}/keystores"
SECRETS_DIR="${DATA_DIR}/secrets"

mkdir -p "${KEYSTORES_DIR}" "${SECRETS_DIR}"

imported=0
existing=0

for f in /home/charon/validator_keys/keystore-*.json; do
  echo "Importing key ${f}"

  # Pull the pubkey out of the keystore JSON: it is the 4th double-quoted
  # field on the line containing "pubkey".
  pubkey="0x$(grep '"pubkey"' "$f" | awk -F'"' '{print $4}')"
  pubkey_dir="${KEYSTORES_DIR}/${pubkey}"

  # Keystore already present from a previous run: count it and move on.
  if [ -d "${pubkey_dir}" ]; then
    existing=$((existing + 1))
    continue
  fi

  mkdir -p "${pubkey_dir}"

  # Persist the keystore and its matching password file with owner-only
  # permissions (install sets mode 600 in one step).
  install -m 600 "$f" "${pubkey_dir}/voting-keystore.json"
  install -m 600 "${f%.json}.txt" "${SECRETS_DIR}/${pubkey}"

  imported=$((imported + 1))
done

echo "Processed all keys imported=${imported}, existing=${existing}, total=$(ls /home/charon/validator_keys/keystore-*.json | wc -l)"

exec node /usr/app/packages/cli/bin/lodestar validator \
  --dataDir="$DATA_DIR" \
  --keystoresDir="$KEYSTORES_DIR" \
  --secretsDir="$SECRETS_DIR" \
  --network="$NETWORK" \
  --metrics=true \
  --metrics.address="0.0.0.0" \
  --metrics.port=5064 \
  --beaconNodes="$BEACON_NODE_ADDRESS" \
  --builder="$BUILDER_API_ENABLED" \
  --builder.selection="$BUILDER_SELECTION" \
  --distributed
--------------------------------------------------------------------------------
/prysm/run.sh:
--------------------------------------------------------------------------------
#!/usr/bin/env bash

# Imports charon validator keys into a prysm wallet, then starts the prysm
# validator client in distributed (DVT) mode.

WALLET_DIR="/prysm-wallet"
# Absolute path so the password file resolves regardless of the container's
# working directory (it is written to /wallet-password.txt below; the old
# relative `wallet-password.txt` references only worked when CWD was /).
WALLET_PASSWORD_FILE="/wallet-password.txt"

# Cleanup wallet directory if it already exists.
rm -rf "$WALLET_DIR"
mkdir "$WALLET_DIR"

# Refer: https://prysm.offchainlabs.com/docs/install-prysm/install-with-script/#step-5-run-a-validator-using-prysm
# Running a prysm VC involves two steps which need to run in order:
# 1. Import validator keys in a prysm wallet account.
# 2. Run the validator client.
WALLET_PASSWORD="prysm-validator-secret"
echo "$WALLET_PASSWORD" > "$WALLET_PASSWORD_FILE"
/app/cmd/validator/validator wallet create --accept-terms-of-use --wallet-password-file="$WALLET_PASSWORD_FILE" --keymanager-kind=direct --wallet-dir="$WALLET_DIR"

tmpkeys="/home/validator_keys/tmpkeys"
mkdir -p "${tmpkeys}"

for f in /home/charon/validator_keys/keystore-*.json; do
  echo "Importing key ${f}"

  # Copy keystore file to tmpkeys/ so only this one key is imported this pass.
  cp "${f}" "${tmpkeys}"

  # Import keystore using its matching keystore-*.txt password file.
  /app/cmd/validator/validator accounts import \
    --accept-terms-of-use=true \
    --wallet-dir="$WALLET_DIR" \
    --keys-dir="${tmpkeys}" \
    --account-password-file="${f//json/txt}" \
    --wallet-password-file="$WALLET_PASSWORD_FILE"

  # Delete the copied tmpkeys/keystore-*.json file.
  # Fix: the previous revision ran `rm "${tmpkeys}/$(unknown)"`, which invokes
  # a nonexistent `unknown` command and never removes the copied keystore;
  # use the captured basename instead.
  filename="$(basename "${f}")"
  rm "${tmpkeys}/${filename}"
done

# Delete the tmpkeys/ directory since it's no longer needed.
rm -r "${tmpkeys}"

echo "Imported all keys"

# Now run prysm VC; exec (as the nimbus/lodestar run scripts do) so the
# client receives container stop signals directly.
exec /app/cmd/validator/validator --wallet-dir="$WALLET_DIR" \
  --accept-terms-of-use=true \
  --datadir="/data/vc" \
  --wallet-password-file="$WALLET_PASSWORD_FILE" \
  --enable-beacon-rest-api \
  --beacon-rest-api-provider="${BEACON_NODE_ADDRESS}" \
  --beacon-rpc-provider="${BEACON_NODE_ADDRESS}" \
  --"${NETWORK}" \
  --distributed
--------------------------------------------------------------------------------
/compose-mev.yml:
--------------------------------------------------------------------------------
1 | # Override any defaults specified by `${FOO:-bar}` in `.env` with `FOO=qux`.
2 | # ${VARIABLE:-default} evaluates to default if VARIABLE is unset or empty in the environment.
3 | # ${VARIABLE-default} evaluates to default only if VARIABLE is unset in the environment.
4 |
5 | services:
6 | # _ _ _ _
7 | # ___ ___ _ __ ___ _ __ ___ (_) |_ | |__ ___ ___ ___| |_
8 | # / __/ _ \| '_ ` _ \| '_ ` _ \| | __|____| '_ \ / _ \ / _ \/ __| __|
9 | # | (_| (_) | | | | | | | | | | | | ||_____| |_) | (_) | (_) \__ \ |_
10 | # \___\___/|_| |_| |_|_| |_| |_|_|\__| |_.__/ \___/ \___/|___/\__|
11 |
12 | mev-commitboost:
13 | profiles: [mev-commitboost]
14 | image: ghcr.io/commit-boost/pbs:${MEV_COMMITBOOST_VERSION:-v0.9.2}
15 | environment:
16 | - CB_CONFIG=/etc/commit-boost/config.toml
17 | volumes:
18 | - ./commit-boost/config.toml:/etc/commit-boost/config.toml:ro
19 | labels:
20 | - "promtail-monitored=${MEV_COMMIT_BOOST_PROMTAIL_MONITORED:-true}"
21 | networks: [dvnode]
22 | restart: unless-stopped
23 |
24 | # _ _
25 | # _ __ ___ _____ __ | |__ ___ ___ ___| |_
26 | # | '_ ` _ \ / _ \ \ / /____| '_ \ / _ \ / _ \/ __| __|
27 | # | | | | | | __/\ V /_____| |_) | (_) | (_) \__ \ |_
28 | # |_| |_| |_|\___| \_/ |_.__/ \___/ \___/|___/\__|
29 |
30 | mev-mevboost:
31 | profiles: [mev-mevboost]
32 | image: flashbots/mev-boost:${MEV_MEVBOOST_VERSION:-1.10.1}
33 | command: |
34 | -${NETWORK}
35 | -loglevel=debug
36 | -addr=0.0.0.0:18550
37 | -relay-check
38 | -relays=${MEV_RELAYS}
39 | -request-timeout-getheader=${MEV_TIMEOUT_GETHEADER:-950}
40 | -request-timeout-getpayload=${MEV_TIMEOUT_GETPAYLOAD:-4000}
41 | -request-timeout-regval=${MEV_TIMEOUT_REGVAL:-3000}
42 | labels:
43 | - "promtail-monitored=${MEV_MEV_BOOST_PROMTAIL_MONITORED:-true}"
44 | networks: [dvnode]
45 | restart: unless-stopped
46 |
--------------------------------------------------------------------------------
/docker-compose.override.yml.sample:
--------------------------------------------------------------------------------
1 | # The "Multiple Compose File" feature provides a very powerful way to override
2 | # any configuration in docker-compose.yml without needing to modify
3 | # git-checked-in files since that results in conflicts when upgrading this repo.
4 | # See https://docs.docker.com/compose/extends/#multiple-compose-files for more.
5 |
6 | # Just copy this file to `docker-compose.override.yml` and customise it to your liking.
7 | # `cp docker-compose.override.yml.sample docker-compose.override.yml`
8 |
9 | # Some example overrides are commented out below. Any uncommented section
10 | # below will automatically override the same section in
11 | # docker-compose.yml when run with `docker-compose up`.
12 | # See https://docs.docker.com/compose/extends/#adding-and-overriding-configuration for details.
13 |
14 | # WARNING: This is for power users only and requires a deep understanding of Docker Compose
15 | # and how the local docker-compose.yml is configured.
16 |
17 | #services:
18 | #nethermind:
19 | # Disable nethermind
20 | #profiles: [disable]
21 | # Bind nethermind internal ports to host ports
22 | #ports:
23 | #- 8545:8545 # JSON-RPC
24 | #- 8551:8551 # AUTH-RPC
25 | #- 8008:8008 # Metrics
26 |
27 | #lighthouse:
28 | # Disable lighthouse
29 | #profiles: [disable]
30 | # Bind lighthouse internal ports to host ports
31 | #ports:
32 | #- 5052:5052 # HTTP
33 | #- 5054:5054 # Metrics
34 |
35 | #charon:
36 | # Configure any additional env var flags in .env.charon.more
37 | #env_file: [.env.charon.more]
38 | # Uncomment the extra_hosts section if you are trying to communicate with a CL running in a different docker network on the same machine
39 | #extra_hosts:
40 | #- "host.docker.internal:host-gateway"
41 | # Bind charon internal ports to host ports
42 | #ports:
43 | #- 3600:3600/tcp # Validator API
44 | #- 3620:3620/tcp # Monitoring
45 |
46 | #lodestar:
47 | # Disable lodestar
48 | #profiles: [disable]
49 | # Bind lodestar internal ports to host ports
50 | #ports:
51 | #- 5064:5064 # Metrics
52 |
53 | #prometheus:
54 | # Disable prometheus
55 | #profiles: [disable]
56 | # Bind prometheus internal ports to host ports
57 | #ports:
58 | #- 9090:9090 # Metrics
59 |
60 | #mev-boost:
61 | # Disable mev-boost
62 | #profiles: [disable]
63 | # Bind mev-boost internal ports to host ports
64 | #ports:
65 | #- 18550:18550 # Metrics
66 |
--------------------------------------------------------------------------------
/compose-el.yml:
--------------------------------------------------------------------------------
1 | # Override any defaults specified by `${FOO:-bar}` in `.env` with `FOO=qux`.
2 | # ${VARIABLE:-default} evaluates to default if VARIABLE is unset or empty in the environment.
3 | # ${VARIABLE-default} evaluates to default only if VARIABLE is unset in the environment.
4 |
5 | services:
6 | # _ _ _ _
7 | # _ __ ___| |_| |__ ___ _ __ _ __ ___ (_)_ __ __| |
8 | # | '_ \ / _ \ __| '_ \ / _ \ '__| '_ ` _ \| | '_ \ / _` |
9 | # | | | | __/ |_| | | | __/ | | | | | | | | | | | (_| |
10 | # |_| |_|\___|\__|_| |_|\___|_| |_| |_| |_|_|_| |_|\__,_|
11 |
12 | el-nethermind:
13 | profiles: [el-nethermind]
14 | image: nethermind/nethermind:${EL_NETHERMIND_VERSION:-1.35.2}
15 | restart: unless-stopped
16 | ports:
17 | - ${EL_PORT_P2P:-30303}:30303/tcp # P2P TCP
18 | - ${EL_PORT_P2P:-30303}:30303/udp # P2P UDP
19 | - ${EL_IP_HTTP:-127.0.0.1}:${EL_PORT_HTTP:-8545}:8545 # JSON-RPC
20 | - ${EL_IP_ENGINE:-127.0.0.1}:${EL_PORT_ENGINE:-8551}:8551 # ENGINE-API
21 | labels:
22 | - "promtail-monitored=${EL_NETHERMIND_PROMTAIL_MONITORED:-true}"
23 | command: |
24 | --config=${NETWORK}
25 | --data-dir=/nethermind/data
26 | --HealthChecks.Enabled=true
27 | --JsonRpc.Enabled=true
28 | --JsonRpc.JwtSecretFile="/root/jwt/jwt.hex"
29 | --JsonRpc.EngineHost=0.0.0.0
30 | --JsonRpc.EnginePort=8551
31 | --JsonRpc.Host=0.0.0.0
32 | --JsonRpc.Port=8545
33 | --Metrics.Enabled=true
34 | --Metrics.ExposePort=8008
35 | --Sync.SnapSync=true
36 | --History.Pruning=Rolling
37 | networks: [dvnode]
38 | volumes:
39 | - ./data/nethermind:/nethermind/data
40 | - ./jwt:/root/jwt
41 |
42 | # _ _
43 | # _ __ ___| |_| |__
44 | # | '__/ _ \ __| '_ \
45 | # | | | __/ |_| | | |
46 | # |_| \___|\__|_| |_|
47 |
48 | el-reth:
49 | profiles: [el-reth]
50 | image: ghcr.io/paradigmxyz/reth:${EL_RETH_VERSION:-v1.9.3}
51 | restart: unless-stopped
52 | ports:
53 | - ${EL_PORT_P2P:-30303}:30303/tcp # P2P TCP
54 | - ${EL_PORT_P2P:-30303}:30303/udp # P2P UDP
55 | - ${EL_IP_HTTP:-127.0.0.1}:${EL_PORT_HTTP:-8545}:8545 # JSON-RPC
56 | - ${EL_IP_ENGINE:-127.0.0.1}:${EL_PORT_ENGINE:-8551}:8551 # ENGINE-API
57 | labels:
58 | - "promtail-monitored=${EL_RETH_PROMTAIL_MONITORED:-true}"
59 | command: |
60 | node
61 | --full
62 | --chain=${NETWORK}
63 | --datadir=/reth/data
64 | --authrpc.jwtsecret="/root/jwt/jwt.hex"
65 | --authrpc.addr=0.0.0.0
66 | --authrpc.port=8551
67 | --http
68 | --http.addr=0.0.0.0
69 | --http.port=8545
70 | --metrics=0.0.0.0:8008
71 | networks: [dvnode]
72 | volumes:
73 | - ./data/reth:/reth/data
74 | - ./jwt:/root/jwt
75 |
--------------------------------------------------------------------------------
/compose-vc.yml:
--------------------------------------------------------------------------------
1 | # Override any defaults specified by `${FOO:-bar}` in `.env` with `FOO=qux`.
2 | # ${VARIABLE:-default} evaluates to default if VARIABLE is unset or empty in the environment.
3 | # ${VARIABLE-default} evaluates to default only if VARIABLE is unset in the environment.
4 |
5 | services:
6 | # _ _ _
7 | # | | ___ __| | ___ ___| |_ __ _ _ __
8 | # | |/ _ \ / _` |/ _ \/ __| __/ _` | '__|
9 | # | | (_) | (_| | __/\__ \ || (_| | |
10 | # |_|\___/ \__,_|\___||___/\__\__,_|_|
11 |
12 | vc-lodestar:
13 | profiles: [vc-lodestar]
14 | image: chainsafe/lodestar:${VC_LODESTAR_VERSION:-v1.38.0}
15 | depends_on: [charon]
16 | entrypoint: /opt/lodestar/run.sh
17 | networks: [dvnode]
18 | environment:
19 | BEACON_NODE_ADDRESS: http://charon:3600
20 | NETWORK: ${NETWORK}
21 | BUILDER_API_ENABLED: ${BUILDER_API_ENABLED:-true}
22 | BUILDER_SELECTION: ${VC_LODESTAR_BUILDER_SELECTION:-builderalways}
23 | labels:
24 | - "promtail-monitored=${VC_LODESTAR_PROMTAIL_MONITORED:-true}"
25 | volumes:
26 | - ./lodestar/run.sh:/opt/lodestar/run.sh
27 | - .charon/validator_keys:/home/charon/validator_keys
28 | - ./data/lodestar:/opt/data # Keep data in lodestar and not vc-lodestar for backwards compatibility
29 | restart: unless-stopped
30 |
31 | # _ _
32 | # _ __ (_)_ __ ___ | |__ _ _ ___
33 | # | '_ \| | '_ ` _ \| '_ \| | | / __|
34 | # | | | | | | | | | | |_) | |_| \__ \
35 | # |_| |_|_|_| |_| |_|_.__/ \__,_|___/
36 |
37 | vc-nimbus:
38 | profiles: [vc-nimbus]
39 | image: "lido-nimbus-validator-client:${VC_NIMBUS_VERSION:-multiarch-v25.11.1}"
40 | build:
41 | context: nimbus
42 | args:
43 | VERSION: ${VC_NIMBUS_VERSION:-multiarch-v25.11.1}
44 | depends_on: [charon]
45 | networks: [dvnode]
46 | environment:
47 | BEACON_NODE_ADDRESS: http://charon:3600
48 | BUILDER_API_ENABLED: ${BUILDER_API_ENABLED:-true}
49 | labels:
50 | - "promtail-monitored=${VC_NIMBUS_PROMTAIL_MONITORED:-true}"
51 | volumes:
52 | - ./nimbus/run.sh:/home/user/data/run.sh
53 | - .charon/validator_keys:/home/validator_keys
54 | - ./data/vc-nimbus:/home/user/data
55 | restart: unless-stopped
56 |
57 | # _ __ _ __ _ _ ___ _ __ ___
58 | # | '_ \| '__| | | / __| '_ ` _ \
59 | # | |_) | | | |_| \__ \ | | | | |
60 | # | .__/|_| \__, |___/_| |_| |_|
61 | # |_| |___/
62 |
63 | vc-prysm:
64 | profiles: [vc-prysm]
65 | image: offchainlabs/prysm-validator:${VC_PRYSM_VERSION:-v7.1.0}
66 | platform: "linux/amd64"
67 | depends_on: [charon]
68 | networks: [dvnode]
69 | entrypoint: /home/prysm/run.sh
70 | environment:
71 | BEACON_NODE_ADDRESS: http://charon:3600
72 | NETWORK: ${NETWORK}
73 | labels:
74 | - "promtail-monitored=${VC_PRYSM_PROMTAIL_MONITORED:-true}"
75 | volumes:
76 | - ./prysm/run.sh:/home/prysm/run.sh
77 | - ./data/vc-prysm:/data/vc
78 | - .charon/validator_keys:/home/charon/validator_keys
79 | restart: unless-stopped
80 |
81 | # _ _
82 | # | |_ ___| | ___ _
83 | # | __/ _ \ |/ / | | |
84 | # | || __/ <| |_| |
85 | # \__\___|_|\_\\__,_|
86 |
87 | vc-teku:
88 | profiles: [vc-teku]
89 | image: consensys/teku:${VC_TEKU_VERSION:-25.12.0}
90 | command: |
91 | validator-client
92 | --beacon-node-api-endpoint "http://charon:3600"
93 | --network="${NETWORK}"
94 | --data-base-path=/home/data
95 | --validator-keys="/opt/charon/validator_keys:/opt/charon/validator_keys"
96 | --validators-keystore-locking-enabled false
97 | --validators-external-signer-slashing-protection-enabled true
98 | --validators-builder-registration-default-enabled ${BUILDER_API_ENABLED:-true}
99 | --validators-proposer-default-fee-recipient "0x0000000000000000000000000000000000000000"
100 | --Xobol-dvt-integration-enabled true
101 | depends_on: [charon]
102 | networks: [dvnode]
103 | labels:
104 | - "promtail-monitored=${VC_TEKU_PROMTAIL_MONITORED:-true}"
105 | volumes:
106 | - .charon/validator_keys:/opt/charon/validator_keys
107 | - ./data/vc-teku:/home/data
108 | restart: unless-stopped
109 |
--------------------------------------------------------------------------------
/compose-cl.yml:
--------------------------------------------------------------------------------
1 | # Override any defaults specified by `${FOO:-bar}` in `.env` with `FOO=qux`.
2 | # ${VARIABLE:-default} evaluates to default if VARIABLE is unset or empty in the environment.
3 | # ${VARIABLE-default} evaluates to default only if VARIABLE is unset in the environment.
4 |
5 | services:
6 | # _ _
7 | # __ _ _ __ __ _ _ __ __| (_)_ __ ___
8 | # / _` | '__/ _` | '_ \ / _` | | '_ \ / _ \
9 | # | (_| | | | (_| | | | | (_| | | | | | __/
10 | # \__, |_| \__,_|_| |_|\__,_|_|_| |_|\___|
11 | # |___/
12 |
13 | cl-grandine:
14 | profiles: [cl-grandine]
15 | image: sifrai/grandine:${CL_GRANDINE_VERSION:-2.0.1}
16 | restart: unless-stopped
17 | labels:
18 | - "promtail-monitored=${CL_GRANDINE_PROMTAIL_MONITORED:-true}"
19 | command:
20 | - --data-dir=/root/.grandine
21 | - --eth1-rpc-urls=http://${EL}:8551
22 | - --jwt-secret=/jwt/jwt.hex
23 | - --http-address=0.0.0.0
24 | - --http-port=5052
25 | - --network=${NETWORK}
26 | - --metrics
27 | - --metrics-port=5054
28 | - --metrics-address=0.0.0.0
29 | - --checkpoint-sync-url=${LIGHTHOUSE_CHECKPOINT_SYNC_URL}
30 | - --builder-url=http://${MEV}:18550
31 | - --max-empty-slots=4096
32 | ports:
33 | - ${CL_PORT_P2P:-9000}:9000/tcp # P2P TCP
33 | - ${CL_PORT_P2P:-9000}:9000/udp # P2P UDP
34 | volumes:
35 | - ./data/cl-grandine:/root/.grandine
36 | - ./jwt:/jwt:ro
37 | networks: [dvnode]
38 |
39 | # _ _ _ _ _
40 | # | (_) __ _| |__ | |_| |__ ___ _ _ ___ ___
41 | # | | |/ _` | '_ \| __| '_ \ / _ \| | | / __|/ _ \
42 | # | | | (_| | | | | |_| | | | (_) | |_| \__ \ __/
43 | # |_|_|\__, |_| |_|\__|_| |_|\___/ \__,_|___/\___|
44 | # |___/
45 |
46 | cl-lighthouse:
47 | profiles: [cl-lighthouse]
48 | image: sigp/lighthouse:${CL_LIGHTHOUSE_VERSION:-v8.0.1}
49 | restart: unless-stopped
50 | labels:
51 | - "promtail-monitored=${CL_LIGHTHOUSE_PROMTAIL_MONITORED:-true}"
52 | command: |
53 | lighthouse bn
54 | --network=${NETWORK}
55 | --checkpoint-sync-url=${LIGHTHOUSE_CHECKPOINT_SYNC_URL}
56 | --checkpoint-sync-url-timeout=600
57 | --execution-endpoint=http://${EL}:8551
58 | --execution-jwt=/opt/jwt/jwt.hex
59 | --datadir=/opt/app/beacon/
60 | --builder=http://${MEV}:18550
61 | --http
62 | --http-address=0.0.0.0
63 | --http-port=5052
64 | --metrics
65 | --metrics-address=0.0.0.0
66 | --metrics-port=5054
67 | --metrics-allow-origin="*"
68 | ports:
69 | - ${CL_PORT_P2P:-9000}:9000/tcp # P2P TCP
69 | - ${CL_PORT_P2P:-9000}:9000/udp # P2P UDP
70 | volumes:
71 | - ./data/lighthouse:/opt/app/beacon # Keep data in lighthouse and not cl-lighthouse for backwards compatibility
72 | - ./jwt:/opt/jwt
73 | networks: [dvnode]
74 |
75 | # _ _
76 | # | |_ ___| | ___ _
77 | # | __/ _ \ |/ / | | |
78 | # | || __/ <| |_| |
79 | # \__\___|_|\_\\__,_|
80 |
81 | cl-teku:
82 | profiles: [cl-teku]
83 | image: consensys/teku:${CL_TEKU_VERSION:-25.12.0}
84 | restart: unless-stopped
85 | labels:
86 | - "promtail-monitored=${CL_TEKU_PROMTAIL_MONITORED:-true}"
87 | command: |
88 | --network=${NETWORK}
89 | --checkpoint-sync-url=${LIGHTHOUSE_CHECKPOINT_SYNC_URL}
90 | --ee-endpoint=http://${EL}:8551
91 | --ee-jwt-secret-file=/jwt/jwt.hex
92 | --data-base-path=/opt/teku/data
93 | --builder-endpoint=http://${MEV}:18550
94 | --rest-api-enabled=true
95 | --rest-api-interface=0.0.0.0
96 | --rest-api-port=5052
97 | --rest-api-host-allowlist="*"
98 | --metrics-enabled=true
99 | --metrics-interface=0.0.0.0
100 | --metrics-port=5054
101 | --metrics-host-allowlist="*"
102 | volumes:
103 | - ./data/cl-teku:/opt/teku/data
104 | - ./jwt:/jwt:ro
105 | networks: [dvnode]
106 |
107 | # _ _ _
108 | # | | ___ __| | ___ ___| |_ __ _ _ __
109 | # | |/ _ \ / _` |/ _ \/ __| __/ _` | '__|
110 | # | | (_) | (_| | __/\__ \ || (_| | |
111 | # |_|\___/ \__,_|\___||___/\__\__,_|_|
112 |
113 | cl-lodestar:
114 | profiles: [cl-lodestar]
115 | image: chainsafe/lodestar:${CL_LODESTAR_VERSION:-v1.38.0}
116 | restart: unless-stopped
117 | labels:
118 | - "promtail-monitored=${CL_LODESTAR_PROMTAIL_MONITORED:-true}"
119 | command: |
120 | beacon
121 | --network=${NETWORK}
122 | --checkpointSyncUrl=${LIGHTHOUSE_CHECKPOINT_SYNC_URL}
123 | --execution.urls=http://${EL}:8551
124 | --jwt-secret=/jwt/jwt.hex
125 | --dataDir=/opt/lodestar/data
126 | --builder
127 | --builder.url=http://${MEV}:18550
128 | --rest
129 | --rest.address=0.0.0.0
130 | --rest.port=5052
131 | --metrics
132 | --metrics.address=0.0.0.0
133 | --metrics.port=5054
134 | volumes:
135 | - ./data/cl-lodestar:/opt/lodestar/data
136 | - ./jwt:/jwt:ro
137 | networks: [dvnode]
138 |
--------------------------------------------------------------------------------
/grafana/dashboards/logs_dashboard.json:
--------------------------------------------------------------------------------
1 | {
2 | "annotations": {
3 | "list": [
4 | {
5 | "builtIn": 1,
6 | "datasource": {
7 | "type": "grafana",
8 | "uid": "-- Grafana --"
9 | },
10 | "enable": true,
11 | "hide": true,
12 | "iconColor": "rgba(0, 211, 255, 1)",
13 | "name": "Annotations & Alerts",
14 | "target": {
15 | "limit": 100,
16 | "matchAny": false,
17 | "tags": [],
18 | "type": "dashboard"
19 | },
20 | "type": "dashboard"
21 | }
22 | ]
23 | },
24 | "editable": true,
25 | "fiscalYearStartMonth": 0,
26 | "graphTooltip": 0,
27 | "id": 3,
28 | "links": [
29 | {
30 | "asDropdown": false,
31 | "icon": "external link",
32 | "includeVars": false,
33 | "keepTime": false,
34 | "tags": [],
35 | "targetBlank": false,
36 | "title": "Explore Charon Logs",
37 | "tooltip": "Explore Charon Logs",
38 | "type": "link",
39 | "url": "/explore?orgId=1&left=%7B%22datasource%22:%22loki%22,%22queries%22:%5B%7B%22refId%22:%22A%22,%22expr%22:%22%7Bcompose_service%3D%5C%22charon%5C%22%7D%20%7C%20logfmt%20%7C%20line_format%20%60%7B%7B.level%7D%7D%5Ct%7B%7B.msg%7D%7D%60%22,%22queryType%22:%22range%22,%22editorMode%22:%22builder%22%7D%5D,%22range%22:%7B%22from%22:%22now-1h%22,%22to%22:%22now%22%7D%7D"
40 | }
41 | ],
42 | "liveNow": false,
43 | "panels": [
44 | {
45 | "datasource": {
46 | "type": "loki",
47 | "uid": "loki"
48 | },
49 | "description": "Top 10 count of errors and warning per minute grouped by message. ",
50 | "fieldConfig": {
51 | "defaults": {
52 | "color": {
53 | "mode": "palette-classic"
54 | },
55 | "custom": {
56 | "axisLabel": "",
57 | "axisPlacement": "auto",
58 | "barAlignment": 0,
59 | "drawStyle": "line",
60 | "fillOpacity": 0,
61 | "gradientMode": "none",
62 | "hideFrom": {
63 | "legend": false,
64 | "tooltip": false,
65 | "viz": false
66 | },
67 | "lineInterpolation": "linear",
68 | "lineWidth": 1,
69 | "pointSize": 5,
70 | "scaleDistribution": {
71 | "type": "linear"
72 | },
73 | "showPoints": "auto",
74 | "spanNulls": false,
75 | "stacking": {
76 | "group": "A",
77 | "mode": "none"
78 | },
79 | "thresholdsStyle": {
80 | "mode": "off"
81 | }
82 | },
83 | "mappings": [],
84 | "min": 0,
85 | "noValue": "No warnings",
86 | "thresholds": {
87 | "mode": "absolute",
88 | "steps": [
89 | {
90 | "color": "green",
91 | "value": null
92 | },
93 | {
94 | "color": "red",
95 | "value": 80
96 | }
97 | ]
98 | }
99 | },
100 | "overrides": [
101 | {
102 | "__systemRef": "hideSeriesFrom",
103 | "matcher": {
104 | "id": "byNames",
105 | "options": {
106 | "mode": "exclude",
107 | "names": [
108 | "E [sched] Emit scheduled slot event: eth2wrap: failed to send proposal preparations: POST failed with status 404: 404 page not found\n"
109 | ],
110 | "prefix": "All except:",
111 | "readOnly": true
112 | }
113 | },
114 | "properties": [
115 | {
116 | "id": "custom.hideFrom",
117 | "value": {
118 | "legend": false,
119 | "tooltip": false,
120 | "viz": true
121 | }
122 | }
123 | ]
124 | }
125 | ]
126 | },
127 | "gridPos": {
128 | "h": 6,
129 | "w": 12,
130 | "x": 0,
131 | "y": 0
132 | },
133 | "id": 2,
134 | "options": {
135 | "legend": {
136 | "calcs": [],
137 | "displayMode": "list",
138 | "placement": "right",
139 | "showLegend": true
140 | },
141 | "tooltip": {
142 | "mode": "single",
143 | "sort": "none"
144 | }
145 | },
146 | "targets": [
147 | {
148 | "datasource": {
149 | "type": "loki",
150 | "uid": "loki"
151 | },
152 | "editorMode": "code",
153 | "expr": "topk(10,sum(count_over_time({service=\"charon\"} | logfmt | level=~`(warn|error)` | label_format level=\"{{trunc 1 .level | upper}}\"[1m])) by (level,msg,topic))",
154 | "legendFormat": "{{level}} [{{topic}}] {{msg}}",
155 | "queryType": "range",
156 | "refId": "A"
157 | }
158 | ],
159 | "title": "Top Warnings and Errors",
160 | "type": "timeseries"
161 | },
162 | {
163 | "datasource": {
164 | "type": "loki",
165 | "uid": "loki"
166 | },
167 | "description": "Reasons why duties failed prefixed by slot and duty type",
168 | "gridPos": {
169 | "h": 6,
170 | "w": 12,
171 | "x": 12,
172 | "y": 0
173 | },
174 | "id": 4,
175 | "options": {
176 | "dedupStrategy": "none",
177 | "enableLogDetails": true,
178 | "prettifyLogMessage": false,
179 | "showCommonLabels": false,
180 | "showLabels": false,
181 | "showTime": false,
182 | "sortOrder": "Descending",
183 | "wrapLogMessage": false
184 | },
185 | "targets": [
186 | {
187 | "datasource": {
188 | "type": "loki",
189 | "uid": "loki"
190 | },
191 | "expr": "{service=\"charon\"} | logfmt | msg=`Duty failed` | line_format `{{.duty}}\t{{.reason}}`",
192 | "queryType": "range",
193 | "refId": "A"
194 | }
195 | ],
196 | "title": "Duty Failed Reasons",
197 | "type": "logs"
198 | },
199 | {
200 | "datasource": {
201 | "type": "loki",
202 | "uid": "loki"
203 | },
204 | "gridPos": {
205 | "h": 14,
206 | "w": 24,
207 | "x": 0,
208 | "y": 6
209 | },
210 | "id": 6,
211 | "options": {
212 | "dedupStrategy": "none",
213 | "enableLogDetails": true,
214 | "prettifyLogMessage": false,
215 | "showCommonLabels": false,
216 | "showLabels": false,
217 | "showTime": true,
218 | "sortOrder": "Descending",
219 | "wrapLogMessage": false
220 | },
221 | "targets": [
222 | {
223 | "datasource": {
224 | "type": "loki",
225 | "uid": "loki"
226 | },
227 | "expr": "{service=\"charon\"} | logfmt | line_format \"{{upper .level | trunc 4 }} {{.topic}}\t{{.msg}}\t\t{{if .slot}}slot={{.slot}}{{end}}{{if .duty}}duty={{.duty}}{{end}}\"",
228 | "queryType": "range",
229 | "refId": "A"
230 | }
231 | ],
232 | "title": "All Logs",
233 | "type": "logs"
234 | }
235 | ],
236 | "schemaVersion": 37,
237 | "style": "dark",
238 | "tags": [],
239 | "templating": {
240 | "list": []
241 | },
242 | "time": {
243 | "from": "now-6h",
244 | "to": "now"
245 | },
246 | "timepicker": {},
247 | "timezone": "",
248 | "title": "Charon Log Dashboard",
249 | "uid": "charon_log_dashboard",
250 | "version": 3,
251 | "weekStart": ""
252 | }
253 |
--------------------------------------------------------------------------------
/docker-compose.yml:
--------------------------------------------------------------------------------
1 | # Override any defaults specified by `${FOO:-bar}` in `.env` with `FOO=qux`.
2 | # ${VARIABLE:-default} evaluates to default if VARIABLE is unset or empty in the environment.
3 | # ${VARIABLE-default} evaluates to default only if VARIABLE is unset in the environment.
4 |
5 | services:
6 | # _ _ _ _
7 | # _ __ ___| |_| |__ ___ _ __ _ __ ___ (_)_ __ __| |
8 | # | '_ \ / _ \ __| '_ \ / _ \ '__| '_ ` _ \| | '_ \ / _` |
9 | # | | | | __/ |_| | | | __/ | | | | | | | | | | | (_| |
10 | # |_| |_|\___|\__|_| |_|\___|_| |_| |_| |_|_|_| |_|\__,_|
11 |
12 | nethermind:
13 | image: nethermind/nethermind:${NETHERMIND_VERSION:-1.35.2}
14 | profiles: [""]
15 | restart: unless-stopped
16 | ports:
17 | - ${NETHERMIND_PORT_P2P:-30303}:30303/tcp # P2P TCP
18 | - ${NETHERMIND_PORT_P2P:-30303}:30303/udp # P2P UDP
19 | - ${NETHERMIND_IP_HTTP:-127.0.0.1}:${NETHERMIND_PORT_HTTP:-8545}:8545 # JSON-RPC
20 | - ${NETHERMIND_IP_ENGINE:-127.0.0.1}:${NETHERMIND_PORT_ENGINE:-8551}:8551 # ENGINE-API
21 | labels:
22 | - "promtail-monitored=${NETHERMIND_PROMTAIL_MONITORED:-true}"
23 | command: |
24 | --config=${NETWORK}
25 | --datadir=data
26 | --HealthChecks.Enabled=true
27 | --JsonRpc.Enabled=true
28 | --JsonRpc.JwtSecretFile="/root/jwt/jwt.hex"
29 | --JsonRpc.EngineHost=0.0.0.0
30 | --JsonRpc.EnginePort=8551
31 | --JsonRpc.Host=0.0.0.0
32 | --JsonRpc.Port=8545
33 | --Metrics.Enabled=true
34 | --Metrics.ExposePort=8008
35 | --Sync.SnapSync=true
36 | --History.Pruning=Rolling
37 | networks: [dvnode]
38 | volumes:
39 | - ./data/nethermind:/nethermind/data
40 | - ./jwt:/root/jwt
41 |
42 | # _ _ _ _ _
43 | # | (_) __ _| |__ | |_| |__ ___ _ _ ___ ___
44 | # | | |/ _` | '_ \| __| '_ \ / _ \| | | / __|/ _ \
45 | # | | | (_| | | | | |_| | | | (_) | |_| \__ \ __/
46 | # |_|_|\__, |_| |_|\__|_| |_|\___/ \__,_|___/\___|
47 | # |___/
48 |
49 | lighthouse:
50 | image: sigp/lighthouse:${LIGHTHOUSE_VERSION:-v8.0.1}
51 | profiles: [""]
52 | ports:
53 | - ${LIGHTHOUSE_PORT_P2P:-9000}:9000/tcp # P2P TCP
54 | - 5054:5054/tcp # Metrics
55 | - ${LIGHTHOUSE_PORT_P2P:-9000}:9000/udp # P2P UDP
56 | labels:
57 | - "promtail-monitored=${LIGHTHOUSE_PROMTAIL_MONITORED:-true}"
58 | command: |
59 | lighthouse bn
60 | --network=${NETWORK}
61 | --checkpoint-sync-url=${LIGHTHOUSE_CHECKPOINT_SYNC_URL}
62 | --checkpoint-sync-url-timeout=600
63 | --execution-endpoint=http://nethermind:8551
64 | --execution-jwt=/opt/jwt/jwt.hex
65 | --datadir=/opt/app/beacon/
66 | --builder=http://mev-boost:18550
67 | --http
68 | --http-address=0.0.0.0
69 | --http-port=5052
70 | --metrics
71 | --metrics-address=0.0.0.0
72 | --metrics-port=5054
73 | --metrics-allow-origin="*"
74 | networks: [dvnode]
75 | volumes:
76 | - ./data/lighthouse:/opt/app/beacon
77 | - ./jwt:/opt/jwt
78 | restart: unless-stopped
79 |
80 | # _
81 | # ___| |__ __ _ _ __ ___ _ __
82 | # / __| '_ \ / _` | '__/ _ \| '_ \
83 | # | (__| | | | (_| | | | (_) | | | |
84 | # \___|_| |_|\__,_|_| \___/|_| |_|
85 |
86 | charon:
87 | image: obolnetwork/charon:${CHARON_VERSION:-v1.8.0}
88 | environment:
89 | - CHARON_BEACON_NODE_ENDPOINTS=${CHARON_BEACON_NODE_ENDPOINTS:-http://${CL:-lighthouse}:5052}
90 | - CHARON_BEACON_NODE_TIMEOUT=${CHARON_BEACON_NODE_TIMEOUT:-3s}
91 | - CHARON_BEACON_NODE_SUBMIT_TIMEOUT=${CHARON_BEACON_NODE_SUBMIT_TIMEOUT:-4s}
92 | - CHARON_FALLBACK_BEACON_NODE_ENDPOINTS=${CHARON_FALLBACK_BEACON_NODE_ENDPOINTS:-}
93 | - CHARON_LOG_LEVEL=${CHARON_LOG_LEVEL:-info}
94 | - CHARON_LOG_FORMAT=${CHARON_LOG_FORMAT:-console}
95 | - CHARON_P2P_RELAYS=${CHARON_P2P_RELAYS:-https://0.relay.obol.tech,https://1.relay.obol.tech/}
96 | - CHARON_P2P_EXTERNAL_HOSTNAME=${CHARON_P2P_EXTERNAL_HOSTNAME:-} # Empty default required to avoid warnings.
97 | - CHARON_P2P_TCP_ADDRESS=0.0.0.0:${CHARON_PORT_P2P_TCP:-3610}
98 | - CHARON_VALIDATOR_API_ADDRESS=0.0.0.0:3600
99 | - CHARON_MONITORING_ADDRESS=0.0.0.0:3620
100 | - CHARON_BUILDER_API=${BUILDER_API_ENABLED:-true}
101 | - CHARON_FEATURE_SET_ENABLE=${CHARON_FEATURE_SET_ENABLE:-}
102 | - CHARON_LOKI_ADDRESSES=${CHARON_LOKI_ADDRESSES:-http://loki:3100/loki/api/v1/push}
103 | - CHARON_LOKI_SERVICE=charon
104 | - CHARON_NICKNAME=${CHARON_NICKNAME:-}
105 | - CHARON_EXECUTION_CLIENT_RPC_ENDPOINT=${CHARON_EXECUTION_CLIENT_RPC_ENDPOINT:-http://nethermind:8545}
106 | ports:
107 | - ${CHARON_PORT_P2P_TCP:-3610}:${CHARON_PORT_P2P_TCP:-3610}/tcp # P2P TCP libp2p
108 | labels:
109 | - "promtail-monitored=${CHARON_PROMTAIL_MONITORED:-true}"
110 | networks: [dvnode]
111 | volumes:
112 | - .charon:/opt/charon/.charon
113 | restart: unless-stopped
114 | healthcheck:
115 | test: wget -qO- http://localhost:3620/readyz
116 |
117 | # _ _ _
118 | # | | ___ __| | ___ ___| |_ __ _ _ __
119 | # | |/ _ \ / _` |/ _ \/ __| __/ _` | '__|
120 | # | | (_) | (_| | __/\__ \ || (_| | |
121 | # |_|\___/ \__,_|\___||___/\__\__,_|_|
122 |
123 | lodestar:
124 | image: chainsafe/lodestar:${LODESTAR_VERSION:-v1.37.0}
125 | profiles: [""]
126 | depends_on: [charon]
127 | entrypoint: /opt/lodestar/run.sh
128 | networks: [dvnode]
129 | environment:
130 | BEACON_NODE_ADDRESS: http://charon:3600
131 | NETWORK: ${NETWORK}
132 | BUILDER_API_ENABLED: ${BUILDER_API_ENABLED:-true}
133 | BUILDER_SELECTION: ${BUILDER_SELECTION:-builderalways}
134 | labels:
135 | - "promtail-monitored=${LODESTAR_PROMTAIL_MONITORED:-true}"
136 | volumes:
137 | - ./lodestar/run.sh:/opt/lodestar/run.sh
138 | - .charon/validator_keys:/home/charon/validator_keys
139 | - ./data/lodestar:/opt/data
140 | restart: unless-stopped
141 |
142 | # _ _
143 | # _ __ ___ _____ __ | |__ ___ ___ ___| |_
144 | # | '_ ` _ \ / _ \ \ / /____| '_ \ / _ \ / _ \/ __| __|
145 | # | | | | | | __/\ V /_____| |_) | (_) | (_) \__ \ |_
146 | # |_| |_| |_|\___| \_/ |_.__/ \___/ \___/|___/\__|
147 |
148 | mev-boost:
149 | image: ${MEVBOOST_IMAGE:-flashbots/mev-boost}:${MEVBOOST_VERSION:-1.10.1}
150 | profiles: [""]
151 | command: |
152 | -${NETWORK}
153 | -loglevel=debug
154 | -addr=0.0.0.0:18550
155 | -relay-check
156 | -relays=${MEVBOOST_RELAYS}
157 | labels:
158 | - "promtail-monitored=${MEV_BOOST_PROMTAIL_MONITORED:-true}"
159 | networks: [dvnode]
160 | restart: unless-stopped
161 |
162 | # _ _ _
163 | # _ __ ___ ___ _ __ (_) |_ ___ _ __(_)_ __ __ _
164 | # | '_ ` _ \ / _ \| '_ \| | __/ _ \| '__| | '_ \ / _` |
165 | # | | | | | | (_) | | | | | || (_) | | | | | | | (_| |
166 | # |_| |_| |_|\___/|_| |_|_|\__\___/|_| |_|_| |_|\__, |
167 | # |___/
168 |
169 | prometheus:
170 | image: prom/prometheus:${PROMETHEUS_VERSION:-v3.7.3}
171 | user: ":"
172 | networks: [dvnode]
173 | environment:
174 | PROM_REMOTE_WRITE_TOKEN: ${PROM_REMOTE_WRITE_TOKEN}
175 | SERVICE_OWNER: ${SERVICE_OWNER:-obol-cdvn}
176 | volumes:
177 | - ./prometheus:/etc/prometheus
178 | - ./data/prometheus:/prometheus
179 | entrypoint: /etc/prometheus/run.sh
180 | restart: unless-stopped
181 |
182 | grafana:
183 | image: grafana/grafana:${GRAFANA_VERSION:-12.2.1}
184 | user: ":"
185 | ports:
186 | - ${MONITORING_IP_GRAFANA:-0.0.0.0}:${MONITORING_PORT_GRAFANA:-3000}:3000
187 | networks: [dvnode]
188 | volumes:
189 | - ./grafana/datasource.yml:/etc/grafana/provisioning/datasources/datasource.yml
190 | - ./grafana/dashboards.yml:/etc/grafana/provisioning/dashboards/dashboards.yml
191 | - ./grafana/grafana.ini:/etc/grafana/grafana.ini:ro
192 | - ./grafana/dashboards:/etc/dashboards
193 | - ./data/grafana:/var/lib/grafana
194 | restart: unless-stopped
195 |
196 | networks:
197 | dvnode:
198 |
--------------------------------------------------------------------------------
/.env.sample.hoodi:
--------------------------------------------------------------------------------
1 | # This is a sample environment file that allows overriding default configuration defined
2 | # in docker-compose.yml. Rename this file to `.env` and then uncomment and set any variable below.
3 |
4 | # Overrides network for all the relevant services.
5 | NETWORK=hoodi
6 |
7 | # Execution layer client to be used in a DV setup. Uncomment only the desired client.
8 | EL=el-nethermind
9 | #EL=el-reth
10 | #EL=el-none
11 |
12 | # Consensus layer client to be used in a DV setup. Uncomment only the desired client.
13 | CL=cl-lighthouse
14 | #CL=cl-grandine
15 | #CL=cl-teku
16 | #CL=cl-lodestar
17 | #CL=cl-none
18 |
19 | # Validator client to be used in a DV setup. Uncomment only the desired client.
20 | VC=vc-lodestar
21 | #VC=vc-nimbus
22 | #VC=vc-prysm
23 | #VC=vc-teku
24 |
25 | # MEV client to be used in a DV setup. Uncomment only the desired client.
26 | MEV=mev-mevboost
27 | #MEV=mev-commitboost
28 | #MEV=mev-none
29 |
30 | # Do not edit. These profiles and files dictate Docker which client types and from where to start for execution, consensus, validator and MEV.
31 | # The actual adjustable values are specified above
32 | COMPOSE_PROFILES=${EL},${CL},${VC},${MEV}
33 | COMPOSE_FILE=compose-el.yml:compose-cl.yml:compose-vc.yml:compose-mev.yml:docker-compose.yml
34 |
35 | # Enables Builder API.
36 | BUILDER_API_ENABLED=true
37 |
38 | ######### Execution Layer Config #########
39 |
40 | # EL host exposed IPs and ports.
41 | #EL_PORT_P2P=
42 | #EL_IP_HTTP=
43 | #EL_PORT_HTTP=
44 | #EL_IP_ENGINE=
45 | #EL_PORT_ENGINE=
46 |
47 | # Nethermind docker container image version.
48 | # See available tags https://hub.docker.com/r/nethermind/nethermind/tags
49 | #EL_NETHERMIND_VERSION=
50 |
51 | # Reth docker container image version.
52 | # See available tags https://github.com/paradigmxyz/reth/pkgs/container/reth
53 | #EL_RETH_VERSION=
54 |
55 | ######### Consensus Layer Config #########
56 |
57 | # CL beacon node host exposed ports.
58 | #CL_PORT_P2P=
59 |
60 | # Lighthouse beacon node checkpoint sync URL used by consensus layer to fast sync.
61 | LIGHTHOUSE_CHECKPOINT_SYNC_URL=https://checkpoint-sync.hoodi.ethpandaops.io/
62 |
63 | # Lighthouse beacon node docker container image version.
64 | # See available tags https://hub.docker.com/r/sigp/lighthouse/tags.
65 | #CL_LIGHTHOUSE_VERSION=
66 |
67 | # Grandine beacon node docker container image version.
68 | # See available tags https://hub.docker.com/r/sifrai/grandine/tags.
69 | #CL_GRANDINE_VERSION=
70 |
71 | # Teku beacon node docker container image version.
72 | # See available tags https://hub.docker.com/r/consensys/teku/tags
73 | #CL_TEKU_VERSION=
74 |
75 | # Lodestar beacon node docker container image version.
76 | # See available tags https://hub.docker.com/r/chainsafe/lodestar/tags
77 | #CL_LODESTAR_VERSION=
78 |
79 | ######### Validator Client Config #########
80 |
81 | # Override prometheus metrics port for validator client.
82 | #VC_PORT_METRICS=
83 |
84 | # Lodestar validator client docker container image version.
85 | # See available tags https://hub.docker.com/r/chainsafe/lodestar/tags
86 | #VC_LODESTAR_VERSION=
87 |
88 | #VC_LODESTAR_BUILDER_SELECTION=
89 |
90 | # Nimbus validator client docker container image version.
91 | # See available tags https://hub.docker.com/r/statusim/nimbus-validator-client/tags
92 | #VC_NIMBUS_VERSION=
93 |
94 | # Prysm validator client docker container image version.
95 | # See available tags https://hub.docker.com/r/offchainlabs/prysm-validator/tags
96 | #VC_PRYSM_VERSION=
97 |
98 | # Teku validator client docker container image version.
99 | # See available tags https://hub.docker.com/r/consensys/teku/tags
100 | #VC_TEKU_VERSION=
101 |
102 | ######### MEV Config #########
103 |
104 | # MEV timeouts.
105 | # N.B.: Commit-boost uses TOML configuration instead of env variables. Configure those at commit-boost/.
106 | #MEV_TIMEOUT_GETHEADER=
107 | #MEV_TIMEOUT_GETPAYLOAD=
108 | #MEV_TIMEOUT_REGVAL=
109 |
110 | # Comma separated list of MEV relays. You can choose public relays from https://enchanted-direction-844.notion.site/6d369eb33f664487800b0dedfe32171e?v=d255247c822c409f99c498aeb6a4e51d.
111 | # N.B.: Commit-boost uses TOML configuration instead of env variables. Configure those at commit-boost/.
112 | MEV_RELAYS=https://0x98f0ef62f00780cf8eb06701a7d22725b9437d4768bb19b363e882ae87129945ec206ec2dc16933f31d983f8225772b6@hoodi.aestus.live
113 |
114 | # MEV-Boost docker container image version.
115 | #MEV_MEVBOOST_VERSION=
116 |
117 | # Commit-Boost docker container image version.
118 | #MEV_COMMITBOOST_VERSION=
119 |
120 | ######### Charon Config #########
121 |
122 | # Charon docker container image version.
123 | # See available tags https://hub.docker.com/r/obolnetwork/charon/tags.
124 | #CHARON_VERSION=
125 |
126 | # Define custom relays. One or more ENRs or an http URL that return an ENR. Use a comma separated list excluding spaces.
127 | #CHARON_P2P_RELAYS=
128 |
129 | # Connect to one or more external beacon nodes. Use a comma separated list excluding spaces.
130 | #CHARON_BEACON_NODE_ENDPOINTS=
131 |
132 | # Supply optional HTTP headers during beacon node requests. These headers are sent to all primary and fallback endpoints, be sure to rely on trusted BNs only.
133 | #CHARON_BEACON_NODE_HEADERS=
134 |
135 | # Specify one or more fallback beacon node endpoints, which are called in the case that the primary beacon nodes are offline or unhealthy.
136 | # Use fallback beacon nodes sparingly, particularly if latency is high, which can impact DV cluster performance.
137 | #CHARON_FALLBACK_BEACON_NODE_ENDPOINTS=
138 |
139 | # Increase the duration charon will wait for requests to the beacon node.
140 | #CHARON_BEACON_NODE_TIMEOUT=
141 |
142 | # Increase the duration charon will wait while publishing data to the beacon node.
143 | #CHARON_BEACON_NODE_SUBMIT_TIMEOUT=
144 |
145 | # The address of the execution engine JSON-RPC API.
146 | #CHARON_EXECUTION_CLIENT_RPC_ENDPOINT=
147 |
148 | # Override the charon logging level; debug, info, warning, error.
149 | #CHARON_LOG_LEVEL=
150 |
151 | # Override the charon logging format; console, logfmt, json. Grafana panels require logfmt.
152 | #CHARON_LOG_FORMAT=
153 |
154 | # Advertise a custom external DNS hostname or IP address for libp2p peer discovery.
155 | #CHARON_P2P_EXTERNAL_HOSTNAME=
156 |
157 | # Loki log aggregation server addresses. Disable loki log aggregation by setting an empty address.
158 | #CHARON_LOKI_ADDRESSES=
159 |
160 | # Charon Cluster Name. Mandatory to send logs with Promtail.
161 | #CLUSTER_NAME=
162 |
163 | # Charon Cluster Peer. Mandatory to send logs with Promtail.
164 | #CLUSTER_PEER=
165 |
166 | # Nickname to identify this charon node on monitoring (max 32 characters).
167 | #CHARON_NICKNAME=
168 |
169 | # Docker network of running charon node. See `docker network ls`.
170 | #CHARON_DOCKER_NETWORK=
171 |
172 | # Charon host exposed ports.
173 | #CHARON_PORT_P2P_TCP=
174 |
175 | ######### Monitoring Config #########
176 |
177 | # Grafana docker container image version.
178 | # See available tags https://github.com/grafana/grafana/releases.
179 | #GRAFANA_VERSION=
180 |
181 | # Grafana host exposed IP and port.
182 | #MONITORING_IP_GRAFANA=
183 | #MONITORING_PORT_GRAFANA=
184 |
185 | # Prometheus docker container image version.
186 | # See available tags https://github.com/prometheus/prometheus/releases.
187 | #PROMETHEUS_VERSION=
188 |
189 | # Prometheus remote write token used for accessing external prometheus.
190 | #PROM_REMOTE_WRITE_TOKEN=
191 |
192 | # Prometheus service owner used to uniquely identify user from which metrics are pushed.
193 | #SERVICE_OWNER=charon_user
194 |
195 | # Uncomment these if you have log exporting with Promtail
196 | # and want to disable log export on a particular container.
197 | #EL_NETHERMIND_PROMTAIL_MONITORED=false
198 | #EL_RETH_PROMTAIL_MONITORED=false
199 | #CL_LIGHTHOUSE_PROMTAIL_MONITORED=false
200 | #CL_GRANDINE_PROMTAIL_MONITORED=false
201 | #CL_TEKU_PROMTAIL_MONITORED=false
202 | #CL_LODESTAR_PROMTAIL_MONITORED=false
203 | #CHARON_PROMTAIL_MONITORED=false
204 | #VC_LODESTAR_PROMTAIL_MONITORED=false
205 | #VC_NIMBUS_PROMTAIL_MONITORED=false
206 | #VC_PRYSM_PROMTAIL_MONITORED=false
207 | #VC_TEKU_PROMTAIL_MONITORED=false
208 | #MEV_MEV_BOOST_PROMTAIL_MONITORED=false
209 | #MEV_COMMIT_BOOST_PROMTAIL_MONITORED=false
210 | #EJECTOR_PROMTAIL_MONITORED=false
211 | #DV_EXIT_PROMTAIL_MONITORED=false
212 |
213 | ######### Debug Config #########
214 |
215 | # This applies to compose-debug.yml only.
216 |
217 | # Prometheus Node exporter docker container image version.
218 | # See available tags https://hub.docker.com/r/bitnamilegacy/node-exporter/tags.
219 | #NODE_EXPORTER_VERSION=
220 |
221 | # Grafana Tempo docker container image version.
222 | # Use Grafana Explore to access Tempo data.
223 | # See available tags https://hub.docker.com/r/grafana/tempo/tags.
224 | #TEMPO_VERSION=
225 |
226 | # Grafana Loki docker container image version.
227 | # See available tags https://hub.docker.com/r/grafana/loki/tags.
228 | #LOKI_VERSION=
229 |
230 | # Loki host exposed port.
231 | #MONITORING_PORT_LOKI=
232 |
--------------------------------------------------------------------------------
/.env.sample.holesky:
--------------------------------------------------------------------------------
1 | # This is a sample environment file that allows overriding default configuration defined
2 | # in docker-compose.yml. Rename this file to `.env` and then uncomment and set any variable below.
3 |
4 | # Overrides network for all the relevant services.
5 | NETWORK=holesky
6 |
7 | # Execution layer client to be used in a DV setup. Uncomment only the desired client.
8 | EL=el-nethermind
9 | #EL=el-reth
10 | #EL=el-none
11 |
12 | # Consensus layer client to be used in a DV setup. Uncomment only the desired client.
13 | CL=cl-lighthouse
14 | #CL=cl-grandine
15 | #CL=cl-teku
16 | #CL=cl-lodestar
17 | #CL=cl-none
18 |
19 | # Validator client to be used in a DV setup. Uncomment only the desired client.
20 | VC=vc-lodestar
21 | #VC=vc-nimbus
22 | #VC=vc-prysm
23 | #VC=vc-teku
24 |
25 | # MEV client to be used in a DV setup. Uncomment only the desired client.
26 | MEV=mev-mevboost
27 | #MEV=mev-commitboost
28 | #MEV=mev-none
29 |
30 | # Do not edit. These profiles and files tell Docker which client types to start — and from which compose files — for execution, consensus, validator and MEV.
31 | # The actual adjustable values are specified above.
32 | COMPOSE_PROFILES=${EL},${CL},${VC},${MEV}
33 | COMPOSE_FILE=compose-el.yml:compose-cl.yml:compose-vc.yml:compose-mev.yml:docker-compose.yml
34 |
35 | # Enables Builder API.
36 | BUILDER_API_ENABLED=true
37 |
38 | ######### Execution Layer Config #########
39 |
40 | # EL host exposed IPs and ports.
41 | #EL_PORT_P2P=
42 | #EL_IP_HTTP=
43 | #EL_PORT_HTTP=
44 | #EL_IP_ENGINE=
45 | #EL_PORT_ENGINE=
46 |
47 | # Nethermind docker container image version.
48 | # See available tags https://hub.docker.com/r/nethermind/nethermind/tags
49 | #NETHERMIND_VERSION=
50 |
51 | # Reth docker container image version.
52 | # See available tags https://github.com/paradigmxyz/reth/pkgs/container/reth
53 | #RETH_VERSION=
54 |
55 | ######### Consensus Layer Config #########
56 |
57 | # CL beacon node host exposed ports.
58 | #CL_PORT_P2P=
59 |
60 | # Lighthouse beacon node checkpoint sync URL used by consensus layer to fast sync.
61 | LIGHTHOUSE_CHECKPOINT_SYNC_URL=https://checkpoint-sync.holesky.ethpandaops.io/
62 |
63 | # Lighthouse beacon node docker container image version.
64 | # See available tags https://hub.docker.com/r/sigp/lighthouse/tags.
65 | #CL_LIGHTHOUSE_VERSION=
66 |
67 | # Grandine beacon node docker container image version.
68 | # See available tags https://hub.docker.com/r/sifrai/grandine/tags.
69 | #CL_GRANDINE_VERSION=
70 |
71 | # Teku beacon node docker container image version.
72 | # See available tags https://hub.docker.com/r/consensys/teku/tags
73 | #CL_TEKU_VERSION=
74 |
75 | # Lodestar beacon node docker container image version.
76 | # See available tags https://hub.docker.com/r/chainsafe/lodestar/tags
77 | #CL_LODESTAR_VERSION=
78 |
79 | ######### Validator Client Config #########
80 |
81 | # Override prometheus metrics port for validator client.
82 | #VC_PORT_METRICS=
83 |
84 | # Lodestar validator client docker container image version.
85 | # See available tags https://hub.docker.com/r/chainsafe/lodestar/tags
86 | #VC_LODESTAR_VERSION=
87 |
88 | #VC_LODESTAR_BUILDER_SELECTION=
89 |
90 | # Nimbus validator client docker container image version.
91 | # See available tags https://hub.docker.com/r/statusim/nimbus-validator-client/tags
92 | #VC_NIMBUS_VERSION=
93 |
94 | # Prysm validator client docker container image version.
95 | # See available tags https://hub.docker.com/r/offchainlabs/prysm-validator/tags
96 | #VC_PRYSM_VERSION=
97 |
98 | # Teku validator client docker container image version.
99 | # See available tags https://hub.docker.com/r/consensys/teku/tags
100 | #VC_TEKU_VERSION=
101 |
102 | ######### MEV Config #########
103 |
104 | # MEV timeouts.
105 | # N.B.: Commit-boost uses TOML configuration instead of env variables. Configure those at commit-boost/.
106 | #MEV_TIMEOUT_GETHEADER=
107 | #MEV_TIMEOUT_GETPAYLOAD=
108 | #MEV_TIMEOUT_REGVAL=
109 |
110 | # Comma separated list of MEV relays. You can choose public relays from https://enchanted-direction-844.notion.site/6d369eb33f664487800b0dedfe32171e?v=d255247c822c409f99c498aeb6a4e51d.
111 | # N.B.: Commit-boost uses TOML configuration instead of env variables. Configure those at commit-boost/.
112 | MEV_RELAYS=https://0xab78bf8c781c58078c3beb5710c57940874dd96aef2835e7742c866b4c7c0406754376c2c8285a36c630346aa5c5f833@holesky.aestus.live,https://0xb1559beef7b5ba3127485bbbb090362d9f497ba64e177ee2c8e7db74746306efad687f2cf8574e38d70067d40ef136dc@relay-stag.ultrasound.money,https://0xaa58208899c6105603b74396734a6263cc7d947f444f396a90f7b7d3e65d102aec7e5e5291b27e08d02c50a050825c2f@holesky.titanrelay.xyz
113 |
114 | # MEV-Boost docker container image version.
115 | #MEV_MEVBOOST_VERSION=
116 |
117 | # Commit-Boost docker container image version.
118 | #MEV_COMMITBOOST_VERSION=
119 |
120 | ######### Charon Config #########
121 |
122 | # Charon docker container image version.
123 | # See available tags https://hub.docker.com/r/obolnetwork/charon/tags.
124 | #CHARON_VERSION=
125 |
126 | # Define custom relays. One or more ENRs, or an HTTP URL that returns an ENR. Use a comma-separated list excluding spaces.
127 | #CHARON_P2P_RELAYS=
128 |
129 | # Connect to one or more external beacon nodes. Use a comma separated list excluding spaces.
130 | #CHARON_BEACON_NODE_ENDPOINTS=
131 |
132 | # Supply optional HTTP headers during beacon node requests. These headers are sent to all primary and fallback endpoints, be sure to rely on trusted BNs only.
133 | #CHARON_BEACON_NODE_HEADERS=
134 |
135 | # Specify one or more fallback beacon node endpoints, which are called in the case that the primary beacon nodes are offline or unhealthy.
136 | # Use fallback beacon nodes sparingly, particularly if latency is high, which can impact DV cluster performance.
137 | #CHARON_FALLBACK_BEACON_NODE_ENDPOINTS=
138 |
139 | # Increase the duration charon will wait for requests to the beacon node.
140 | #CHARON_BEACON_NODE_TIMEOUT=
141 |
142 | # Increase the duration charon will wait while publishing data to the beacon node.
143 | #CHARON_BEACON_NODE_SUBMIT_TIMEOUT=
144 |
145 | # The address of the execution engine JSON-RPC API.
146 | #CHARON_EXECUTION_CLIENT_RPC_ENDPOINT=
147 |
148 | # Override the charon logging level; debug, info, warning, error.
149 | #CHARON_LOG_LEVEL=
150 |
151 | # Override the charon logging format; console, logfmt, json. Grafana panels require logfmt.
152 | #CHARON_LOG_FORMAT=
153 |
154 | # Advertise a custom external DNS hostname or IP address for libp2p peer discovery.
155 | #CHARON_P2P_EXTERNAL_HOSTNAME=
156 |
157 | # Loki log aggregation server addresses. Disable loki log aggregation by setting an empty address.
158 | #CHARON_LOKI_ADDRESSES=
159 |
160 | # Charon Cluster Name. Mandatory to send logs with Promtail.
161 | #CLUSTER_NAME=
162 |
163 | # Charon Cluster Peer. Mandatory to send logs with Promtail.
164 | #CLUSTER_PEER=
165 |
166 | # Nickname to identify this charon node on monitoring (max 32 characters).
167 | #CHARON_NICKNAME=
168 |
169 | # Docker network of running charon node. See `docker network ls`.
170 | #CHARON_DOCKER_NETWORK=
171 |
172 | # Charon host exposed ports.
173 | #CHARON_PORT_P2P_TCP=
174 |
175 | ######### Monitoring Config #########
176 |
177 | # Grafana docker container image version.
178 | # See available tags https://github.com/grafana/grafana/releases.
179 | #GRAFANA_VERSION=
180 |
181 | # Grafana host exposed IP and port.
182 | #MONITORING_IP_GRAFANA=
183 | #MONITORING_PORT_GRAFANA=
184 |
185 | # Prometheus docker container image version.
186 | # See available tags https://github.com/prometheus/prometheus/releases.
187 | #PROMETHEUS_VERSION=
188 |
189 | # Prometheus remote write token used for accessing external prometheus.
190 | #PROM_REMOTE_WRITE_TOKEN=
191 |
192 | # Prometheus service owner used to uniquely identify user from which metrics are pushed.
193 | #SERVICE_OWNER=charon_user
194 |
195 | # Uncomment these if you have log exporting with Promtail
196 | # and want to disable log export on a particular container.
197 | #EL_NETHERMIND_PROMTAIL_MONITORED=false
198 | #EL_RETH_PROMTAIL_MONITORED=false
199 | #CL_LIGHTHOUSE_PROMTAIL_MONITORED=false
200 | #CL_GRANDINE_PROMTAIL_MONITORED=false
201 | #CL_TEKU_PROMTAIL_MONITORED=false
202 | #CL_LODESTAR_PROMTAIL_MONITORED=false
203 | #CHARON_PROMTAIL_MONITORED=false
204 | #VC_LODESTAR_PROMTAIL_MONITORED=false
205 | #VC_NIMBUS_PROMTAIL_MONITORED=false
206 | #VC_PRYSM_PROMTAIL_MONITORED=false
207 | #VC_TEKU_PROMTAIL_MONITORED=false
208 | #MEV_MEV_BOOST_PROMTAIL_MONITORED=false
209 | #MEV_COMMIT_BOOST_PROMTAIL_MONITORED=false
210 | #EJECTOR_PROMTAIL_MONITORED=false
211 | #DV_EXIT_PROMTAIL_MONITORED=false
212 |
213 | ######### Debug Config #########
214 |
215 | # This applies to compose-debug.yml only.
216 |
217 | # Prometheus Node exporter docker container image version.
218 | # See available tags https://hub.docker.com/r/bitnamilegacy/node-exporter/tags.
219 | #NODE_EXPORTER_VERSION=
220 |
221 | # Grafana Tempo docker container image version.
222 | # Use Grafana Explore to access Tempo data.
223 | # See available tags https://hub.docker.com/r/grafana/tempo/tags.
224 | #TEMPO_VERSION=
225 |
226 | # Grafana Loki docker container image version.
227 | # See available tags https://hub.docker.com/r/grafana/loki/tags.
228 | #LOKI_VERSION=
229 |
230 | # Loki host exposed port.
231 | #MONITORING_PORT_LOKI=
232 |
--------------------------------------------------------------------------------
/.env.sample.mainnet:
--------------------------------------------------------------------------------
1 | # This is a sample environment file that allows overriding default configuration defined
2 | # in docker-compose.yml. Rename this file to `.env` and then uncomment and set any variable below.
3 |
4 | # Overrides network for all the relevant services.
5 | NETWORK=mainnet
6 |
7 | # Execution layer client to be used in a DV setup. Uncomment only the desired client.
8 | EL=el-nethermind
9 | #EL=el-reth
10 | #EL=el-none
11 |
12 | # Consensus layer client to be used in a DV setup. Uncomment only the desired client.
13 | CL=cl-lighthouse
14 | #CL=cl-grandine
15 | #CL=cl-teku
16 | #CL=cl-lodestar
17 | #CL=cl-none
18 |
19 | # Validator client to be used in a DV setup. Uncomment only the desired client.
20 | VC=vc-lodestar
21 | #VC=vc-nimbus
22 | #VC=vc-prysm
23 | #VC=vc-teku
24 |
25 | # MEV client to be used in a DV setup. Uncomment only the desired client.
26 | MEV=mev-mevboost
27 | #MEV=mev-commitboost
28 | #MEV=mev-none
29 |
30 | # Do not edit. These profiles and files tell Docker which client types to start — and from which compose files — for execution, consensus, validator and MEV.
31 | # The actual adjustable values are specified above.
32 | COMPOSE_PROFILES=${EL},${CL},${VC},${MEV}
33 | COMPOSE_FILE=compose-el.yml:compose-cl.yml:compose-vc.yml:compose-mev.yml:docker-compose.yml
34 |
35 | # Enables Builder API.
36 | BUILDER_API_ENABLED=true
37 |
38 | ######### Execution Layer Config #########
39 |
40 | # EL host exposed IPs and ports.
41 | #EL_PORT_P2P=
42 | #EL_IP_HTTP=
43 | #EL_PORT_HTTP=
44 | #EL_IP_ENGINE=
45 | #EL_PORT_ENGINE=
46 |
47 | # Nethermind docker container image version.
48 | # See available tags https://hub.docker.com/r/nethermind/nethermind/tags
49 | #NETHERMIND_VERSION=
50 |
51 | # Reth docker container image version.
52 | # See available tags https://github.com/paradigmxyz/reth/pkgs/container/reth
53 | #RETH_VERSION=
54 |
55 | ######### Consensus Layer Config #########
56 |
57 | # CL beacon node host exposed ports.
58 | #CL_PORT_P2P=
59 |
60 | # Lighthouse beacon node checkpoint sync URL used by consensus layer to fast sync.
61 | LIGHTHOUSE_CHECKPOINT_SYNC_URL=https://mainnet.checkpoint.sigp.io/
62 |
63 | # Lighthouse beacon node docker container image version.
64 | # See available tags https://hub.docker.com/r/sigp/lighthouse/tags.
65 | #CL_LIGHTHOUSE_VERSION=
66 |
67 | # Grandine beacon node docker container image version.
68 | # See available tags https://hub.docker.com/r/sifrai/grandine/tags.
69 | #CL_GRANDINE_VERSION=
70 |
71 | # Teku beacon node docker container image version.
72 | # See available tags https://hub.docker.com/r/consensys/teku/tags
73 | #CL_TEKU_VERSION=
74 |
75 | # Lodestar beacon node docker container image version.
76 | # See available tags https://hub.docker.com/r/chainsafe/lodestar/tags
77 | #CL_LODESTAR_VERSION=
78 |
79 | ######### Validator Client Config #########
80 |
81 | # Override prometheus metrics port for validator client.
82 | #VC_PORT_METRICS=
83 |
84 | # Lodestar validator client docker container image version.
85 | # See available tags https://hub.docker.com/r/chainsafe/lodestar/tags
86 | #VC_LODESTAR_VERSION=
87 |
88 | #VC_LODESTAR_BUILDER_SELECTION=
89 |
90 | # Nimbus validator client docker container image version.
91 | # See available tags https://hub.docker.com/r/statusim/nimbus-validator-client/tags
92 | #VC_NIMBUS_VERSION=
93 |
94 | # Prysm validator client docker container image version.
95 | # See available tags https://hub.docker.com/r/offchainlabs/prysm-validator/tags
96 | #VC_PRYSM_VERSION=
97 |
98 | # Teku validator client docker container image version.
99 | # See available tags https://hub.docker.com/r/consensys/teku/tags
100 | #VC_TEKU_VERSION=
101 |
102 | ######### MEV Config #########
103 |
104 | # MEV timeouts.
105 | # N.B.: Commit-boost uses TOML configuration instead of env variables. Configure those at commit-boost/.
106 | #MEV_TIMEOUT_GETHEADER=
107 | #MEV_TIMEOUT_GETPAYLOAD=
108 | #MEV_TIMEOUT_REGVAL=
109 |
110 | # Comma separated list of MEV relays. You can choose public relays from https://enchanted-direction-844.notion.site/6d369eb33f664487800b0dedfe32171e?v=d255247c822c409f99c498aeb6a4e51d.
111 | # N.B.: Commit-boost uses TOML configuration instead of env variables. Configure those at commit-boost/.
112 | MEV_RELAYS=https://0xa15b52576bcbf1072f4a011c0f99f9fb6c66f3e1ff321f11f461d15e31b1cb359caa092c71bbded0bae5b5ea401aab7e@aestus.live,https://0xa7ab7a996c8584251c8f925da3170bdfd6ebc75d50f5ddc4050a6fdc77f2a3b5fce2cc750d0865e05d7228af97d69561@agnostic-relay.net,https://0x8b5d2e73e2a3a55c6c87b8b6eb92e0149a125c852751db1422fa951e42a09b82c142c3ea98d0d9930b056a3bc9896b8f@bloxroute.max-profit.blxrbdn.com,https://0xb0b07cd0abef743db4260b0ed50619cf6ad4d82064cb4fbec9d3ec530f7c5e6793d9f286c4e082c0244ffb9f2658fe88@bloxroute.regulated.blxrbdn.com,https://0xac6e77dfe25ecd6110b8e780608cce0dab71fdd5ebea22a16c0205200f2f8e2e3ad3b71d3499c54ad14d6c21b41a37ae@boost-relay.flashbots.net,https://0xa1559ace749633b997cb3fdacffb890aeebdb0f5a3b6aaa7eeeaf1a38af0a8fe88b9e4b1f61f236d2e64d95733327a62@relay.ultrasound.money,https://0x8c4ed5e24fe5c6ae21018437bde147693f68cda427cd1122cf20819c30eda7ed74f72dece09bb313f2a1855595ab677d@global.titanrelay.xyz
113 |
114 | # MEV-Boost docker container image version.
115 | #MEV_MEVBOOST_VERSION=
116 |
117 | # Commit-Boost docker container image version.
118 | #MEV_COMMITBOOST_VERSION=
119 |
120 | ######### Charon Config #########
121 |
122 | # Charon docker container image version.
123 | # See available tags https://hub.docker.com/r/obolnetwork/charon/tags.
124 | #CHARON_VERSION=
125 |
126 | # Define custom relays. One or more ENRs, or an HTTP URL that returns an ENR. Use a comma-separated list excluding spaces.
127 | #CHARON_P2P_RELAYS=
128 |
129 | # Connect to one or more external beacon nodes. Use a comma separated list excluding spaces.
130 | #CHARON_BEACON_NODE_ENDPOINTS=
131 |
132 | # Supply optional HTTP headers during beacon node requests. These headers are sent to all primary and fallback endpoints, be sure to rely on trusted BNs only.
133 | #CHARON_BEACON_NODE_HEADERS=
134 |
135 | # Specify one or more fallback beacon node endpoints, which are called in the case that the primary beacon nodes are offline or unhealthy.
136 | # Use fallback beacon nodes sparingly, particularly if latency is high, which can impact DV cluster performance.
137 | #CHARON_FALLBACK_BEACON_NODE_ENDPOINTS=
138 |
139 | # Increase the duration charon will wait for requests to the beacon node.
140 | #CHARON_BEACON_NODE_TIMEOUT=
141 |
142 | # Increase the duration charon will wait while publishing data to the beacon node.
143 | #CHARON_BEACON_NODE_SUBMIT_TIMEOUT=
144 |
145 | # The address of the execution engine JSON-RPC API.
146 | #CHARON_EXECUTION_CLIENT_RPC_ENDPOINT=
147 |
148 | # Override the charon logging level; debug, info, warning, error.
149 | #CHARON_LOG_LEVEL=
150 |
151 | # Override the charon logging format; console, logfmt, json. Grafana panels require logfmt.
152 | #CHARON_LOG_FORMAT=
153 |
154 | # Advertise a custom external DNS hostname or IP address for libp2p peer discovery.
155 | #CHARON_P2P_EXTERNAL_HOSTNAME=
156 |
157 | # Loki log aggregation server addresses. Disable loki log aggregation by setting an empty address.
158 | #CHARON_LOKI_ADDRESSES=
159 |
160 | # Charon Cluster Name. Mandatory to send logs with Promtail.
161 | #CLUSTER_NAME=
162 |
163 | # Charon Cluster Peer. Mandatory to send logs with Promtail.
164 | #CLUSTER_PEER=
165 |
166 | # Nickname to identify this charon node on monitoring (max 32 characters).
167 | #CHARON_NICKNAME=
168 |
169 | # Docker network of running charon node. See `docker network ls`.
170 | #CHARON_DOCKER_NETWORK=
171 |
172 | # Charon host exposed ports.
173 | #CHARON_PORT_P2P_TCP=
174 |
175 | ######### Monitoring Config #########
176 |
177 | # Grafana docker container image version.
178 | # See available tags https://github.com/grafana/grafana/releases.
179 | #GRAFANA_VERSION=
180 |
181 | # Grafana host exposed IP and port.
182 | #MONITORING_IP_GRAFANA=
183 | #MONITORING_PORT_GRAFANA=
184 |
185 | # Prometheus docker container image version.
186 | # See available tags https://github.com/prometheus/prometheus/releases.
187 | #PROMETHEUS_VERSION=
188 |
189 | # Prometheus remote write token used for accessing external prometheus.
190 | #PROM_REMOTE_WRITE_TOKEN=
191 |
192 | # Prometheus service owner used to uniquely identify user from which metrics are pushed.
193 | #SERVICE_OWNER=charon_user
194 |
195 | # Uncomment these if you have log exporting with Promtail
196 | # and want to disable log export on a particular container.
197 | #EL_NETHERMIND_PROMTAIL_MONITORED=false
198 | #EL_RETH_PROMTAIL_MONITORED=false
199 | #CL_LIGHTHOUSE_PROMTAIL_MONITORED=false
200 | #CL_GRANDINE_PROMTAIL_MONITORED=false
201 | #CL_TEKU_PROMTAIL_MONITORED=false
202 | #CL_LODESTAR_PROMTAIL_MONITORED=false
203 | #CHARON_PROMTAIL_MONITORED=false
204 | #VC_LODESTAR_PROMTAIL_MONITORED=false
205 | #VC_NIMBUS_PROMTAIL_MONITORED=false
206 | #VC_PRYSM_PROMTAIL_MONITORED=false
207 | #VC_TEKU_PROMTAIL_MONITORED=false
208 | #MEV_MEV_BOOST_PROMTAIL_MONITORED=false
209 | #MEV_COMMIT_BOOST_PROMTAIL_MONITORED=false
210 | #EJECTOR_PROMTAIL_MONITORED=false
211 | #DV_EXIT_PROMTAIL_MONITORED=false
212 |
213 | ######### Debug Config #########
214 |
215 | # This applies to compose-debug.yml only.
216 |
217 | # Prometheus Node exporter docker container image version.
218 | # See available tags https://hub.docker.com/r/bitnamilegacy/node-exporter/tags.
219 | #NODE_EXPORTER_VERSION=
220 |
221 | # Grafana Tempo docker container image version.
222 | # Use Grafana Explore to access Tempo data.
223 | # See available tags https://hub.docker.com/r/grafana/tempo/tags.
224 | #TEMPO_VERSION=
225 |
226 | # Grafana Loki docker container image version.
227 | # See available tags https://hub.docker.com/r/grafana/loki/tags.
228 | #LOKI_VERSION=
229 |
230 | # Loki host exposed port.
231 | #MONITORING_PORT_LOKI=
232 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | 
2 |
3 |