├── .gitignore ├── LICENSE ├── docker-compose.testnet.yaml ├── docker-compose.pubnet.yaml ├── README.md └── catchup.sh /.gitignore: -------------------------------------------------------------------------------- 1 | data-result 2 | history-result 3 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2018 SatoshiPay Ltd. 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /docker-compose.testnet.yaml: -------------------------------------------------------------------------------- 1 | version: '3' 2 | services: 3 | stellar-core-postgres: 4 | image: postgres:12 5 | restart: on-failure 6 | volumes: 7 | - db-data:/var/lib/postgresql/data 8 | environment: 9 | - POSTGRES_DB=stellar-core 10 | - POSTGRES_PASSWORD=RonUQgj23En5Mmo4 11 | 12 | stellar-core: 13 | image: satoshipay/stellar-core:12.5.0 14 | restart: on-failure 15 | volumes: 16 | - core-data:/data 17 | environment: 18 | - DATABASE=postgresql://dbname=stellar-core user=postgres password=DATABASE_PASSWORD host=stellar-core-postgres 19 | - DATABASE_PASSWORD=RonUQgj23En5Mmo4 20 | - INITIALIZE_DB=false 21 | - INITIALIZE_HISTORY_ARCHIVES=false 22 | # public key GACVY6FX6ZSGBEWQF6DG25VOAB25F72EI4WS676QIO3HO2YBKAKDI44H 23 | - NODE_SEED=SDEZOI3BOCW6DUY23ZCJQALULOIEIQMZW4UTFXPRQEMMJMOG7CVD7WMO satoshipay-catchup 24 | - NODE_IS_VALIDATOR=true 25 | - NETWORK_PASSPHRASE=Test SDF Network ; September 2015 26 | - MAX_PEER_CONNECTIONS=30 27 | - "KNOWN_PEERS=\ 28 | stellar-testnet-de-fra.satoshipay.io,\ 29 | core-testnet1.stellar.org,\ 30 | core-testnet2.stellar.org,\ 31 | core-testnet3.stellar.org" 32 | - "PREFERRED_PEERS=\ 33 | stellar-testnet-de-fra.satoshipay.io,\ 34 | core-testnet1.stellar.org,\ 35 | core-testnet2.stellar.org,\ 36 | core-testnet3.stellar.org" 37 | - "NODE_NAMES=\ 38 | GC5TQJOHBX6NRGJHKVN3YMQGCVHJHKND4J2CV6RYVPIYZRX7STPSLEJL satoshipay-testnet-de-fra,\ 39 | GDKXE2OZMJIPOSLNA6N6F2BVCI3O777I2OOC4BV7VOYUEHYX7RTRYA7Y sdf1,\ 40 | GCUCJTIYXSOXKBSNFGNFWW5MUQ54HKRPGJUTQFJ5RQXZXNOLNXYDHRAP sdf2,\ 41 | GC2V2EFSXN6SQTWVYA5EPJPBWWIMSD2XQNKUOHGEKB535AQE2I6IXV2Z sdf3" 42 | - >- 43 | QUORUM_SET=[ 44 | { 45 | "threshold_percent": 66, 46 | "validators": [ 47 | "$$satoshipay-testnet-de-fra", 48 | "$$sdf1", 49 | "$$sdf2", 50 | "$$sdf3" 51 | ] 52 | } 53 | ] 54 | - 
KNOWN_CURSORS=HORIZON 55 | - UNSAFE_QUORUM=true 56 | - FAILURE_SAFETY=1 57 | - >- 58 | HISTORY={ 59 | "satoshipay-testnet-de-fra": {"get": "curl -sf https://stellar-history-testnet-de-fra.satoshipay.io/{0} -o {1}"}, 60 | "sdf1": {"get": "curl -sf http://history.stellar.org/prd/core-testnet/core_testnet_001/{0} -o {1}"}, 61 | "sdf2": {"get": "curl -sf http://history.stellar.org/prd/core-testnet/core_testnet_002/{0} -o {1}"}, 62 | "sdf3": {"get": "curl -sf http://history.stellar.org/prd/core-testnet/core_testnet_003/{0} -o {1}"}, 63 | "local": { 64 | "get": "cp /data/history/{0} {1}", 65 | "put": "cp {0} /data/history/{1}", 66 | "mkdir": "mkdir -p /data/history/{0}" 67 | } 68 | } 69 | 70 | volumes: 71 | db-data: 72 | core-data: 73 | -------------------------------------------------------------------------------- /docker-compose.pubnet.yaml: -------------------------------------------------------------------------------- 1 | version: '3' 2 | services: 3 | stellar-core-postgres: 4 | image: postgres:12 5 | restart: on-failure 6 | volumes: 7 | - db-data:/var/lib/postgresql/data 8 | environment: 9 | - POSTGRES_DB=stellar-core 10 | - POSTGRES_PASSWORD=RonUQgj23En5Mmo4 11 | 12 | stellar-core: 13 | image: satoshipay/stellar-core:12.5.0 14 | restart: on-failure 15 | volumes: 16 | - core-data:/data 17 | environment: 18 | - DATABASE=postgresql://dbname=stellar-core user=postgres password=DATABASE_PASSWORD host=stellar-core-postgres 19 | - DATABASE_PASSWORD=RonUQgj23En5Mmo4 20 | - INITIALIZE_DB=false 21 | - INITIALIZE_HISTORY_ARCHIVES=false 22 | # public key GACVY6FX6ZSGBEWQF6DG25VOAB25F72EI4WS676QIO3HO2YBKAKDI44H 23 | - NODE_SEED=SDEZOI3BOCW6DUY23ZCJQALULOIEIQMZW4UTFXPRQEMMJMOG7CVD7WMO catchup 24 | - NODE_IS_VALIDATOR=true 25 | - NETWORK_PASSPHRASE=Public Global Stellar Network ; September 2015 26 | - MAX_PEER_CONNECTIONS=30 27 | - "KNOWN_PEERS=\ 28 | stellar-de-fra.satoshipay.io,\ 29 | stellar-sg-sin.satoshipay.io,\ 30 | stellar-us-iowa.satoshipay.io,\ 31 | core-live-a.stellar.org,\ 32 | core-live-b.stellar.org,\ 33 | core-live-c.stellar.org" 34 | - "PREFERRED_PEERS=\ 35 | stellar-de-fra.satoshipay.io,\ 36 | stellar-sg-sin.satoshipay.io,\ 37 | stellar-us-iowa.satoshipay.io,\ 38 | core-live-a.stellar.org,\ 39 | core-live-b.stellar.org,\ 40 | core-live-c.stellar.org" 41 | - "NODE_NAMES=\ 42 | GC5SXLNAM3C4NMGK2PXK4R34B5GNZ47FYQ24ZIBFDFOCU6D4KBN4POAE satoshipay-de-fra,\ 43 | GBJQUIXUO4XSNPAUT6ODLZUJRV2NPXYASKUBY4G5MYP3M47PCVI55MNT satoshipay-sg-sin,\ 44 | GAK6Z5UVGUVSEK6PEOCAYJISTT5EJBB34PN3NOLEQG2SUKXRVV2F6HZY satoshipay-us-iowa,\ 45 | GCGB2S2KGYARPVIA37HYZXVRM2YZUEXA6S33ZU5BUDC6THSB62LZSTYH sdf_watcher1,\ 46 | GCM6QMP3DLRPTAZW2UZPCPX2LF3SXWXKPMP3GKFZBDSF3QZGV2G5QSTK sdf_watcher2,\ 47 | GABMKJM6I25XI4K7U6XWMULOUQIQ27BCTMLS6BYYSOWKTBUXVRJSXHYQ sdf_watcher3,\ 48 | GAOO3LWBC4XF6VWRP5ESJ6IBHAISVJMSBTALHOQM2EZG7Q477UWA6L7U eno" 49 | - >- 50 | QUORUM_SET=[ 51 | { 52 | "threshold_percent": 66, 53 | "validators": [ 54 | "$$satoshipay-de-fra", 55 | "$$satoshipay-sg-sin", 56 | "$$satoshipay-us-iowa", 57 | "$$sdf_watcher1", 58 | "$$sdf_watcher2", 59 | "$$sdf_watcher3", 60 | "$$eno" 61 | ] 62 | } 63 | ] 64 | - KNOWN_CURSORS=HORIZON 65 | - >- 66 | HISTORY={ 67 | "coinqvest-de": {"get": "curl -sf https://germany.stellar.coinqvest.com/history/{0} -o {1}"}, 68 | "coinqvest-fi": {"get": "curl -sf https://finland.stellar.coinqvest.com/history/{0} -o {1}"}, 69 | "lobstr1": {"get": "curl -sf https://stellar-archive-1-lobstr.s3.amazonaws.com/{0} -o {1}"}, 70 | "lobstr2": {"get": "curl -sf 
https://stellar-archive-2-lobstr.s3.amazonaws.com/{0} -o {1}"}, 71 | "satoshipay-de-fra": {"get": "curl -sf https://stellar-history-de-fra.satoshipay.io/{0} -o {1}"}, 72 | "satoshipay-sg-sin": {"get": "curl -sf https://stellar-history-sg-sin.satoshipay.io/{0} -o {1}"}, 73 | "satoshipay-us-iowa": {"get": "curl -sf https://stellar-history-us-iowa.satoshipay.io/{0} -o {1}"}, 74 | "sdf1": {"get": "curl -sf http://history.stellar.org/prd/core-live/core_live_001/{0} -o {1}"}, 75 | "sdf2": {"get": "curl -sf http://history.stellar.org/prd/core-live/core_live_002/{0} -o {1}"}, 76 | "sdf3": {"get": "curl -sf http://history.stellar.org/prd/core-live/core_live_003/{0} -o {1}"}, 77 | "local": { 78 | "get": "cp /data/history/{0} {1}", 79 | "put": "cp {0} /data/history/{1}", 80 | "mkdir": "mkdir -p /data/history/{0}" 81 | } 82 | } 83 | 84 | volumes: 85 | db-data: 86 | core-data: 87 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Parallel Stellar Core Catchup ⚡ 2 | 3 | ## Background 4 | 5 | ### Goal 6 | 7 | Sync a full Stellar validator node (including full history) as fast as possible. 8 | 9 | ### Problem 10 | 11 | A full catchup takes weeks/months – even without publishing to an archive. 12 | 13 | ### Idea 14 | 15 | * Split the big ledger into small chunks of size `CHUNK_SIZE`. 16 | * Run a catchup for the chunks in parallel with `WORKERS` worker processes. 17 | * Stitch together the resulting database and history archive. 18 | 19 | ## Usage 20 | 21 | ``` 22 | ./catchup.sh DOCKER_COMPOSE_FILE LEDGER_MIN LEDGER_MAX CHUNK_SIZE WORKERS 23 | ``` 24 | 25 | Arguments: 26 | 27 | * `DOCKER_COMPOSE_FILE`: use `docker-compose.pubnet.yaml` for the public network (`docker-compose.testnet.yaml` for testnet). 28 | * `LEDGER_MIN`: smallest ledger number you want. Use `1` to do a full sync. 29 | * `LEDGER_MAX`: largest ledger number you want; usually this is the latest ledger, which is exposed as `core_latest_ledger` by any synced Horizon server, e.g. https://stellar-horizon.satoshipay.io/. 30 | * `CHUNK_SIZE`: number of ledgers each worker processes per catchup job (see the chunking sketch below). 31 | * `WORKERS`: number of workers that should be spawned. For best performance this should not exceed the number of CPUs. 32 | 33 | ## Hardware sizing and timing examples 34 | 35 | * 2019-05-19: 23 hours with a `CHUNK_SIZE` of `32768` and 50 workers on a `n1-standard-64` machine on Google Cloud (64 CPUs, 240GB RAM, 2TB SSD) 36 | 37 | ``` 38 | ./catchup.sh docker-compose.pubnet.yaml 1 23920640 32768 50 2>&1 | tee catchup.log 39 | ``` 40 | 41 | * 2018-12-20: 24 hours with a `CHUNK_SIZE` of `32768` and 32 workers on a `n1-standard-32` machine on Google Cloud (32 CPUs, 120GB RAM, 1TB SSD) 42 | 43 | ``` 44 | ./catchup.sh docker-compose.pubnet.yaml 1 20971520 32768 32 2>&1 | tee catchup.log 45 | ``` 46 | 47 | * ... add your achieved result here by submitting a PR.
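## Chunking sketch

For reference, this is the arithmetic `catchup.sh` uses to split the range from `LEDGER_MIN` to `LEDGER_MAX` into per-job chunks of `CHUNK_SIZE` ledgers. The snippet below is a standalone sketch of that logic, using the example values from the 2019 run above; in the real script the resulting ranges are fed to the workers through a job queue.

```
LEDGER_MIN=1 LEDGER_MAX=23920640 CHUNK_SIZE=32768

MAX_JOB_ID=$(( (LEDGER_MAX - LEDGER_MIN) / CHUNK_SIZE ))
if [ $(( LEDGER_MIN - 1 + MAX_JOB_ID * CHUNK_SIZE )) -lt "$LEDGER_MAX" ]; then
  MAX_JOB_ID=$(( MAX_JOB_ID + 1 ))  # round up so the final partial chunk is covered
fi

for JOB_ID in $(seq 1 $MAX_JOB_ID); do
  JOB_LEDGER_MIN=$(( LEDGER_MIN + (JOB_ID - 1) * CHUNK_SIZE ))
  JOB_LEDGER_MAX=$(( LEDGER_MIN - 1 + JOB_ID * CHUNK_SIZE ))
  if [ "$JOB_LEDGER_MAX" -ge "$LEDGER_MAX" ]; then JOB_LEDGER_MAX=$LEDGER_MAX; fi
  # each job then runs: stellar-core catchup $JOB_LEDGER_MAX/$(($JOB_LEDGER_MAX - $JOB_LEDGER_MIN))
  echo "job $JOB_ID: ledgers $JOB_LEDGER_MIN..$JOB_LEDGER_MAX"
done
```

With these example values the range is split into 730 chunks, the last one covering ledgers 23887873..23920640.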
48 | 49 | ## Example run on dedicated Google Cloud machine 50 | 51 | ``` 52 | sudo apt-get update 53 | sudo apt-get install -y \ 54 | apt-transport-https \ 55 | ca-certificates \ 56 | curl \ 57 | gnupg2 \ 58 | software-properties-common \ 59 | python-pip 60 | curl -fsSL https://download.docker.com/linux/debian/gpg | sudo apt-key add - 61 | sudo add-apt-repository \ 62 | "deb [arch=amd64] https://download.docker.com/linux/debian \ 63 | $(lsb_release -cs) \ 64 | stable" 65 | sudo apt-get update 66 | sudo apt-get install -y docker-ce 67 | sudo pip install docker-compose 68 | echo '{"default-address-pools":[{"base":"172.80.0.0/16","size":29}]}' | sudo tee /etc/docker/daemon.json 69 | sudo usermod -aG docker $USER 70 | sudo reboot 71 | # log in again and check whether docker works 72 | docker ps 73 | ``` 74 | 75 | ``` 76 | git clone git@github.com:satoshipay/stellar-core-parallel-catchup.git 77 | cd stellar-core-parallel-catchup 78 | ./catchup.sh docker-compose.pubnet.yaml 1 20971520 32768 32 2>&1 | tee catchup.log 79 | ``` 80 | 81 | You will get 3 important pieces of data for Stellar Core: 82 | 83 | * SQL database: if you need to move the data to another container/machine, you can dump the database by running the following: 84 | 85 | ``` 86 | docker exec catchup-result_stellar-core-postgres_1 pg_dump -F d -f catchup-sqldump -j 10 -U postgres -d stellar-core 87 | ``` 88 | 89 | Then copy the `catchup-sqldump` directory to the target container/machine and restore it with `pg_restore` (see the sketch at the end of this section). 90 | 91 | * `data-result` directory: contains the `buckets` directory that Stellar Core needs for continuing with the current state in the SQL database. 92 | * `history-result` directory: contains the full history, which can be published (e.g., to S3, GCS, IPFS, or any other file storage) to help other validator nodes catch up. 93 | 94 | Note: make sure the three pieces of data are in a consistent state before starting Stellar Core in SCP mode (e.g., when moving data to another machine).
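To complete the `pg_restore` step mentioned above, a restore on the target machine could look like the following sketch. The container name `target_stellar-core-postgres_1` and the in-container path `/catchup-sqldump` are placeholders for your own setup, and the target `stellar-core` database is assumed to exist and to be empty:

```
docker cp catchup-sqldump target_stellar-core-postgres_1:/catchup-sqldump
docker exec target_stellar-core-postgres_1 pg_restore -F d -j 10 -U postgres -d stellar-core /catchup-sqldump
```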
95 | 96 | ## Reset 97 | 98 | If you need to start from scratch again, you can delete all docker-compose projects: 99 | 100 | ``` 101 | for PROJECT in $(docker ps --filter "label=com.docker.compose.project" -q | xargs docker inspect --format='{{index .Config.Labels "com.docker.compose.project"}}'| uniq | grep catchup-); do docker-compose -f docker-compose.pubnet.yaml -p $PROJECT down -v; done 102 | docker volume prune 103 | ``` 104 | -------------------------------------------------------------------------------- /catchup.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Hacky parallel catchup 4 | # see also https://github.com/stellar/docs/blob/3d060c0f1afb2eaff8a4076f673a8688d36e4aa5/software/known-issues.md 5 | 6 | set -eu 7 | set -o pipefail 8 | 9 | if [ "$#" -ne 5 ]; then 10 | echo "Usage: ./catchup.sh DOCKER_COMPOSE_FILE LEDGER_MIN LEDGER_MAX CHUNK_SIZE WORKERS" 11 | exit 1 12 | fi 13 | 14 | DOCKER_COMPOSE_FILE=$1 15 | LEDGER_MIN=$2 16 | LEDGER_MAX=$3 17 | CHUNK_SIZE=$4 18 | WORKERS=$5 19 | 20 | # temporary files, job queue, and locks 21 | PREFIX=$(mktemp -u -t catchup-XXXX) 22 | JOB_QUEUE=${PREFIX}-job-queue 23 | JOB_QUEUE_LOCK=${PREFIX}-job-queue-lock 24 | touch $JOB_QUEUE_LOCK 25 | mkfifo $JOB_QUEUE 26 | 27 | cleanup() { 28 | rm $JOB_QUEUE 29 | rm $JOB_QUEUE_LOCK 30 | } 31 | trap cleanup 0 32 | 33 | log() { 34 | echo "$(date +'%Y-%m-%d %H:%M:%S') $1" 35 | } 36 | 37 | run-catchup-job() { 38 | JOB_ID=$1 39 | CATCHUP_LEDGER_MIN=$2 40 | CATCHUP_LEDGER_MAX=$3 41 | 42 | JOB_LOG_FILE=${PREFIX}-job-${JOB_ID}.log 43 | 44 | docker-compose -f $DOCKER_COMPOSE_FILE -p catchup-job-${JOB_ID} up -d stellar-core-postgres 45 | sleep 30 46 | docker-compose -f $DOCKER_COMPOSE_FILE -p catchup-job-${JOB_ID} run stellar-core stellar-core new-db --conf /stellar-core.cfg > $JOB_LOG_FILE 2>&1 47 | docker-compose -f $DOCKER_COMPOSE_FILE -p catchup-job-${JOB_ID} run stellar-core stellar-core new-hist local --conf /stellar-core.cfg >> $JOB_LOG_FILE 2>&1 48 | docker-compose -f $DOCKER_COMPOSE_FILE -p catchup-job-${JOB_ID} run stellar-core stellar-core catchup $CATCHUP_LEDGER_MAX/$(($CATCHUP_LEDGER_MAX - $CATCHUP_LEDGER_MIN)) --conf /stellar-core.cfg >> $JOB_LOG_FILE 2>&1 49 | docker-compose -f $DOCKER_COMPOSE_FILE -p catchup-job-${JOB_ID} run stellar-core stellar-core publish --conf /stellar-core.cfg >> $JOB_LOG_FILE 2>&1 50 | 51 | # free up resources (ram, networks), volumes are retained 52 | docker-compose -f $DOCKER_COMPOSE_FILE -p catchup-job-${JOB_ID} down 53 | 54 | touch ${PREFIX}-job-${JOB_ID}-finished 55 | } 56 | 57 | worker() { 58 | WORKER=$1 59 | 60 | exec 201<$JOB_QUEUE 61 | exec 202<$JOB_QUEUE_LOCK 62 | 63 | touch ${PREFIX}-worker-$WORKER-started 64 | 65 | log "Worker $WORKER: started." 66 | 67 | while true; do 68 | flock 202 69 | read -u 201 JOB_ID JOB_LEDGER_MIN JOB_LEDGER_MAX || { log "Worker $WORKER: finished."; exit 0; } 70 | flock -u 202 71 | 72 | log "Worker $WORKER: starting job $JOB_ID (ledgers ${JOB_LEDGER_MIN}–${JOB_LEDGER_MAX})." 73 | run-catchup-job $JOB_ID $JOB_LEDGER_MIN $JOB_LEDGER_MAX 74 | log "Worker $WORKER: finished job $JOB_ID (ledgers ${JOB_LEDGER_MIN}–${JOB_LEDGER_MAX})."
75 | done 76 | } 77 | 78 | # start workers 79 | for WORKER in $(seq 1 $WORKERS); do 80 | worker $WORKER & 81 | done 82 | 83 | MAX_JOB_ID=$(( ( LEDGER_MAX - LEDGER_MIN ) / CHUNK_SIZE )) 84 | if [ "$(( LEDGER_MIN - 1 + MAX_JOB_ID * CHUNK_SIZE ))" -lt "$LEDGER_MAX" ]; then 85 | MAX_JOB_ID=$(( MAX_JOB_ID + 1 )) 86 | fi 87 | 88 | log "Running $MAX_JOB_ID jobs with $WORKERS workers" 89 | 90 | # job producer 91 | { 92 | exec 201>$JOB_QUEUE 93 | 94 | log "wait for workers" 95 | # wait for workers to start 96 | for WORKER in $(seq 1 $WORKERS); do 97 | while [ ! -f ${PREFIX}-worker-$WORKER-started ]; do 98 | sleep 1 99 | done 100 | done 101 | 102 | # produce jobs 103 | for JOB_ID in $(seq 1 $MAX_JOB_ID); do 104 | JOB_LEDGER_MIN=$(( LEDGER_MIN + (JOB_ID - 1) * CHUNK_SIZE)) 105 | JOB_LEDGER_MAX=$(( LEDGER_MIN - 1 + JOB_ID * CHUNK_SIZE )) 106 | if [ "$JOB_LEDGER_MAX" -ge "$LEDGER_MAX" ]; then 107 | JOB_LEDGER_MAX=$LEDGER_MAX 108 | fi 109 | echo "$JOB_ID $JOB_LEDGER_MIN $JOB_LEDGER_MAX" >& 201 110 | done 111 | exec 201<&- 112 | } & 113 | 114 | # merge results 115 | log "Starting result database..." 116 | docker-compose -f $DOCKER_COMPOSE_FILE -p catchup-result up -d stellar-core-postgres 117 | sleep 60 118 | docker-compose -f $DOCKER_COMPOSE_FILE -p catchup-result run stellar-core stellar-core new-db --conf /stellar-core.cfg 119 | 120 | # wipe data to prevent conflicts with job 1 121 | for TABLE in ledgerheaders txhistory txfeehistory upgradehistory; do 122 | docker-compose -f $DOCKER_COMPOSE_FILE -p catchup-result exec -T stellar-core-postgres \ 123 | psql stellar-core postgres -c "DELETE FROM $TABLE" 124 | done 125 | 126 | for JOB_ID in $(seq 1 $MAX_JOB_ID); do 127 | log "Waiting for job $JOB_ID..." 128 | while [ ! -f ${PREFIX}-job-${JOB_ID}-finished ]; do 129 | sleep 10 130 | done 131 | rm -f ${PREFIX}-job-${JOB_ID}-finished 132 | log "Job $JOB_ID finished, recreating database container..." 133 | 134 | docker-compose -f $DOCKER_COMPOSE_FILE -p catchup-job-${JOB_ID} up -d stellar-core-postgres 135 | sleep 15 136 | 137 | JOB_LEDGER_MIN=$(( LEDGER_MIN + (JOB_ID - 1) * CHUNK_SIZE)) 138 | JOB_LEDGER_MAX=$(( LEDGER_MIN - 1 + JOB_ID * CHUNK_SIZE )) 139 | if [ "$JOB_LEDGER_MAX" -ge "$LEDGER_MAX" ]; then 140 | JOB_LEDGER_MAX=$LEDGER_MAX 141 | fi 142 | 143 | if [ "$JOB_ID" != "1" ]; then 144 | log "Match last hash of result data with previous hash of the first ledger of job $JOB_ID" 145 | LAST_RESULT_LEDGER=$(( JOB_LEDGER_MIN - 1)) 146 | LAST_RESULT_HASH=$(docker-compose -f $DOCKER_COMPOSE_FILE -p catchup-result exec stellar-core-postgres psql -t stellar-core postgres -c "SELECT ledgerhash FROM ledgerheaders WHERE ledgerseq = $LAST_RESULT_LEDGER") 147 | PREVIOUS_JOB_HASH=$(docker-compose -f $DOCKER_COMPOSE_FILE -p catchup-job-${JOB_ID} exec stellar-core-postgres psql -t stellar-core postgres -c "SELECT prevhash FROM ledgerheaders WHERE ledgerseq = $JOB_LEDGER_MIN") 148 | if [ "$LAST_RESULT_HASH" != "$PREVIOUS_JOB_HASH" ]; then 149 | log "Last result hash $LAST_RESULT_HASH (ledger $LAST_RESULT_LEDGER) does not match previous hash $PREVIOUS_JOB_HASH of first ledger of job $JOB_ID (ledger $JOB_LEDGER_MIN)" 150 | exit 1 151 | fi 152 | fi 153 | 154 | log "Merging database of job $JOB_ID in result database..." 
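  # Stream each ledger-range table out of the job database and pipe it directly into the
  # result database using PostgreSQL binary COPY (no intermediate dump files on disk).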
155 | for TABLE in ledgerheaders txhistory txfeehistory upgradehistory; do 156 | docker-compose -f $DOCKER_COMPOSE_FILE -p catchup-job-${JOB_ID} exec -T stellar-core-postgres \ 157 | psql stellar-core postgres -c "COPY (SELECT * FROM $TABLE WHERE ledgerseq >= $JOB_LEDGER_MIN AND ledgerseq <= $JOB_LEDGER_MAX) TO STDOUT WITH (FORMAT BINARY)" | 158 | docker-compose -f $DOCKER_COMPOSE_FILE -p catchup-result exec -T stellar-core-postgres \ 159 | psql stellar-core postgres -c "COPY $TABLE FROM STDIN WITH (FORMAT BINARY)" 160 | done 161 | 162 | if [ "$JOB_ID" = "$MAX_JOB_ID" ]; then 163 | log "Copy state from job $JOB_ID to result database..." 164 | for TABLE in accountdata accounts ban offers peers publishqueue pubsub quoruminfo scphistory scpquorums storestate trustlines; do 165 | # wipe existing data 166 | docker-compose -f $DOCKER_COMPOSE_FILE -p catchup-result exec -T stellar-core-postgres \ 167 | psql stellar-core postgres -c "DELETE FROM $TABLE" 168 | # copy state 169 | docker-compose -f $DOCKER_COMPOSE_FILE -p catchup-job-${JOB_ID} exec -T stellar-core-postgres \ 170 | psql stellar-core postgres -c "COPY $TABLE TO STDOUT WITH (FORMAT BINARY)" | 171 | docker-compose -f $DOCKER_COMPOSE_FILE -p catchup-result exec -T stellar-core-postgres \ 172 | psql stellar-core postgres -c "COPY $TABLE FROM STDIN WITH (FORMAT BINARY)" 173 | done 174 | fi 175 | 176 | log "Merging history of job $JOB_ID..." 177 | docker container create --name catchup-job-${JOB_ID} -v catchup-job-${JOB_ID}_core-data:/data hello-world 178 | docker cp catchup-job-${JOB_ID}:/data/history ./history-${JOB_ID} 179 | if [ "$JOB_ID" = "$MAX_JOB_ID" ]; then 180 | log "Copy all data from job ${JOB_ID}..." 181 | docker cp catchup-job-${JOB_ID}:/data ./data-result 182 | fi 183 | docker rm catchup-job-${JOB_ID} 184 | rsync -a ./history-${JOB_ID}/ ./history-result/ 185 | rm -rf ./history-${JOB_ID} 186 | 187 | # clean up job containers and volumes 188 | docker-compose -f $DOCKER_COMPOSE_FILE -p catchup-job-${JOB_ID} down -v 189 | done 190 | 191 | wait 192 | 193 | log "Done" 194 | --------------------------------------------------------------------------------