├── .gitignore ├── BigchainDB ├── bench.py ├── config.toml ├── driver.py ├── lib-txdelay.py ├── scripts │ ├── .bigchaindb │ ├── ntpset │ ├── start-all.sh │ └── tendermint_init └── setup.sh ├── BlockchainDB ├── Makefile ├── README.md ├── bcdbnode │ ├── config │ │ └── config.go │ └── server.go ├── benchmark │ ├── util.go │ └── ycsb │ │ └── ycsb.go ├── blockchainconnectors │ ├── blockchainConnector.go │ ├── ethereumconnector │ │ └── ethereumConnector.go │ ├── test │ │ └── test.go │ └── testconnector.go ├── cmd │ ├── bcdbnode │ │ └── bcdbnode.go │ └── tests │ │ ├── main.go │ │ └── main.go.bak ├── proto │ └── blockchaindb │ │ ├── blockchaindb.pb.go │ │ └── blockchaindb.proto ├── scripts │ ├── eth │ │ ├── deploy_contract.sh │ │ ├── env.sh │ │ ├── gen_eth_config.sh │ │ ├── geth_build_from_source.sh │ │ ├── init_eth_account.sh │ │ ├── start_eth_container.sh │ │ └── start_eth_node.sh │ ├── experiments │ │ ├── experiment1.sh │ │ ├── experiment2.sh │ │ ├── experiment3.sh │ │ ├── experiment4.sh │ │ ├── experiment6.sh │ │ └── run_all_tests.sh │ ├── fab │ │ ├── restart.sh │ │ └── setupEnv.sh │ ├── gen_config.sh │ ├── libs │ │ ├── gen_proto.sh │ │ └── install.sh │ ├── start_eth_network.sh │ ├── start_nodes.sh │ ├── stop_nodes.sh │ └── ycsb │ │ ├── gen_ycsb_data.sh │ │ └── start_ycsb_test.sh ├── shardingMgr │ ├── partition.go │ └── shardingMgr.go ├── storage │ └── ethereum │ │ ├── clientSDK │ │ └── main.go │ │ ├── contracts │ │ ├── KVStore │ │ │ ├── KVStore.go │ │ │ └── KVStore.sol │ │ └── deploy │ │ │ └── contract_deploy.go │ │ ├── networks │ │ └── CustomGenesis.template │ │ └── tests │ │ └── main.go └── transactionMgr │ └── transactionMgr.go ├── LICENSE ├── README.md ├── cmd ├── kafkabench │ └── main.go ├── mongodb │ └── main.go ├── raftkv │ └── main.go ├── redis │ └── main.go ├── redisql │ └── main.go ├── redisqlexample │ └── main.go ├── tso │ └── main.go ├── veritas-redisql │ └── main.go ├── veritas │ └── main.go ├── veritastm-mongodb │ └── main.go └── veritastm │ └── main.go 
├── dbconn ├── mongodb.go ├── redis.go └── redisql.go ├── docker ├── bigchaindb │ ├── Alpine.Dockerfile │ ├── Dockerfile │ └── build_docker.sh ├── blockchaindb │ ├── Dockerfile │ └── build_docker.sh └── veritas │ ├── Dockerfile │ ├── build_docker.sh │ └── tendermint ├── docker_compose.yml ├── go.mod ├── kafkarole ├── consumer.go └── producer.go ├── proto ├── raftkv │ ├── raftkv.pb.go │ └── raftkv.proto └── veritas │ ├── veritas.pb.go │ └── veritas.proto ├── raftkv ├── badgerkv.go ├── command.go ├── config.go ├── fsm.go ├── fsmsnapshot.go ├── kv.go ├── peer.go └── rediskv.go ├── scripts ├── build_binaries.sh ├── env.sh ├── gen_ycsb_data.sh ├── genproto.sh ├── get_kafka_ops.sh ├── install_dependencies.sh ├── kill_containers_bigchaindb.sh ├── kill_containers_blockchaindb.sh ├── kill_containers_veritas.sh ├── multi_node.sh ├── parse_bigchaindb_blocksize.sh ├── parse_bigchaindb_profiles.py ├── restart_cluster_bigchaindb.sh ├── restart_cluster_blockchaindb.sh ├── restart_cluster_veritas.sh ├── run_benchmark_bigchaindb_clients.sh ├── run_benchmark_bigchaindb_distribution.sh ├── run_benchmark_bigchaindb_networking.sh ├── run_benchmark_bigchaindb_nodes.sh ├── run_benchmark_bigchaindb_proctime.sh ├── run_benchmark_bigchaindb_recordsize.sh ├── run_benchmark_bigchaindb_workload.sh ├── run_benchmark_blockchaindb_blocksize.sh ├── run_benchmark_blockchaindb_clients.sh ├── run_benchmark_blockchaindb_distribution.sh ├── run_benchmark_blockchaindb_networking.sh ├── run_benchmark_blockchaindb_nodes.sh ├── run_benchmark_blockchaindb_proctime.sh ├── run_benchmark_blockchaindb_recordsize.sh ├── run_benchmark_blockchaindb_sharding.sh ├── run_benchmark_blockchaindb_workload.sh ├── run_benchmark_db.sh ├── run_benchmark_veritas_kafka_blocksize.sh ├── run_benchmark_veritas_kafka_clients.sh ├── run_benchmark_veritas_kafka_clients_tso_zk.sh ├── run_benchmark_veritas_kafka_database.sh ├── run_benchmark_veritas_kafka_distribution.sh ├── run_benchmark_veritas_kafka_networking.sh ├── 
run_benchmark_veritas_kafka_nodes.sh ├── run_benchmark_veritas_kafka_proctime.sh ├── run_benchmark_veritas_kafka_recordsize.sh ├── run_benchmark_veritas_kafka_redisql_clients.sh ├── run_benchmark_veritas_kafka_redisql_workload.sh ├── run_benchmark_veritas_kafka_workload.sh ├── run_benchmark_veritas_tendermint_clients.sh ├── run_benchmark_veritas_tendermint_distribution.sh ├── run_benchmark_veritas_tendermint_mongodb_clients.sh ├── run_benchmark_veritas_tendermint_networking.sh ├── run_benchmark_veritas_tendermint_nodes.sh ├── run_benchmark_veritas_tendermint_proctime.sh ├── run_benchmark_veritas_tendermint_recordsize.sh ├── run_benchmark_veritas_tendermint_workload.sh ├── run_iperf_ping.sh ├── run_redis_benchmark.sh ├── set_ovs_bigchaindb.sh ├── set_ovs_blockchaindb.sh ├── set_ovs_bw_limit.sh ├── set_ovs_veritas.sh ├── set_tc.sh ├── start_bigchaindb.sh ├── start_blockchaindb.sh ├── start_containers_bigchaindb.sh ├── start_containers_blockchaindb.sh ├── start_containers_veritas.sh ├── start_veritas_kafka.sh ├── start_veritas_kafka_delay.sh ├── start_veritas_kafka_redisql.sh ├── start_veritas_kafka_tso_zk.sh ├── start_veritas_tendermint.sh ├── start_veritas_tendermint_delay.sh ├── start_veritas_tendermint_mongodb.sh ├── stop_bigchaindb.sh ├── stop_blockchaindb.sh ├── stop_veritas_kafka.sh ├── stop_veritas_kafka_redisql.sh ├── stop_veritas_kafka_tso_zk.sh ├── stop_veritas_tendermint.sh ├── tendermint_config.py ├── tendermint_config_bigchaindb.py ├── unset_ovs_bigchaindb.sh ├── unset_ovs_blockchaindb.sh ├── unset_ovs_veritas.sh ├── veritas-pprof.patch ├── veritas-redisql.patch ├── veritas-tendermint-mongodb.patch ├── veritas-tso-zk.patch └── veritas-txdelay.patch ├── tso ├── README.md ├── examples │ ├── benchmark.go │ ├── client.go │ └── server.go ├── oracle.go ├── oracleclient.go ├── oraclemarshal.go └── utils.go ├── tso_zookeeper └── tso.go ├── veritas ├── benchmark │ ├── randbench │ │ └── main.go │ ├── util.go │ └── ycsbbench │ │ └── main.go ├── config.go ├── db │ 
├── db.go │ └── transaction.go ├── driver │ └── driver.go ├── entry.go ├── keylocker │ └── mgr.go ├── ledger │ ├── merkletree │ │ ├── proofs.go │ │ ├── smt.go │ │ ├── smt_test.go │ │ ├── store.go │ │ ├── treehasher.go │ │ └── utils.go │ ├── storage.go │ └── storage_test.go ├── queue.go └── server.go ├── veritastm ├── benchmark │ └── main.go ├── config.go ├── driver.go ├── ledgerapp.go ├── server.go ├── test │ ├── main.go │ └── test └── util.go └── workloads ├── latest ├── workloada ├── workloadb └── workloadc ├── sizes ├── workloada_128kB ├── workloada_2kB ├── workloada_32kB ├── workloada_512B └── workloada_8kB ├── uniform ├── workloada ├── workloadb └── workloadc └── zipfian ├── workloada ├── workloadb └── workloadc /.gitignore: -------------------------------------------------------------------------------- 1 | # Binaries for programs and plugins 2 | *.exe 3 | *.exe~ 4 | *.dll 5 | *.so 6 | *.dylib 7 | 8 | # Test binary, built with `go test -c` 9 | *.test 10 | 11 | # Output of the go coverage tool, specifically when used with LiteIDE 12 | *.out 13 | 14 | # Dependency directories (remove the comment below to include it) 15 | # vendor/ 16 | # 17 | # Logs 18 | scripts/logs* 19 | -------------------------------------------------------------------------------- /BigchainDB/config.toml: -------------------------------------------------------------------------------- 1 | # This is a TOML config file. 
2 | # For more information, see https://github.com/toml-lang/toml 3 | 4 | proxy_app = "tcp://bigchaindb:26658" 5 | moniker = "anonymous" 6 | fast_sync = true 7 | db_backend = "leveldb" 8 | log_level = "state:debug,*:error" 9 | 10 | [mempool] 11 | size = 20000 12 | 13 | [consensus] 14 | create_empty_blocks = false 15 | 16 | [rpc] 17 | laddr = "tcp://0.0.0.0:26657" 18 | 19 | [p2p] 20 | laddr = "tcp://0.0.0.0:26656" 21 | seeds = "" 22 | -------------------------------------------------------------------------------- /BigchainDB/scripts/.bigchaindb: -------------------------------------------------------------------------------- 1 | { 2 | "database": { 3 | "backend": "localmongodb", 4 | "ca_cert": null, 5 | "certfile": null, 6 | "connection_timeout": 5000, 7 | "crlfile": null, 8 | "host": "localhost", 9 | "keyfile": null, 10 | "keyfile_passphrase": null, 11 | "login": null, 12 | "max_tries": 3, 13 | "name": "bigchain", 14 | "password": null, 15 | "port": 27017, 16 | "replicaset": null, 17 | "ssl": false 18 | }, 19 | "log": { 20 | "datefmt_console": "%Y-%m-%d %H:%M:%S", 21 | "datefmt_logfile": "%Y-%m-%d %H:%M:%S", 22 | "error_file": "/root/bigchaindb-errors.log", 23 | "file": "/root/bigchaindb.log", 24 | "fmt_console": "[%(asctime)s] [%(levelname)s] (%(name)s) %(message)s (%(processName)-10s - pid: %(process)d)", 25 | "fmt_logfile": "[%(asctime)s] [%(levelname)s] (%(name)s) %(message)s (%(processName)-10s - pid: %(process)d)", 26 | "granular_levels": {}, 27 | "level_console": "info", 28 | "level_logfile": "info" 29 | }, 30 | "server": { 31 | "bind": "0.0.0.0:9984", 32 | "loglevel": "info", 33 | "workers": 4 34 | }, 35 | "tendermint": { 36 | "host": "localhost", 37 | "port": 26657, 38 | "version": "v0.31.5" 39 | }, 40 | "wsserver": { 41 | "advertised_host": "0.0.0.0", 42 | "advertised_port": 9985, 43 | "advertised_scheme": "ws", 44 | "host": "0.0.0.0", 45 | "port": 9985, 46 | "scheme": "ws" 47 | } 48 | } 49 | 50 | 
-------------------------------------------------------------------------------- /BigchainDB/scripts/ntpset: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | timedatectl set-ntp no 3 | sed -i 's/pool 0.ubuntu.pool.ntp.org iburst/server 0.de.pool.ntp.org iburst/' /etc/ntp.conf 4 | sed -i 's/pool 1.ubuntu.pool.ntp.org iburst/server 1.de.pool.ntp.org iburst/' /etc/ntp.conf 5 | sed -i 's/pool 2.ubuntu.pool.ntp.org iburst/server 2.de.pool.ntp.org iburst/' /etc/ntp.conf 6 | sed -i 's/pool 3.ubuntu.pool.ntp.org iburst/server 3.de.pool.ntp.org iburst/' /etc/ntp.conf 7 | -------------------------------------------------------------------------------- /BigchainDB/scripts/start-all.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # preconf 4 | # ./ntpset 5 | cp .bigchaindb /root/.bigchaindb 6 | cd /root 7 | 8 | # MongoDB 9 | # [ "$(stat -c %U /data/db)" = mongodb ] || chown -R mongodb /data/db 10 | nohup mongod --bind_ip_all > mongodb.log 2>&1 & 11 | 12 | # Tendermint Init 13 | /usr/local/bin/tendermint init 14 | 15 | # BigchainDB 16 | bigchaindb start > bigchaindb-output.log 2>&1 & 17 | 18 | # BigchainDB + Profiling 19 | # python3 -m cProfile -o bigchaindb-profile.log /usr/local/bin/bigchaindb start > bigchaindb-output.log 2>&1 & 20 | 21 | # BigchainDB (PV) 22 | # bigchaindb start --experimental-parallel-validation > bigchaindb-output.log 2>&1 & 23 | 24 | # BigchainDB (PV) + Profiling 25 | # python3 -m cProfile -o bigchaindb-profile.log /usr/local/bin/bigchaindb start --experimental-parallel-validation > bigchaindb-output.log 2>&1 & 26 | 27 | # Tendermint Start - Start it in the script 28 | # /usr/local/bin/tendermint node --p2p.laddr "tcp://0.0.0.0:26656" --proxy_app="tcp://0.0.0.0:26658" --consensus.create_empty_blocks=false --p2p.pex=false > tendermint.log 2>&1 & 29 | -------------------------------------------------------------------------------- 
/BigchainDB/scripts/tendermint_init: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | import json 3 | import re 4 | import sys 5 | from base64 import b64decode 6 | 7 | (TENDERMINT_USER, # str 8 | GENESIS_TIME, # str 9 | CHAIN_ID, # str 10 | B64_VALIDATORS, # base64 encoded json string 11 | VALIDATOR_POWERS, # comma separated string of ints or `default` 12 | NODE_IDS, # comma separated list of node ids 13 | NODE_IPS # comma separated list of node ips 14 | ) = sys.argv[1:] 15 | 16 | #GENESIS_FILE = ('/home/{tu}/.tendermint/config/genesis.json' 17 | # .format(tu=TENDERMINT_USER)) 18 | #TM_CONFIG_FILE = ('/home/{tu}/.tendermint/config/config.toml' 19 | # .format(tu=TENDERMINT_USER)) 20 | GENESIS_FILE = '/root/.tendermint/config/genesis.json' 21 | M_CONFIG_FILE = '/root/.tendermint/config/config.toml' 22 | 23 | def edit_genesis() -> None: 24 | """Insert validators genesis time and chain_id to genesis file.""" 25 | 26 | validators = json.loads('[{}]'.format(b64decode(B64_VALIDATORS).decode())) 27 | 28 | # Update validators powers 29 | for v, p in zip(validators, VALIDATOR_POWERS.split(',')): 30 | if p != 'default': 31 | v['power'] = p 32 | 33 | with open(GENESIS_FILE, 'r') as gf: 34 | genesis_conf = json.load(gf) 35 | genesis_conf['validators'] = validators 36 | genesis_conf['genesis_time'] = GENESIS_TIME 37 | genesis_conf['chain_id'] = CHAIN_ID 38 | 39 | with open(GENESIS_FILE, 'w') as gf: 40 | json.dump(genesis_conf, gf, indent=True) 41 | 42 | return None 43 | 44 | 45 | def edit_config() -> None: 46 | """Insert peers ids and addresses to tendermint config file.""" 47 | 48 | ips = NODE_IPS.split(',') 49 | ids = NODE_IDS.split(',') 50 | 51 | persistent_peers = ',\\\n'.join([ 52 | '{}@{}:26656'.format(nid, nip) for nid, nip in zip(ids, ips) 53 | ]) 54 | 55 | with open(TM_CONFIG_FILE, 'r') as f: 56 | tm_config_toml = f.read() 57 | 58 | with open(TM_CONFIG_FILE, 'w') as f: 59 | f.write( 60 | re.sub( 61 | 
r'^persistent_peers\s*=\s*".*"', 62 | r'persistent_peers="{pp}"'.format(pp=persistent_peers), 63 | tm_config_toml, 64 | flags=re.MULTILINE 65 | ) 66 | ) 67 | return None 68 | 69 | 70 | if __name__ == '__main__': 71 | edit_genesis() 72 | edit_config() 73 | -------------------------------------------------------------------------------- /BigchainDB/setup.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | sudo apt update 4 | sudo apt full-upgrade -y 5 | sudo apt install -y python3-pip libssl-dev unzip mongodb git tmux jq 6 | 7 | wget https://github.com/tendermint/tendermint/releases/download/v0.22.8/tendermint_0.22.8_linux_amd64.zip -O tendermint.zip 8 | unzip tendermint.zip 9 | rm tendermint.zip 10 | sudo mv tendermint /usr/local/bin 11 | 12 | sudo ufw allow 22/tcp 13 | sudo ufw allow 9984/tcp 14 | sudo ufw allow 9985/tcp 15 | sudo ufw allow 26656/tcp 16 | sudo ufw allow 26657/tcp 17 | yes | sudo ufw enable 18 | 19 | git clone https://github.com/bigchaindb/benchmark.git 20 | git clone https://github.com/bigchaindb/bigchaindb.git 21 | 22 | sudo pip3 install -e bigchaindb/ 23 | sudo pip3 install -e benchmark/ 24 | 25 | pip3 uninstall -y requests 26 | pip3 install requests==2.23.0 27 | pip3 uninstall -y python-rapidjson 28 | pip3 install python-rapidjson==0.9.1 29 | pip3 uninstall -y cryptoconditions 30 | pip3 install cryptoconditions==0.8.0 31 | pip3 uninstall -y aiohttp 32 | pip3 install aiohttp==3.6.2 33 | pip3 install psutil==5.7.0 34 | -------------------------------------------------------------------------------- /BlockchainDB/Makefile: -------------------------------------------------------------------------------- 1 | binaries := cmd/bcdbnode/bcdbnode benchmark/ycsb/ycsbtest storage/ethereum/contracts/deploy/deyploycontract 2 | nodes := 4 3 | clients := 4 4 | shards := 1 5 | workload := a 6 | distribution := ycsb_data 7 | 8 | .PHONY: all build clean download $(binaries) ethnet install verify test 9 | 10 
| all: download build ethnet install verify 11 | 12 | fast: build ethnet install verify 13 | 14 | clean: 15 | @rm -rfv .bin 16 | 17 | build: clean 18 | @go build -o ./.bin/bcdbnode $(GCFLAGS) ./cmd/bcdbnode 19 | @go build -o ./.bin/benchmark_bcdb $(GCFLAGS) ./benchmark/ycsb 20 | @go build -o ./.bin/deploy_contract $(GCFLAGS) ./storage/ethereum/contracts/deploy 21 | 22 | docker: 23 | @/bin/bash ../docker/blockchaindb/build_docker.sh 24 | 25 | download: 26 | @/bin/bash scripts/ycsb/gen_ycsb_data.sh 27 | @/bin/bash scripts/libs/get_docker_images.sh 28 | @go mod download 29 | 30 | ethnet: 31 | @/bin/bash scripts/start_eth_network.sh $(shards) $(nodes) 32 | 33 | install: 34 | @/bin/bash scripts/stop_nodes.sh 35 | @/bin/bash scripts/gen_config.sh $(shards) $(nodes) 36 | @/bin/bash scripts/start_nodes.sh $(shards) $(nodes) > server.$(shards).$(nodes).log 2>&1 37 | 38 | verify: 39 | @go run cmd/tests/main.go 40 | 41 | test: 42 | @echo "Test start with node size: $(nodes), client size: $(clients)" 43 | @/bin/bash scripts/ycsb/start_ycsb_test.sh $(nodes) $(clients) $(workload) $(distribution) >> test.$(nodes).$(clients).log 2>&1 44 | 45 | -------------------------------------------------------------------------------- /BlockchainDB/README.md: -------------------------------------------------------------------------------- 1 | # BlockchainDB 2 | 3 | ##### 1.Prepare 4 | 5 | ``` 6 | make download 7 | make build 8 | ``` 9 | 10 | 11 | 12 | #### 2.(option one) Start all by one step 13 | 14 | ``` 15 | make fast shards=1 nodes=4 16 | 17 | ``` 18 | 19 | #### 2.(option two) Start step by step 20 | 21 | ##### 2.1 Start blockchain network 22 | 23 | (default: ethereum poa) 24 | 25 | ``` 26 | make ethnet shards=1 nodes=4 27 | 28 | ``` 29 | 30 | ##### 2.2 Start bcdb nodes 31 | 32 | ``` 33 | make install shards=1 nodes=4 34 | 35 | ``` 36 | 37 | 38 | 39 | #### 3. 
Run ycsb tests 40 | 41 | ``` 42 | make test nodes=4 clients=4 workload=a 43 | ``` 44 | 45 | Check test result: test.4.4.log 46 | -------------------------------------------------------------------------------- /BlockchainDB/bcdbnode/config/config.go: -------------------------------------------------------------------------------- 1 | package config 2 | 3 | import ( 4 | "github.com/spf13/pflag" 5 | "github.com/spf13/viper" 6 | ) 7 | 8 | type Options struct { 9 | SelfID string `mapstructure:"self-id"` 10 | ServerNodeAddr string `mapstructure:"server-node-addr"` 11 | Type string `mapstructure:"shard-type"` 12 | Delay int `mapstructure:"delay"` 13 | EthNode string `mapstructure:"eth-node"` 14 | EthHexAddr string `mapstructure:"eth-hexaddr"` 15 | EthHexKey string `mapstructure:"eth-hexkey"` 16 | EthBootSigner string `mapstructure:"eth-boot-signer-address"` 17 | FabNode string `mapstructure:"fab-node"` 18 | FabConfig string `mapstructure:"fab-config"` 19 | ShardNumber int `mapstructure:"shard-number"` 20 | Shards []Shard 21 | } 22 | 23 | type Shard struct { 24 | ID string `mapstructure:"shard-id"` 25 | Type string `mapstructure:"shard-type"` 26 | PartitionKey string `mapstructure:"shard-patition-key"` 27 | EthNode string `mapstructure:"eth-node"` 28 | EthHexAddr string `mapstructure:"eth-hexaddr"` 29 | EthHexKey string `mapstructure:"eth-hexkey"` 30 | FabNode string `mapstructure:"fab-node"` 31 | FabConfig string `mapstructure:"fab-config"` 32 | } 33 | 34 | func ReadConfig(opts interface{}, configfile string) (err error) { 35 | err = viper.BindPFlags(pflag.CommandLine) 36 | if err != nil { 37 | return err 38 | } 39 | if configfile != "" { 40 | //viper.SetConfigFile(configfile) 41 | viper.SetConfigName(configfile) 42 | } else { 43 | viper.SetConfigName("blockchaindb") 44 | } 45 | 46 | viper.AddConfigPath(".") 47 | err = viper.ReadInConfig() 48 | if err != nil { 49 | return err 50 | } 51 | err = viper.Unmarshal(opts) 52 | if err != nil { 53 | return err 54 | } 55 | 56 | 
return nil 57 | } 58 | -------------------------------------------------------------------------------- /BlockchainDB/bcdbnode/server.go: -------------------------------------------------------------------------------- 1 | package service 2 | 3 | import ( 4 | "context" 5 | "log" 6 | "time" 7 | 8 | "hybrid/BlockchainDB/bcdbnode/config" 9 | pbv "hybrid/BlockchainDB/proto/blockchaindb" 10 | sharding "hybrid/BlockchainDB/shardingMgr" 11 | ) 12 | 13 | var _ pbv.BCdbNodeServer = (*ServerNode)(nil) 14 | 15 | type ServerNode struct { 16 | shardingMgr *sharding.ShardingMgr 17 | txDelay int 18 | } 19 | 20 | func NewServerNode(conf *config.Options) (*ServerNode, error) { 21 | // version 1.0 22 | // ethereumconn, err := EthClientSDK.NewEthereumKVStoreInstance(conf.EthNode, conf.EthHexAddr, conf.EthHexKey) 23 | // if err != nil { 24 | // log.Println("Failed to NewEthereumKVStoreInstance", err) 25 | // return nil, err 26 | // } 27 | // fabricconn, err := FabClientSDK.NewFabricKVStoreInstance() 28 | // if err != nil { 29 | // fmt.Println("Failed to NewFabricKVStoreInstance", err) 30 | // return nil, err 31 | // } 32 | // shamgr := &sharding.ShardingMgr{EthConn: ethereumconn, FabConn: fabricconn} 33 | 34 | // version 2.0 35 | shamgr, err := sharding.NewShardingMgr(conf) 36 | if err != nil { 37 | log.Println("Failed to NewShardingMgr", err) 38 | return nil, err 39 | } 40 | if conf.Delay > 0 { 41 | log.Println("Enable txDelay Experiment(ms): ", conf.Delay) 42 | } 43 | return &ServerNode{shardingMgr: shamgr, txDelay: conf.Delay}, nil 44 | } 45 | 46 | func (sv *ServerNode) Get(ctx context.Context, req *pbv.GetRequest) (*pbv.GetResponse, error) { 47 | if sv.txDelay > 0 { 48 | time.Sleep(time.Duration(sv.txDelay) * time.Millisecond) 49 | } 50 | val, err := sv.shardingMgr.Read(ctx, req.GetKey()) 51 | if err != nil { 52 | return nil, err 53 | } 54 | return &pbv.GetResponse{Value: []byte(val)}, nil 55 | } 56 | 57 | func (sv *ServerNode) Set(ctx context.Context, req *pbv.SetRequest) 
(*pbv.SetResponse, error) { 58 | if sv.txDelay > 0 { 59 | time.Sleep(time.Duration(sv.txDelay) * time.Millisecond) 60 | } 61 | tx, err := sv.shardingMgr.Write(ctx, req.GetKey(), req.GetValue()) 62 | if err != nil { 63 | return nil, err 64 | } 65 | return &pbv.SetResponse{Tx: tx}, nil 66 | } 67 | 68 | func (sv *ServerNode) Verify(ctx context.Context, req *pbv.VerifyRequest) (*pbv.VerifyResponse, error) { 69 | result, err := sv.shardingMgr.Verify(ctx, req.GetOpt(), req.GetKey(), req.GetTx()) 70 | if err != nil { 71 | return nil, err 72 | } 73 | return &pbv.VerifyResponse{Success: result}, nil 74 | } 75 | -------------------------------------------------------------------------------- /BlockchainDB/benchmark/util.go: -------------------------------------------------------------------------------- 1 | package benchmark 2 | 3 | import ( 4 | "bufio" 5 | "io" 6 | "math/rand" 7 | "strings" 8 | ) 9 | 10 | const letterBytes = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ" 11 | const ( 12 | letterIdxBits = 6 // 6 bits to represent a letter index 13 | letterIdxMask = 1<= 0; { 59 | if remain == 0 { 60 | cache, remain = rand.Int63(), letterIdxMax 61 | } 62 | if idx := int(cache & letterIdxMask); idx < len(letterBytes) { 63 | b[i] = letterBytes[idx] 64 | i-- 65 | } 66 | cache >>= letterIdxBits 67 | remain-- 68 | } 69 | return string(b) 70 | } 71 | -------------------------------------------------------------------------------- /BlockchainDB/blockchainconnectors/blockchainConnector.go: -------------------------------------------------------------------------------- 1 | package connectors 2 | 3 | import "context" 4 | 5 | type BlockchainConnector interface { 6 | Read(context.Context, string) (string, error) 7 | Write(context.Context, string, string) (string, error) 8 | Verify(context.Context, string, string, string) (bool, error) 9 | } 10 | -------------------------------------------------------------------------------- /BlockchainDB/blockchainconnectors/test/test.go: 
-------------------------------------------------------------------------------- 1 | package test 2 | 3 | import ( 4 | Connectors "hybrid/BlockchainDB/blockchainconnectors" 5 | ) 6 | 7 | func NewShardingMgr() (Connectors.BlockchainConnector, error) { 8 | var bcConn Connectors.BlockchainConnector 9 | bcConn = &Connectors.Testconnector{} 10 | return bcConn, nil 11 | // bcConn, err := EthClientSDK.NewEthereumKVStoreInstance(conf.EthNode, conf.EthHexAddr, conf.EthHexKey) 12 | // if err != nil { 13 | // log.Println("Failed to NewEthereumKVStoreInstance", err) 14 | // return nil, err 15 | // } 16 | //return &ShardingMgr{BCConn: bcConn}, nil 17 | } 18 | -------------------------------------------------------------------------------- /BlockchainDB/blockchainconnectors/testconnector.go: -------------------------------------------------------------------------------- 1 | package connectors 2 | 3 | import "context" 4 | 5 | type Testconnector struct { 6 | } 7 | 8 | func (t *Testconnector) Read(ctx context.Context, key string) (string, error) { 9 | return "", nil 10 | } 11 | 12 | func (t *Testconnector) Write(ctx context.Context, key, value string) (string, error) { 13 | return "", nil 14 | } 15 | 16 | func (t *Testconnector) Verify(ctx context.Context, opt, key, tx string) (bool, error) { 17 | return true, nil 18 | } 19 | -------------------------------------------------------------------------------- /BlockchainDB/cmd/bcdbnode/bcdbnode.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "flag" 5 | "fmt" 6 | "log" 7 | "net" 8 | "os" 9 | "os/signal" 10 | 11 | "google.golang.org/grpc" 12 | service "hybrid/BlockchainDB/bcdbnode" 13 | "hybrid/BlockchainDB/bcdbnode/config" 14 | pbv "hybrid/BlockchainDB/proto/blockchaindb" 15 | ) 16 | 17 | func main() { 18 | 19 | configFile := flag.String("config", "config.toml", "The path to the config file") 20 | flag.Parse() 21 | var conf config.Options 22 | err := 
config.ReadConfig(&conf, *configFile) //default config file "config.toml" 23 | if err != nil { 24 | fmt.Fprintf(os.Stderr, "Failed to read config: %v\n", err) 25 | os.Exit(1) 26 | } 27 | 28 | s := grpc.NewServer() 29 | 30 | svr, err := service.NewServerNode(&conf) 31 | if err != nil { 32 | log.Fatalf("New ServerNode err %v", err) 33 | } 34 | 35 | pbv.RegisterBCdbNodeServer(s, svr) 36 | lis, err := net.Listen("tcp", conf.ServerNodeAddr) 37 | if err != nil { 38 | log.Fatalf("Node listen err %v", err) 39 | } else { 40 | log.Println("Node listen address: " + conf.ServerNodeAddr) 41 | } 42 | 43 | go func() { 44 | log.Println("Node Serving gRPC: ", conf.ServerNodeAddr) 45 | s.Serve(lis) 46 | }() 47 | 48 | ch := make(chan os.Signal, 1) 49 | signal.Notify(ch, os.Interrupt, os.Kill) 50 | sig := <-ch 51 | log.Printf("Received signal %v, quiting gracefully", sig) 52 | } 53 | -------------------------------------------------------------------------------- /BlockchainDB/cmd/tests/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "context" 5 | "flag" 6 | "fmt" 7 | "sync" 8 | "time" 9 | 10 | pbv "hybrid/BlockchainDB/proto/blockchaindb" 11 | 12 | "google.golang.org/grpc" 13 | ) 14 | 15 | func main() { 16 | //local testing mode 17 | //addr := "127.0.0.1:50001" 18 | 19 | // ovs multi_node testing mode 20 | //addr := "192.168.20.2:50001" 21 | addr := flag.String("addr", "192.168.20.2:50001", "bcdb server node address") 22 | flag.Parse() 23 | key := "tianwen" + time.Now().Format("20060102150405") 24 | value := "66666666666666666666666666" 25 | 26 | conn, err := grpc.Dial(*addr, grpc.WithInsecure()) 27 | if err != nil { 28 | panic(err) 29 | } 30 | cli := pbv.NewBCdbNodeClient(conn) 31 | 32 | start := time.Now() 33 | res, err := cli.Set(context.Background(), &pbv.SetRequest{Key: key, Value: value}) 34 | if err != nil { 35 | fmt.Println(err) 36 | } else { 37 | fmt.Println(res.Tx) 38 | } 39 | 
fmt.Println("1.BlockchainDB Set done.") 40 | 41 | lastkey := key 42 | lastopt := "set" 43 | lasttx := res.Tx 44 | wg3 := sync.WaitGroup{} 45 | wg3.Add(1) 46 | go func() { 47 | defer wg3.Done() 48 | for { 49 | if lastkey == "" || lastopt == "" || lasttx == "" { 50 | fmt.Println("No setopt tx to verify .") 51 | break 52 | } 53 | verify, err := cli.Verify(context.Background(), &pbv.VerifyRequest{Opt: lastopt, Key: lastkey, Tx: lasttx}) 54 | if err != nil { 55 | fmt.Println(err) 56 | } else { 57 | if verify != nil && verify.Success { 58 | fmt.Println("Last tx verify success.") 59 | } else { 60 | fmt.Println("Last tx verify stop.") 61 | } 62 | break 63 | } 64 | 65 | time.Sleep(2 * time.Second) 66 | } 67 | }() 68 | wg3.Wait() 69 | fmt.Println("2.BlockchainDB verify done.") 70 | 71 | res1, err := cli.Get(context.Background(), &pbv.GetRequest{Key: key}) 72 | if err != nil { 73 | fmt.Println(err) 74 | } else { 75 | fmt.Println(string(res1.Value)) 76 | 77 | } 78 | elapsed := time.Since(start) 79 | fmt.Printf("Tx set-get took %s\n", elapsed) 80 | fmt.Println("3.Blockchain Get done.") 81 | } 82 | -------------------------------------------------------------------------------- /BlockchainDB/cmd/tests/main.go.bak: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "context" 5 | "flag" 6 | "fmt" 7 | "sync" 8 | "time" 9 | 10 | pbv "hybrid/BlockchainDB/proto/blockchaindb" 11 | 12 | "google.golang.org/grpc" 13 | ) 14 | 15 | func main() { 16 | //local testing mode 17 | //addr := "127.0.0.1:50001" 18 | 19 | // ovs multi_node testing mode 20 | //addr := "192.168.20.2:50001" 21 | addr := flag.String("addr", "192.168.20.2:50001", "bcdb server node address") 22 | flag.Parse() 23 | key := "tianwen" + time.Now().Format("20060102150405") 24 | value := "66666666666666666666666666" 25 | 26 | conn, err := grpc.Dial(*addr, grpc.WithInsecure()) 27 | if err != nil { 28 | panic(err) 29 | } 30 | cli := pbv.NewBCdbNodeClient(conn) 31 
| 32 | start := time.Now() 33 | res, err := cli.Set(context.Background(), &pbv.SetRequest{Key: key, Value: value}) 34 | if err != nil { 35 | fmt.Println(err) 36 | } else { 37 | fmt.Println(res.Tx) 38 | } 39 | fmt.Println("1.BlockchainDB Set done.") 40 | 41 | counter := 0 42 | for { 43 | counter += 1 44 | res1, err := cli.Get(context.Background(), &pbv.GetRequest{Key: key}) 45 | if err != nil { 46 | fmt.Println(err) 47 | } else { 48 | fmt.Println(string(res1.Value)) 49 | elapsed := time.Since(start) 50 | fmt.Printf("Tx set-get took %s\n", elapsed) 51 | break 52 | } 53 | } 54 | fmt.Println("read query round ", counter) 55 | fmt.Println("2.Blockchain Get done.") 56 | 57 | lastkey := key 58 | lastopt := "set" 59 | wg3 := sync.WaitGroup{} 60 | wg3.Add(1) 61 | go func() { 62 | defer wg3.Done() 63 | for { 64 | if lastkey == "" || lastopt == "" { 65 | fmt.Println("No setopt tx to verify .") 66 | break 67 | } 68 | verify, err := cli.Verify(context.Background(), &pbv.VerifyRequest{Opt: lastopt, Key: lastkey}) 69 | if err != nil { 70 | fmt.Println(err) 71 | } else { 72 | if verify != nil && verify.Success { 73 | fmt.Println("Last tx verify success.") 74 | } else { 75 | fmt.Println("Last tx verify done.") 76 | } 77 | break 78 | } 79 | 80 | time.Sleep(2 * time.Second) 81 | } 82 | }() 83 | wg3.Wait() 84 | fmt.Println("3.BlockchainDB verify done.") 85 | } 86 | -------------------------------------------------------------------------------- /BlockchainDB/proto/blockchaindb/blockchaindb.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | package controller; 3 | 4 | option go_package = "proto/blockchaindb"; 5 | 6 | service BCdbNode { 7 | rpc Get (GetRequest) returns (GetResponse); 8 | rpc Set (SetRequest) returns (SetResponse); 9 | rpc Verify (VerifyRequest) returns (VerifyResponse); 10 | } 11 | 12 | message GetRequest { 13 | string key = 1; 14 | } 15 | 16 | message GetResponse { 17 | bytes value = 1; 18 | } 19 | 20 | 
message SetRequest { 21 | string key = 1; 22 | string value = 2; 23 | } 24 | 25 | message SetResponse { 26 | string tx = 1; 27 | } 28 | 29 | message VerifyRequest { 30 | string opt = 1; 31 | string key = 2; 32 | string tx = 3; 33 | } 34 | 35 | message VerifyResponse { 36 | bool success = 1; 37 | } 38 | -------------------------------------------------------------------------------- /BlockchainDB/scripts/eth/deploy_contract.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # set -ex 3 | 4 | shardIDs=${1:-1} 5 | nodeIDs=${2:-4} 6 | dir=$(dirname "$0") 7 | echo $dir 8 | 9 | cd `dirname ${BASH_SOURCE-$0}` 10 | . env.sh 11 | cd - 12 | 13 | 14 | bin="${ETH_BIN}/deploy_contract" 15 | configDir="config/config.eth.${shardIDs}.${nodeIDs}" 16 | ls ${configDir} 17 | 18 | if [ ! -f ${bin} ]; then 19 | echo "Binary file ${bin} not found!" 20 | echo "Hint: " 21 | echo " Please build binaries by run command: make build " 22 | echo "exit 1 " 23 | exit 1 24 | fi 25 | 26 | for (( c=1; c<=${shardIDs}; c++ )) 27 | do 28 | #$bin --config="${configDir}/shard_${c}" >> ${configDir}/*.toml 29 | $bin --config="${configDir}/shard_${c}" | tee -a ${configDir}/*.toml 30 | echo "Deploy contract to bcdbnode$c wtih ${configDir}/shard_${c}.conf" 31 | done 32 | -------------------------------------------------------------------------------- /BlockchainDB/scripts/eth/env.sh: -------------------------------------------------------------------------------- 1 | WORKSPACE=$HOME/go/src/github.com/nusdbsystem/Hybrid-Blockchain-Database-Systems/BlockchainDB 2 | ETH_CONFIG=$WORKSPACE/config 3 | ETH_BIN=$WORKSPACE/.bin 4 | ETH_HOME=$WORKSPACE/storage/ethereum 5 | ETH_DATA=$HOME/Data/eth 6 | 7 | -------------------------------------------------------------------------------- /BlockchainDB/scripts/eth/gen_eth_config.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #args: number_of_nodes, number of 
networks 3 | #replicaIDs=${1:-1} 4 | set -ex 5 | 6 | shardIDs=${1:-1} 7 | nodeIDs=${2:-4} 8 | 9 | cd `dirname ${BASH_SOURCE-$0}` 10 | . env.sh 11 | 12 | rm -rf ${ETH_DATA}* 13 | mkdir -p ${ETH_CONFIG} 14 | genesisDir="${ETH_CONFIG}/config.eth.${shardIDs}.${nodeIDs}" 15 | genesisTemplate=${ETH_HOME}/networks/CustomGenesis.template 16 | mkdir -p $genesisDir 17 | 18 | echo '# This is custom genesis config template given about each shard' 19 | template=$(<${genesisTemplate}) 20 | echo "${template}" 21 | 22 | for (( j=1; j<=${shardIDs}; j++ )) 23 | do 24 | genesisFile="${genesisDir}/CustomGenesis_${j}.json" 25 | rm -f ${genesisFile} 26 | touch ${genesisFile} 27 | chainIdByShard=$((1000 + ${j})) 28 | cp $genesisTemplate ${genesisFile} 29 | for (( i=1; i<=${nodeIDs}; i++ )) 30 | do 31 | signer1=`geth --datadir=${ETH_DATA}_${j}_${i} --password <(echo -n "") account new | cut -d '{' -f2 | cut -d '}' -f1` 32 | # sed -i "s/Signer${i}/$signer1/" ${genesisFile} 33 | if (( ${i} < 2 )); then 34 | shardsigner=${signer1} 35 | allocSigners=\"${signer1}\"': { "balance": "90000000" }' 36 | else 37 | allocSigners=${allocSigners}', '\"${signer1}\"': { "balance": "90000000" }' 38 | fi 39 | # set 4 signers 40 | if (( ${i} <= ${nodeIDs} )); then 41 | signers=${signers}${signer1} 42 | fi 43 | echo "eth-node = \"${HOME}/Data/eth_${shardIDs}_${i}/geth.ipc\"" > ${genesisDir}/node_${j}_${i}.toml 44 | echo "eth-account-address = \"${signer1}\"" >> ${genesisDir}/node_${j}_${i}.toml 45 | hexkey=$(jq -r '.crypto.ciphertext' <<< cat ${HOME}/Data/eth_${j}_${i}/keystore/UTC*) 46 | echo "eth-hexkey = \"${hexkey}\"" >> ${genesisDir}/node_${j}_${i}.toml 47 | echo "Generate node account file ${genesisDir}/node_${j}_${i}.toml" 48 | done 49 | extraData="0x0000000000000000000000000000000000000000000000000000000000000000${signers}0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000" 50 | sed -i "s/ChainIdByShard/${chainIdByShard}/" 
${genesisFile} 51 | sed -i "s/ExtraData/${extraData}/" ${genesisFile} 52 | sed -i "s/AllocSigners/${allocSigners}/" ${genesisFile} 53 | 54 | echo "Generate genesis file ${genesisFile}" 55 | 56 | echo "eth-node = \"${HOME}/Data/eth_${j}_1/geth.ipc\"" > ${genesisDir}/shard_${j}.toml 57 | echo "eth-boot-signer-address = \"${shardsigner}\"" >> ${genesisDir}/shard_${j}.toml 58 | hexkey=$(jq -r '.crypto.ciphertext' <<< cat ${HOME}/Data/eth_${j}_1/keystore/UTC*) 59 | echo "eth-hexkey = \"${hexkey}\"" >> ${genesisDir}/shard_${j}.toml 60 | echo "Generate shard file ${genesisDir}/shard_${j}.toml" 61 | 62 | echo "chainId: $chainIdByShard" 63 | done 64 | -------------------------------------------------------------------------------- /BlockchainDB/scripts/eth/geth_build_from_source.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # geth-v1.8.23 4 | 5 | git clone https://github.com/ethereum/go-ethereum.git 6 | cd go-ethereum 7 | git checkout v1.8.23 8 | export GO111MODULE=off 9 | make -------------------------------------------------------------------------------- /BlockchainDB/scripts/eth/init_eth_account.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | #args: number of networks, number_of_nodes 4 | # 5 | set -ex 6 | 7 | shardIDs=${1:-1} 8 | nodeIDs=${2:-4} 9 | 10 | cd `dirname ${BASH_SOURCE-$0}` 11 | . 
env.sh 12 | cd - 13 | genesisDir="${ETH_CONFIG}/config.eth.${shardIDs}.${nodeIDs}" 14 | 15 | for (( j=1; j<=${shardIDs}; j++ )) 16 | do 17 | genesisFile="${genesisDir}/CustomGenesis_${j}.json" 18 | for (( i=1; i<=${nodeIDs}; i++ )) 19 | do 20 | echo "Using custom genesis file: ${genesisFile}, datadir: ${ETH_DATA}_${j}_${i}" 21 | # rm -rf ${ETH_DATA}_${j}_${i}/* 22 | geth --datadir=${ETH_DATA}_${j}_${i} init ${genesisFile} 23 | done 24 | done -------------------------------------------------------------------------------- /BlockchainDB/scripts/eth/start_eth_container.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | #set -x 3 | 4 | 5 | 6 | replicaIDs=${1:-1} 7 | shardID=${2:-1} 8 | # docker pull ethereum/client-go:v1.8.23 9 | 10 | docker rm -f $(sudo -S docker ps -aq --filter ancestor=ethereum/client-go ) 11 | 12 | for (( c=1; c<=${replicaIDs}; c++ )) 13 | do 14 | docker run -itd --name geth${c}-shard${shardID} -p $((20070 + ${c})):8545 ethereum/client-go 15 | echo "geth start with port $((20070 + ${c}))" 16 | done 17 | 18 | echo "#########################################################################" 19 | -------------------------------------------------------------------------------- /BlockchainDB/scripts/eth/start_eth_node.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -ex 3 | 4 | shardID=${1:-1} 5 | nodes=${2:-4} 6 | #bootnode 7 | nodeID=1 8 | 9 | cd `dirname ${BASH_SOURCE-$0}` 10 | . 
env.sh 11 | cd - 12 | # geth --datadir=${ETH_DATA}_${nodeID} --rpc --rpcport "8000" --syncmode "full" --cache 4096 --gasprice 0 --networkid 10001 --mine --minerthreads 1 --unlock 0 console 2> ${ETH_DATA}_${nodeID}/geth.log 13 | #--password <(echo -n "") js <(echo 'console.log(admin.nodeInfo.enode);') 14 | #--nodiscover 15 | #--targetgaslimit '67219750000000' 16 | 17 | # geth --datadir=${ETH_DATA}_${shardID}_${nodeID} \ 18 | # --rpc --rpcport "$((9000 + ${nodeID} + 1000*${shardID}))" \ 19 | # --port "$((30303 + ${nodeID} + 1000*(${shardID}-1)))" \ 20 | # -networkid $((1000 + ${shardID})) \ 21 | # --syncmode "full" --cache 4096 --gasprice 0 -\ 22 | # --mine --minerthreads 1 \ 23 | # --unlock 0 --password <(echo -n "") 2> ${ETH_DATA}_${shardID}_${nodeID}/eth.log & 24 | 25 | # pkill -f "geth" || true 26 | # kill $(ps -ef|grep "geth"|grep -v "grep"|awk '{print $2}') || true 27 | pgeth=`ps -ef|grep "geth"|grep -v "grep"|wc -l` 28 | echo ${pgeth} 29 | if (( ${pgeth} > 0 )); then 30 | kill $(ps -ef|grep "geth"|grep -v "grep"|awk '{print $2}') 31 | fi 32 | sleep 2 33 | # start bootnode 34 | # --miner.gaslimit 67219750000000 35 | # --netrestrict --gcmode 'archive' 36 | geth --datadir=${ETH_DATA}_${shardID}_${nodeID} \ 37 | --rpc --rpcaddr 'localhost' --rpcport "$((9000 + ${nodeID} + 1000*${shardID}))" \ 38 | --port "$((30303 + ${nodeID} + 1000*(${shardID}-1)))" \ 39 | --gasprice 0 --targetgaslimit 10000000 --mine --minerthreads 1 --unlock 0 --password <(echo -n "") \ 40 | --syncmode 'full' \ 41 | --nat extip:127.0.0.1 \ 42 | -networkid $((1000 + ${shardID})) 2> ${ETH_DATA}_${shardID}_${nodeID}/eth.log & 43 | 44 | echo "Sleep 4s to wait for bootnode start..." 
45 | sleep 4 46 | 47 | bootenode=`geth attach ${ETH_DATA}_${shardID}_${nodeID}/geth.ipc --exec admin.nodeInfo.enode | tr -d '"'` 48 | 49 | for (( j=2; j<=${nodes}; j++ )) 50 | do 51 | geth --datadir=${ETH_DATA}_${shardID}_${j} \ 52 | --rpc --rpcaddr 'localhost' --rpcport "$((9000 + ${j} + 1000*${shardID}))" \ 53 | --port "$((30303 + ${j} + 1000*(${shardID}-1)))" \ 54 | --gasprice 0 --targetgaslimit 10000000 --mine --minerthreads 1 --unlock 0 --password <(echo -n "") \ 55 | --syncmode 'full' \ 56 | -networkid $((1000 + ${shardID})) \ 57 | --bootnodes ${bootenode} 2> ${ETH_DATA}_${shardID}_${j}/eth.log & 58 | echo "member node: ${ETH_DATA}_${shardID}_${j}" 59 | done 60 | 61 | echo "Sleep 2s to add peers to network..." 62 | sleep 2 63 | # check bootnode admin peers 64 | geth attach ${ETH_DATA}_${shardID}_${nodeID}/geth.ipc --exec admin.peers 65 | 66 | #geth --unlock ${BootSignerAddress} --gasprice 0 --password <(echo -n "") 67 | -------------------------------------------------------------------------------- /BlockchainDB/scripts/experiments/experiment1.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | #set -x 3 | 4 | size=${1:-4} 5 | 6 | # Experiment 1 7 | echo "========================================================" 8 | printf -v date '%(%Y-%m-%d %H:%M:%S)T\n' -1 9 | echo " Experiment 1 start" 10 | #make fast shards=1 nodes=${size} 11 | make test nodes=${size} clients=4 12 | make test nodes=${size} clients=8 13 | make test nodes=${size} clients=16 14 | make test nodes=${size} clients=32 15 | make test nodes=${size} clients=64 16 | make test nodes=${size} clients=128 17 | make test nodes=${size} clients=192 18 | make test nodes=${size} clients=256 19 | echo " Experiment 1 stop" 20 | printf -v date '%(%Y-%m-%d %H:%M:%S)T\n' -1 21 | echo "========================================================" 22 | -------------------------------------------------------------------------------- 
/BlockchainDB/scripts/experiments/experiment2.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | #set -x 3 | 4 | bestclients=${1:-16} 5 | 6 | # Experiment 2 7 | echo "========================================================" 8 | printf -v date '%(%Y-%m-%d %H:%M:%S)T\n' -1 9 | echo " Experiment 2 start" 10 | make fast nodes=8 11 | make test nodes=8 clients=${bestclients} 12 | 13 | make fast nodes=16 14 | make test nodes=16 clients=${bestclients} 15 | 16 | make fast nodes=32 17 | make test nodes=32 clients=${bestclients} 18 | 19 | make fast nodes=64 20 | make test nodes=64 clients=${bestclients} 21 | 22 | printf -v date '%(%Y-%m-%d %H:%M:%S)T\n' -1 23 | echo "========================================================" 24 | -------------------------------------------------------------------------------- /BlockchainDB/scripts/experiments/experiment3.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -ex 3 | 4 | bestnodes=${1:-4} 5 | bestclients=${2:-256} 6 | 7 | # Experiment 3 8 | # DISTROS="uniform latest zipfian" 9 | echo "========================================================" 10 | printf -v date '%(%Y-%m-%d %H:%M:%S)T\n' -1 11 | echo " Experiment 3 start" 12 | #make fast nodes=${bestnodes} 13 | make test nodes=${bestnodes} clients=${bestclients} distribution=ycsb_data 14 | make test nodes=${bestnodes} clients=${bestclients} distribution=ycsb_data_latest 15 | make test nodes=${bestnodes} clients=${bestclients} distribution=ycsb_data_zipfian 16 | printf -v date '%(%Y-%m-%d %H:%M:%S)T\n' -1 17 | echo "========================================================" 18 | -------------------------------------------------------------------------------- /BlockchainDB/scripts/experiments/experiment4.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -ex 3 | 4 | bestnodes=${1:-4} 5 | 
bestclients=${2:-256} 6 | 7 | # Experiment 4 8 | echo "========================================================" 9 | printf -v date '%(%Y-%m-%d %H:%M:%S)T\n' -1 10 | echo " Experiment 4 start" 11 | #make fast nodes=${bestnodes} 12 | make test nodes=${bestnodes} clients=${bestclients} workload=a 13 | make test nodes=${bestnodes} clients=${bestclients} workload=b 14 | make test nodes=${bestnodes} clients=${bestclients} workload=c 15 | printf -v date '%(%Y-%m-%d %H:%M:%S)T\n' -1 16 | echo "========================================================" 17 | -------------------------------------------------------------------------------- /BlockchainDB/scripts/experiments/experiment6.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -ex 3 | 4 | bestnodes=${1:-4} 5 | bestclients=${2:-256} 6 | TXSIZES="512B 2kB 8kB 32kB 128kB" 7 | 8 | # Experiment 6 9 | # DISTROS="uniform latest zipfian" 10 | echo "========================================================" 11 | printf -v date '%(%Y-%m-%d %H:%M:%S)T\n' -1 12 | echo " Experiment 3 start" 13 | #make fast nodes=${bestnodes} 14 | make test nodes=${bestnodes} clients=${bestclients} distribution=ycsb_data_512B 15 | make test nodes=${bestnodes} clients=${bestclients} distribution=ycsb_data_2kB 16 | make test nodes=${bestnodes} clients=${bestclients} distribution=ycsb_data_8kB 17 | make test nodes=${bestnodes} clients=${bestclients} distribution=ycsb_data_32kB 18 | make test nodes=${bestnodes} clients=${bestclients} distribution=ycsb_data_128kB 19 | printf -v date '%(%Y-%m-%d %H:%M:%S)T\n' -1 20 | echo "========================================================" 21 | -------------------------------------------------------------------------------- /BlockchainDB/scripts/experiments/run_all_tests.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -ex 3 | 4 | 5 | bestnodes=${1:-4} 6 | bestclients=${2:-256} 7 | 
dir=$(pwd) 8 | mkdir -p experiments.log 9 | 10 | # Experiment 4 11 | #${dir}/scripts/experiments/experiment4.sh ${bestnodes} ${bestclients} >> experiments.log/experiment4.log 2>&1 12 | 13 | 14 | # Experiment 3 15 | ${dir}/scripts/experiments/experiment3.sh ${bestnodes} ${bestclients} >> experiments.log/experiment3.log 2>&1 16 | 17 | # Experiment 6 18 | ${dir}/scripts/experiments/experiment6.sh ${bestnodes} ${bestclients} >> experiments.log/experiment6.log 2>&1 19 | 20 | # Experiment 1 21 | ${dir}/scripts/experiments/experiment1.sh 4 >> experiments.log/experiment1.log 2>&1 22 | 23 | 24 | # Experiment 2 25 | #${dir}/scripts/experiments/experiment2.sh ${bestclients} >> experiments.log/experiment2.log 2>&1 26 | 27 | -------------------------------------------------------------------------------- /BlockchainDB/scripts/fab/restart.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # fabric repository: run script to deploy kvstore chaincode 4 | #./network.sh -h 5 | ./network.sh down 6 | ./network.sh up createChannel 7 | docker ps -a 8 | 9 | ./network.sh createChannel -c kvchannel 10 | ./network.sh deployCC -c kvchannel -ccn kvstore -ccp ../kvstore/chaincode-go -ccl go 11 | 12 | echo "=================== Success ===================" 13 | -------------------------------------------------------------------------------- /BlockchainDB/scripts/fab/setupEnv.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # 4 | 5 | mkdir -p ~/go/src/github.com/hyperledger 6 | cd ~/go/src/github.com/hyperledger 7 | curl -sSL https://bit.ly/2ysbOFE | bash -s -- 2.3.1 1.4.9 8 | -------------------------------------------------------------------------------- /BlockchainDB/scripts/gen_config.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -ex 3 | 4 | 5 | shardIDs=${1:-1} 6 | replicaIDs=${2:-4} 7 | 8 | echo 
"Usage: ./scripts/gen_config.sh 4 1" 9 | echo "Generate config files, shards: ${shardIDs}, replicas: ${replicaIDs}" 10 | cd `dirname ${BASH_SOURCE-$0}` 11 | . eth/env.sh 12 | cd - 13 | tomlDir="${ETH_CONFIG}/config.nodes.${shardIDs}.${replicaIDs}" 14 | shardDir="${ETH_CONFIG}/config.eth.${shardIDs}.${replicaIDs}" 15 | rm -rf ${tomlDir}/* 16 | mkdir -p ${tomlDir} 17 | 18 | for (( i=1; i<=${shardIDs}; i++ )) 19 | do 20 | for (( c=1; c<=${replicaIDs}; c++ )) 21 | do 22 | tomlFile="${tomlDir}/config_${i}_${c}.toml" 23 | rm -f ${tomlFile} 24 | touch ${tomlFile} 25 | echo "self-id = ${i}_${c}" > ${tomlFile} 26 | echo "server-node-addr = \"127.0.0.1:$((50000 + ${c}))\"" >> ${tomlFile} 27 | echo "shard-type = \"ethereum\"" >> ${tomlFile} 28 | echo "shard-number = \"${shardIDs}\"" >> ${tomlFile} 29 | (cat "$shardDir/node_${i}_${c}.toml"; echo) >> ${tomlFile} 30 | # echo "fab-node = \"127.0.0.1:$((40000 + ${c}))\"" >> ${tomlFile} 31 | # echo "fab-config = \"connection${c}.yaml\"" >> ${tomlFile} 32 | echo '' >> ${tomlFile} 33 | 34 | echo '# This is the information that each replica is given about the other shards' >> ${tomlFile} 35 | for (( j=1; j<=${shardIDs}; j++ )) 36 | do 37 | echo '[[shards]]' >> ${tomlFile} 38 | echo "shard-id = ${j}" >> ${tomlFile} 39 | echo "shard-partition-key = \"eth${j}-\"" >> ${tomlFile} 40 | echo "shard-type = \"ethereum\"" >> ${tomlFile} 41 | (cat "$shardDir/shard_${j}.toml"; echo) >> ${tomlFile} 42 | #echo "eth-node = \"http://localhost:$((9000 + ${c} + 1000*${j}))\"" >> ${tomlFile} 43 | # echo "eth-node = \"$HOME/Data/eth_${shardIDs}_${c}/geth.ipc\"" >> ${tomlFile} 44 | # echo "eth-hexaddr = \"0x70fa2c27a4e365cdf64b2d8a6c36121eb80bb442\"" >> ${tomlFile} 45 | # echo "eth-hexkey = \"35fc8e4f2065b6813078a08069e3a946f203029ce2bc6a62339d30c37f978403\"" >> ${tomlFile} 46 | # echo "fab-node = \"127.0.0.1:$((40000 + ${j}))\"" >> ${tomlFile} 47 | # echo "fab-config = \"connection${j}.yaml\"" >> ${tomlFile} 48 | echo '' >> ${tomlFile} 49 | done 50 | 
echo "Generate config file ${tomlFile}" 51 | done 52 | done 53 | 54 | echo "Done!" -------------------------------------------------------------------------------- /BlockchainDB/scripts/libs/gen_proto.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | dir=$(dirname "${BASH_SOURCE[0]}") 3 | #go get -d google.golang.org/grpc 4 | #go get -d github.com/golang/protobuf/{proto,protoc-gen-go} 5 | 6 | protoc --go_out=plugins=grpc:. ./proto/blockchaindb/blockchaindb.proto 7 | 8 | -------------------------------------------------------------------------------- /BlockchainDB/scripts/libs/install.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # installing ethereum and docker 3 | sudo apt-get install software-properties-common 4 | sudo add-apt-repository -y ppa:ethereum/ethereum 5 | sudo add-apt-repository -y ppa:ethereum/ethereum-dev 6 | sudo apt-get install apt-transport-https ca-certificates 7 | sudo apt-get update 8 | sudo apt-get install -y ethereum 9 | sudo apt-get install solc 10 | 11 | 12 | 13 | # Tools 14 | docker pull ethereum/client-go:v1.8.23 15 | sudo apt-get install jq 16 | -------------------------------------------------------------------------------- /BlockchainDB/scripts/start_eth_network.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -ex 3 | 4 | echo "restart: kill all ethnodes" 5 | # pkill -f "geth" || true 6 | pgeth=`ps -ef|grep "geth"|grep -v "grep"|wc -l` 7 | echo ${pgeth} 8 | if (( ${pgeth} > 0 )); then 9 | kill $(ps -ef|grep "geth"|grep -v "grep"|awk '{print $2}') 10 | fi 11 | 12 | sleep 2 13 | 14 | echo "Start ethereum nodes, Please input shard size(default 1), node size(default 4)" 15 | shards=${1:-1} 16 | nodes=${2:-4} 17 | 18 | 19 | dir=$(dirname "$0") 20 | echo "##################### generate ethereum genesis config ##########" 21 | ${dir}/eth/gen_eth_config.sh 
${shards} ${nodes} 22 | 23 | echo "##################### init geth nodes using genesis file ##########" 24 | ${dir}/eth/init_eth_account.sh ${shards} ${nodes} 25 | 26 | echo "##################### start geth bootnode and add peers ##########" 27 | ${dir}/eth/start_eth_node.sh ${shards} ${nodes} 28 | 29 | echo "##################### deploy KVContract to eth network ##########" 30 | ${dir}/eth/deploy_contract.sh ${shards} ${nodes} 31 | 32 | 33 | echo "##################### Setup ethereum network successfully! ##########" 34 | -------------------------------------------------------------------------------- /BlockchainDB/scripts/start_nodes.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -ex 3 | 4 | # echo "restart: kill all previous bcdbnode" 5 | # pgrep -f "bcdbnode" 6 | # pkill -f "bcdbnode" 7 | # kill -9 $(ps -ef|grep "geth"|grep -v "grep"|awk '{print $2}') 8 | # sleep 5 9 | #dir=$(dirname "$0") 10 | 11 | echo "Start blockchaindb server nodes, Please input server node size(default 4)" 12 | shardIDs=${1:-1} 13 | replicaIDs=${2:-4} 14 | dir=$(dirname "$0") 15 | echo $dir 16 | 17 | cd `dirname ${BASH_SOURCE-$0}` 18 | . eth/env.sh 19 | cd - 20 | 21 | bin="${ETH_BIN}/bcdbnode" 22 | tomlDir="config/config.nodes.${shardIDs}.${replicaIDs}" 23 | mkdir -p nodelog 24 | if [ ! -f ${bin} ]; then 25 | echo "Binary file ${bin} not found!" 
26 | echo "Hint: " 27 | echo " Please build binaries by run command: make build " 28 | echo "exit 1 " 29 | exit 1 30 | fi 31 | for (( i=1; i<=${shardIDs}; i++ )) 32 | do 33 | for (( c=1; c<=$replicaIDs; c++ )) 34 | do 35 | $bin --config="${tomlDir}/config_${i}_${c}" > nodelog/node.${i}.${c}.log 2>&1 & 36 | echo "bcdbnode$c start with config file config.nodes.${shardIDs}.${replicaIDs}/config_${i}_${c}.toml" 37 | done 38 | done 39 | echo "#########################################################################" 40 | echo "##################### Start blockchaindb server nodes successfully! ##########" 41 | echo "#########################################################################" 42 | 43 | -------------------------------------------------------------------------------- /BlockchainDB/scripts/stop_nodes.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # set -x 3 | 4 | echo "Stop all bcdbnodes" 5 | # pgrep -f "bcdbnode" || true 6 | # pkill -f "bcdbnode"|| true 7 | # kill -9 $(ps -ef|grep "bcdbnode"|grep -v "grep"|awk '{print $2}') 8 | pbcdbnode=`ps -ef|grep "bcdbnode"|grep -v "grep"|wc -l` 9 | echo ${pbcdbnode} 10 | if (( ${pbcdbnode} > 0 )); then 11 | kill $(ps -ef|grep "bcdbnode"|grep -v "grep"|awk '{print $2}') 12 | fi 13 | sleep 4 14 | echo "##################### Stop bcdbnodes successfully! 
##########################" 15 | -------------------------------------------------------------------------------- /BlockchainDB/scripts/ycsb/start_ycsb_test.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -ex 3 | # trap 'trap - SIGTERM && kill -- -$$' SIGINT SIGTERM EXIT 4 | 5 | size=${1:-4} 6 | clients=${2:-4} 7 | workload=${3:-a} 8 | distribution=${4:-ycsb_data} 9 | ndrivers=${size} 10 | nthreads=$(( ${clients} / ${ndrivers} )) 11 | 12 | dir=$(pwd) 13 | echo $dir 14 | 15 | bin="$dir/../../.bin/benchmark_bcdb" 16 | defaultAddrs="127.0.0.1:50001" 17 | loadPath="$dir/temp/${distribution}/workload${workload}.dat" 18 | runPath="$dir/temp/${distribution}/run_workload${workload}.dat" 19 | 20 | 21 | # nthreads= 1 2 4 16 32 64 22 | # clients="4 8 16 32 64 128 192 256" 23 | 24 | if [ ! -f ${bin} ]; then 25 | echo "Binary file ${bin} not found!" 26 | echo "Hint: " 27 | echo " Please build binaries by run command: make build " 28 | echo "exit 1 " 29 | exit 1 30 | fi 31 | 32 | echo "Test start with node size: ${size}, client size: ${clients}, workload${workload}" 33 | 34 | for (( c=2; c<=${size}; c++ )) 35 | do 36 | defaultAddrs="${defaultAddrs},127.0.0.1:$((50000 + ${c}))" 37 | done 38 | echo "start test with bcdbnode addrs: ${defaultAddrs}" 39 | 40 | 41 | $bin --load-path=$loadPath --run-path=$runPath --ndrivers=$ndrivers --nthreads=$nthreads --server-addrs=${defaultAddrs} & 42 | #2>&1 | tee test.log 43 | #> test.$(nodes).${clients}.log 2>&1 && cat test.$(nodes).${clients}.log 44 | -------------------------------------------------------------------------------- /BlockchainDB/shardingMgr/partition.go: -------------------------------------------------------------------------------- 1 | package sharding 2 | 3 | type Partition struct { 4 | Key string 5 | Shard string 6 | } 7 | 8 | func PARTITION_ETH() *Partition { return &Partition{Key: "eth", Shard: "ethereum"} } 9 | 10 | func PARTITION_FAB() *Partition { 
return &Partition{Key: "fab", Shard: "fabric"} } 11 | 12 | func PARTITION_DEFAULT() *Partition { return &Partition{Key: "fab", Shard: "fabric"} } 13 | -------------------------------------------------------------------------------- /BlockchainDB/storage/ethereum/contracts/KVStore/KVStore.sol: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: MIT 2 | pragma solidity ^0.4.24; 3 | 4 | contract KVStore { 5 | event ItemSet(bytes32 key, bytes value); 6 | 7 | mapping (bytes32 => bytes) public items; 8 | 9 | function set(bytes32 key, bytes value) external { 10 | items[key] = value; 11 | emit ItemSet(key, value); 12 | } 13 | } -------------------------------------------------------------------------------- /BlockchainDB/storage/ethereum/contracts/deploy/contract_deploy.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "context" 5 | "flag" 6 | "fmt" 7 | "log" 8 | "os" 9 | "time" 10 | 11 | "hybrid/BlockchainDB/bcdbnode/config" 12 | ClientSDK "hybrid/BlockchainDB/storage/ethereum/clientSDK" 13 | 14 | "github.com/ethereum/go-ethereum/common" 15 | "github.com/ethereum/go-ethereum/ethclient" 16 | ) 17 | 18 | func main() { 19 | 20 | //local 21 | // ethnode := "/home/tianwen/Data/eth_1_1/geth.ipc" 22 | 23 | //ganache 24 | // ethnode := "http://localhost:7545" 25 | 26 | //config from file 27 | configFile := flag.String("config", "config/config.eth.1.4/shard_1", "The path to the config file") 28 | flag.Parse() 29 | var conf config.Options 30 | err := config.ReadConfig(&conf, *configFile) 31 | if err != nil { 32 | fmt.Fprintf(os.Stderr, "Failed to read config: %v\n", err) 33 | os.Exit(1) 34 | } 35 | ethnode := conf.EthNode 36 | hexkey := conf.EthHexKey 37 | 38 | hexaddress, tx, _, err := ClientSDK.DeployEthereumKVStoreContract(ethnode, hexkey) 39 | if err != nil { 40 | log.Fatal("DeployEthereumKVStoreContract", err) 41 | } 42 | 43 | 
fmt.Printf("eth-hexaddr = \"%v\"\n", hexaddress) 44 | fmt.Printf("contract-tx = \"%v\"\n", tx) 45 | time.Sleep(10 * time.Second) 46 | 47 | //Debug 48 | client, err := ethclient.Dial(ethnode) 49 | if err != nil { 50 | fmt.Println("error ethclient Dail "+ethnode, err) 51 | } 52 | address := common.HexToAddress(hexaddress) 53 | bytecode, err := client.CodeAt(context.Background(), address, nil) // nil is latest block 54 | if err != nil { 55 | log.Fatal(err) 56 | } 57 | 58 | isContract := len(bytecode) > 0 59 | fmt.Printf("contract = %v\n", isContract) // is contract: true 60 | } 61 | -------------------------------------------------------------------------------- /BlockchainDB/storage/ethereum/networks/CustomGenesis.template: -------------------------------------------------------------------------------- 1 | { 2 | "config": { 3 | "chainId": ChainIdByShard, 4 | "homesteadBlock": 0, 5 | "eip150Block": 0, 6 | "eip155Block": 0, 7 | "eip158Block": 0, 8 | "byzantiumBlock": 0, 9 | "constantinopleBlock": 0, 10 | "petersburgBlock": 0, 11 | "clique": { 12 | "period": PeriodX, 13 | "epoch": 30000 14 | } 15 | }, 16 | "difficulty": "1", 17 | "gasLimit": "GasLimitX", 18 | "extradata": "ExtraData", 19 | "alloc": { 20 | AllocSigners 21 | } 22 | } -------------------------------------------------------------------------------- /BlockchainDB/storage/ethereum/tests/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "context" 5 | "flag" 6 | "fmt" 7 | "log" 8 | "os" 9 | "time" 10 | 11 | "hybrid/BlockchainDB/bcdbnode/config" 12 | ClientSDK "hybrid/BlockchainDB/storage/ethereum/clientSDK" 13 | 14 | "github.com/ethereum/go-ethereum/common" 15 | "github.com/ethereum/go-ethereum/ethclient" 16 | ) 17 | 18 | func main() { 19 | 20 | //ganache 21 | // ethnode := "http://192.168.20.2:9001" 22 | // hexaddress := "0xf991768040e359AD2987e96A25F776f52BcbcDF0" //contract address 23 | // hexkey := 
"0ca03550b19c953722b4b8b7b1b226e6857c39ff5f2d2cafd96da1d37b39fb53" 24 | 25 | //local eth_1_1 26 | // ethnode := "/Data/eth_1_1/geth.ipc" 27 | // hexaddress := "0x0803521274Fb66b54Ef6CF22A801713B1299b5cD" 28 | // hexkey := "" 29 | 30 | // config from file 31 | configFile := flag.String("config", "config/config.nodes.1.4/config_1_1", "The path to the config file") 32 | flag.Parse() 33 | var conf config.Options 34 | err := config.ReadConfig(&conf, *configFile) 35 | if err != nil { 36 | fmt.Fprintf(os.Stderr, "Failed to read config: %v\n", err) 37 | os.Exit(1) 38 | } 39 | for _, shard := range conf.Shards { 40 | 41 | fmt.Println(shard.ID) 42 | ethnode := shard.EthNode 43 | hexkey := shard.EthHexKey 44 | hexaddress := shard.EthHexAddr 45 | 46 | ethereumconn, err := ClientSDK.NewEthereumKVStoreInstance(ethnode, hexaddress, hexkey) 47 | if err != nil { 48 | log.Fatal(err) 49 | } 50 | 51 | key := "tianwen-7" 52 | value := "helloworld7" 53 | result1, err := ethereumconn.Write(context.Background(), key, value) 54 | if err != nil { 55 | log.Fatal("error ethereumconn.Write ", err) 56 | } 57 | fmt.Println("write tx: ", result1) 58 | time.Sleep(5 * time.Second) 59 | 60 | result, err := ethereumconn.Read(context.Background(), key) 61 | if err != nil { 62 | log.Println("error ethereumconn.Read ", err) 63 | } 64 | fmt.Println(result) 65 | 66 | result2, err := ethereumconn.Verify(context.Background(), "set", key, result1) 67 | if err != nil { 68 | log.Println("error ethereumconn.Verify ", err) 69 | } 70 | fmt.Println(result2) 71 | 72 | // os.Exit(0) 73 | //Debug 74 | client, err := ethclient.Dial(ethnode) 75 | 76 | if err != nil { 77 | fmt.Println("error ethclient Dail "+ethnode, err) 78 | } 79 | address := common.HexToAddress(hexaddress) 80 | bytecode, err := client.CodeAt(context.Background(), address, nil) // nil is latest block 81 | if err != nil { 82 | log.Fatal(err) 83 | } 84 | 85 | isContract := len(bytecode) > 0 86 | fmt.Printf("is contract: %v\n", isContract) // is 
contract: true 87 | } 88 | } 89 | -------------------------------------------------------------------------------- /BlockchainDB/transactionMgr/transactionMgr.go: -------------------------------------------------------------------------------- 1 | package transactionMgr 2 | 3 | import "sync" 4 | 5 | type TransactionMgr struct { 6 | orderingMap map[int64]string 7 | lock sync.RWMutex 8 | } 9 | 10 | func NewTransactionMgr() *TransactionMgr { 11 | 12 | return &TransactionMgr{orderingMap: make(map[int64]string), lock: sync.RWMutex{}} 13 | } 14 | 15 | func (txMgr *TransactionMgr) ReadNounce(nounce int64) string { 16 | txMgr.lock.RLock() 17 | defer txMgr.lock.RUnlock() 18 | key := txMgr.orderingMap[nounce] 19 | return key 20 | } 21 | 22 | func (txMgr *TransactionMgr) WriteNounce(nounce int64, key string) bool { 23 | txMgr.lock.Lock() 24 | defer txMgr.lock.Unlock() 25 | if _, ok := txMgr.orderingMap[nounce]; !ok { 26 | txMgr.orderingMap[nounce] = key 27 | return true 28 | } else { 29 | return false 30 | } 31 | } 32 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2021 rayzui 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 
14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /cmd/raftkv/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "log" 7 | "net" 8 | "os" 9 | "os/signal" 10 | "path/filepath" 11 | "time" 12 | 13 | "google.golang.org/grpc" 14 | "gopkg.in/alecthomas/kingpin.v2" 15 | 16 | pb "hybrid/proto/raftkv" 17 | "hybrid/raftkv" 18 | ) 19 | 20 | var ( 21 | svrAddr = kingpin.Flag("svr-addr", "Address of server").Default(":19001").String() 22 | raftAddr = kingpin.Flag("raft-addr", "Address of raft module").Default("127.0.0.1:18001").String() 23 | dir = kingpin.Flag("dir", "Dir for data and log").Required().String() 24 | raftLeader = kingpin.Flag("raft-leader", "Address of the existing raft cluster leader").String() 25 | redisAddr = kingpin.Flag("redis-addr", "redis server address").String() 26 | redisDb = kingpin.Flag("redis-db", "redis db number").Int() 27 | redisPwd = kingpin.Flag("redis-pwd", "redis password").String() 28 | storage = kingpin.Flag("store", "Underlying storage [redis/badger]").Default("redis").Enum("redis", "badger") 29 | blkSize = kingpin.Flag("blk-size", "Block size in raft").Default("100").Int() 30 | ) 31 | 32 | func main() { 33 | kingpin.Parse() 34 | 35 | dataDir, logDir, ledgerDir := filepath.Join(*dir, "data"), filepath.Join(*dir, "log"), filepath.Join(*dir, "ledger") 36 | 37 | var ( 38 | kv raftkv.KV 39 | err error 40 | ) 41 | 
switch *storage { 42 | case "redis": 43 | kv, err = raftkv.NewRedisKV(*redisAddr, *redisPwd, *redisDb) 44 | if err != nil { 45 | panic(err) 46 | } 47 | case "badger": 48 | kv, err = raftkv.NewBadgerKV(dataDir) 49 | if err != nil { 50 | panic(err) 51 | } 52 | } 53 | 54 | ctx, cancel := context.WithCancel(context.Background()) 55 | node, err := raftkv.NewPeer(ctx, ledgerDir, kv, &raftkv.Config{ 56 | Id: fmt.Sprintf("%v", time.Now().UnixNano()), 57 | RaftDir: logDir, 58 | RaftBind: *raftAddr, 59 | RaftJoin: *raftLeader, 60 | BlockSize: *blkSize, 61 | }) 62 | if err != nil { 63 | panic(err) 64 | } 65 | 66 | s := grpc.NewServer() 67 | pb.RegisterDBServer(s, node) 68 | lis, err := net.Listen("tcp", *svrAddr) 69 | if err != nil { 70 | panic(err) 71 | } 72 | go func() { 73 | log.Printf("Serving gRPC: %s", *svrAddr) 74 | s.Serve(lis) 75 | }() 76 | 77 | ch := make(chan os.Signal, 1) 78 | signal.Notify(ch, os.Interrupt, os.Kill) 79 | sig := <-ch 80 | cancel() 81 | log.Printf("Received signal %v, quiting gracefully", sig) 82 | } 83 | -------------------------------------------------------------------------------- /cmd/redisqlexample/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "fmt" 5 | "hybrid/dbconn" 6 | "strconv" 7 | 8 | "github.com/mediocregopher/radix/v3" 9 | 10 | "gopkg.in/alecthomas/kingpin.v2" 11 | ) 12 | 13 | var ( 14 | addr = kingpin.Flag("addr", "Address of redisql").Default("localhost:6379").String() 15 | connNum = kingpin.Flag("pool-size", "Size of connection pool").Default("10").Int() 16 | ) 17 | 18 | type Item struct { 19 | Key string `json:"key"` 20 | Value string `json:"value"` 21 | } 22 | 23 | func main() { 24 | kingpin.Parse() 25 | r, err := dbconn.NewRedisqlConn(*addr, *connNum) 26 | if err != nil { 27 | panic(err) 28 | } 29 | if err := r.Do(radix.Cmd(nil, "REDISQL.CREATE_DB", "BENCH")); err != nil { 30 | panic(err) 31 | } 32 | if err := r.Do(radix.Cmd( 33 | nil, 34 | 
"REDISQL.EXEC", 35 | "BENCH", 36 | "CREATE TABLE IF NOT EXISTS test(key TEXT, value TEXT);", 37 | )); err != nil { 38 | panic(err) 39 | } 40 | 41 | for i := 0; i < 10; i++ { 42 | fmt.Println("set", i) 43 | if err := r.Do(radix.Cmd( 44 | nil, 45 | "REDISQL.EXEC", 46 | "BENCH", 47 | fmt.Sprintf("INSERT INTO test VALUES(%s, %s);", strconv.Itoa(i), strconv.Itoa(i)), 48 | )); err != nil { 49 | panic(err) 50 | } 51 | } 52 | 53 | for i := 0; i < 10; i++ { 54 | fmt.Println("get", i) 55 | var items [][]string 56 | if err := r.Do(radix.Cmd( 57 | &items, 58 | "REDISQL.EXEC", 59 | "BENCH", 60 | fmt.Sprintf("SELECT * FROM test WHERE test.key=\"%s\";", strconv.Itoa(i)), 61 | )); err != nil { 62 | panic(err) 63 | } 64 | fmt.Println(items) 65 | } 66 | } 67 | -------------------------------------------------------------------------------- /cmd/tso/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "log" 5 | "os" 6 | "os/signal" 7 | "runtime" 8 | "runtime/pprof" 9 | 10 | "gopkg.in/alecthomas/kingpin.v2" 11 | 12 | "hybrid/tso" 13 | ) 14 | 15 | var ( 16 | cpuProfile = kingpin.Flag("cpuProfile", "write cpu profile to file").Default("").String() 17 | address = kingpin.Flag("addr", "listen address").Default(":7070").String() 18 | batchSize = kingpin.Flag("batch", "batch size").Default("100000").Int32() 19 | ) 20 | 21 | func main() { 22 | kingpin.Parse() 23 | 24 | runtime.GOMAXPROCS(runtime.NumCPU()) 25 | log.SetFlags(log.LstdFlags | log.Lshortfile) 26 | 27 | if *cpuProfile != "" { 28 | f, err := os.Create(*cpuProfile) 29 | if err != nil { 30 | log.Fatal(err) 31 | } 32 | pprof.StartCPUProfile(f) 33 | interrupt := make(chan os.Signal, 1) 34 | signal.Notify(interrupt) 35 | go catchKill(interrupt) 36 | } 37 | 38 | log.Println("Timestamp Oracle Started") 39 | orc := tso.NewOracle(*address, *batchSize) 40 | orc.Recover() 41 | orc.WaitForClientConnections() 42 | } 43 | 44 | func catchKill(interrupt chan os.Signal) { 45 
| <-interrupt 46 | if *cpuProfile != "" { 47 | pprof.StopCPUProfile() 48 | } 49 | log.Fatalln("Caught Signal") 50 | } 51 | -------------------------------------------------------------------------------- /cmd/veritas-redisql/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "log" 5 | "net" 6 | "os" 7 | "os/signal" 8 | "strings" 9 | 10 | "google.golang.org/grpc" 11 | "gopkg.in/alecthomas/kingpin.v2" 12 | 13 | "hybrid/dbconn" 14 | "hybrid/kafkarole" 15 | pbv "hybrid/proto/veritas" 16 | "hybrid/veritas" 17 | ) 18 | 19 | var ( 20 | signature = kingpin.Flag("signature", "server signature").Required().String() 21 | blockSize = kingpin.Flag("blk-size", "block size").Default("100").Int() 22 | parties = kingpin.Flag("parties", "party1,party2,...").Required().String() 23 | addr = kingpin.Flag("addr", "server address").Required().String() 24 | kafkaAddr = kingpin.Flag("kafka-addr", "kafka server address").Required().String() 25 | kafkaGroup = kingpin.Flag("kafka-group", "kafka group id").Required().String() 26 | kafkaTopic = kingpin.Flag("kafka-topic", "kafka topic").Required().String() 27 | redisAddr = kingpin.Flag("redis-addr", "redis server address").Required().String() 28 | redisDb = kingpin.Flag("redis-db", "redis db number").Required().Int() 29 | redisPwd = kingpin.Flag("redis-pwd", "redis password").String() 30 | ledgerPath = kingpin.Flag("ledger-path", "ledger path").Required().String() 31 | ) 32 | 33 | func check(err error) { 34 | if err != nil { 35 | panic(err) 36 | } 37 | } 38 | 39 | func main() { 40 | kingpin.Parse() 41 | 42 | r, err := dbconn.NewRedisqlConn(*redisAddr, 10) 43 | check(err) 44 | 45 | c, err := kafkarole.NewConsumer(*kafkaAddr, *kafkaGroup, []string{*kafkaTopic}) 46 | check(err) 47 | p, err := kafkarole.NewProducer(*kafkaAddr, *kafkaTopic) 48 | check(err) 49 | 50 | pm := make(map[string]struct{}) 51 | pm[*signature] = struct{}{} 52 | 53 | ps := strings.Split(*parties, ",") 
54 | for _, s := range ps { 55 | pm[s] = struct{}{} 56 | } 57 | 58 | s := grpc.NewServer() 59 | svr := veritas.NewServer(r, c, p, *ledgerPath, &veritas.Config{ 60 | Signature: *signature, 61 | Topic: *kafkaTopic, 62 | Parties: pm, 63 | BlockSize: *blockSize, 64 | }) 65 | pbv.RegisterNodeServer(s, svr) 66 | lis, err := net.Listen("tcp", *addr) 67 | if err != nil { 68 | panic(err) 69 | } 70 | 71 | go func() { 72 | log.Printf("Serving gRPC: %s", *addr) 73 | s.Serve(lis) 74 | }() 75 | 76 | ch := make(chan os.Signal, 1) 77 | signal.Notify(ch, os.Interrupt, os.Kill) 78 | sig := <-ch 79 | log.Printf("Received signal %v, quiting gracefully", sig) 80 | } 81 | -------------------------------------------------------------------------------- /cmd/veritas/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "log" 5 | "net" 6 | "os" 7 | "os/signal" 8 | "strings" 9 | 10 | "google.golang.org/grpc" 11 | "gopkg.in/alecthomas/kingpin.v2" 12 | 13 | "hybrid/dbconn" 14 | "hybrid/kafkarole" 15 | pbv "hybrid/proto/veritas" 16 | "hybrid/veritas" 17 | ) 18 | 19 | var ( 20 | signature = kingpin.Flag("signature", "server signature").Required().String() 21 | blockSize = kingpin.Flag("blk-size", "block size").Default("100").Int() 22 | parties = kingpin.Flag("parties", "party1,party2,...").Required().String() 23 | addr = kingpin.Flag("addr", "server address").Required().String() 24 | kafkaAddr = kingpin.Flag("kafka-addr", "kafka server address").Required().String() 25 | kafkaGroup = kingpin.Flag("kafka-group", "kafka group id").Required().String() 26 | kafkaTopic = kingpin.Flag("kafka-topic", "kafka topic").Required().String() 27 | redisAddr = kingpin.Flag("redis-addr", "redis server address").Required().String() 28 | redisDb = kingpin.Flag("redis-db", "redis db number").Required().Int() 29 | redisPwd = kingpin.Flag("redis-pwd", "redis password").String() 30 | ledgerPath = kingpin.Flag("ledger-path", "ledger 
path").Required().String() 31 | ) 32 | 33 | func check(err error) { 34 | if err != nil { 35 | panic(err) 36 | } 37 | } 38 | 39 | func main() { 40 | kingpin.Parse() 41 | 42 | r, err := dbconn.NewRedisConn(*redisAddr, *redisPwd, *redisDb) 43 | check(err) 44 | 45 | c, err := kafkarole.NewConsumer(*kafkaAddr, *kafkaGroup, []string{*kafkaTopic}) 46 | check(err) 47 | p, err := kafkarole.NewProducer(*kafkaAddr, *kafkaTopic) 48 | check(err) 49 | 50 | pm := make(map[string]struct{}) 51 | pm[*signature] = struct{}{} 52 | 53 | ps := strings.Split(*parties, ",") 54 | for _, s := range ps { 55 | pm[s] = struct{}{} 56 | } 57 | 58 | s := grpc.NewServer() 59 | svr := veritas.NewServer(r, c, p, *ledgerPath, &veritas.Config{ 60 | Signature: *signature, 61 | Topic: *kafkaTopic, 62 | Parties: pm, 63 | BlockSize: *blockSize, 64 | }) 65 | pbv.RegisterNodeServer(s, svr) 66 | lis, err := net.Listen("tcp", *addr) 67 | if err != nil { 68 | panic(err) 69 | } 70 | 71 | go func() { 72 | log.Printf("Serving gRPC: %s", *addr) 73 | s.Serve(lis) 74 | }() 75 | 76 | ch := make(chan os.Signal, 1) 77 | signal.Notify(ch, os.Interrupt, os.Kill) 78 | sig := <-ch 79 | log.Printf("Received signal %v, quiting gracefully", sig) 80 | } 81 | -------------------------------------------------------------------------------- /cmd/veritastm/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "fmt" 5 | "net" 6 | "os" 7 | "os/signal" 8 | "strings" 9 | 10 | "google.golang.org/grpc" 11 | "gopkg.in/alecthomas/kingpin.v2" 12 | 13 | "hybrid/dbconn" 14 | pbv "hybrid/proto/veritas" 15 | "hybrid/veritastm" 16 | 17 | abciserver "github.com/tendermint/tendermint/abci/server" 18 | ) 19 | 20 | var ( 21 | signature = kingpin.Flag("signature", "server signature").Required().String() 22 | blockSize = kingpin.Flag("blk-size", "block size").Default("100").Int() 23 | parties = kingpin.Flag("parties", "party1,party2,...").Required().String() 24 | addr = 
kingpin.Flag("addr", "server address").Required().String() 25 | redisAddr = kingpin.Flag("redis-addr", "redis server address").Required().String() 26 | redisDb = kingpin.Flag("redis-db", "redis db number").Required().Int() 27 | redisPwd = kingpin.Flag("redis-pwd", "redis password").String() 28 | ledgerPath = kingpin.Flag("ledger-path", "ledger path").Required().String() 29 | tmSocket = kingpin.Flag("tendermint-socket", "tendermint socket").Required().String() 30 | abciSocket = kingpin.Flag("abci-socket", "abci socket").Required().String() 31 | ) 32 | 33 | func check(err error) { 34 | if err != nil { 35 | panic(err) 36 | } 37 | } 38 | 39 | func main() { 40 | kingpin.Parse() 41 | 42 | r, err := dbconn.NewRedisConn(*redisAddr, *redisPwd, *redisDb) 43 | check(err) 44 | 45 | pm := make(map[string]struct{}) 46 | pm[*signature] = struct{}{} 47 | 48 | ps := strings.Split(*parties, ",") 49 | for _, s := range ps { 50 | pm[s] = struct{}{} 51 | } 52 | 53 | s := grpc.NewServer() 54 | svr := veritastm.NewServer(r, &veritastm.Config{ 55 | Signature: *signature, 56 | Parties: pm, 57 | BlockSize: *blockSize, 58 | LedgerPath: *ledgerPath, 59 | ABCIRPCAddr: *abciSocket, 60 | }) 61 | pbv.RegisterNodeServer(s, svr) 62 | lis, err := net.Listen("tcp", *addr) 63 | if err != nil { 64 | panic(err) 65 | } 66 | 67 | server := abciserver.NewSocketServer(*tmSocket, svr.Ledger) 68 | if err := server.Start(); err != nil { 69 | fmt.Fprintf(os.Stderr, "error starting socket server: %v", err) 70 | os.Exit(1) 71 | } 72 | defer server.Stop() 73 | 74 | go func() { 75 | fmt.Printf("Serving gRPC on port: %s\n", *addr) 76 | s.Serve(lis) 77 | }() 78 | 79 | ch := make(chan os.Signal, 1) 80 | signal.Notify(ch, os.Interrupt, os.Kill) 81 | sig := <-ch 82 | fmt.Printf("Received signal %v, quiting gracefully", sig) 83 | } 84 | -------------------------------------------------------------------------------- /dbconn/mongodb.go: -------------------------------------------------------------------------------- 1 | 
package dbconn 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | 7 | "go.mongodb.org/mongo-driver/mongo" 8 | "go.mongodb.org/mongo-driver/mongo/options" 9 | ) 10 | 11 | func NewMongoConn(ctx context.Context, addr, port string) (*mongo.Client, error) { 12 | return mongo.Connect(ctx, options.Client().ApplyURI(fmt.Sprintf("mongodb://%s:%s", addr, port))) 13 | } 14 | -------------------------------------------------------------------------------- /dbconn/redis.go: -------------------------------------------------------------------------------- 1 | package dbconn 2 | 3 | import ( 4 | "github.com/go-redis/redis/v8" 5 | ) 6 | 7 | func NewRedisConn(addr, pwd string, db int) (*redis.Client, error) { 8 | rdb := redis.NewClient(&redis.Options{ 9 | Addr: addr, 10 | Password: pwd, 11 | DB: db, 12 | }) 13 | return rdb, nil 14 | } 15 | -------------------------------------------------------------------------------- /dbconn/redisql.go: -------------------------------------------------------------------------------- 1 | package dbconn 2 | 3 | import ( 4 | "github.com/mediocregopher/radix/v3" 5 | ) 6 | 7 | func NewRedisqlConn(addr string, connNum int) (*radix.Pool, error) { 8 | return radix.NewPool("tcp", addr, connNum) 9 | } 10 | -------------------------------------------------------------------------------- /docker/bigchaindb/Alpine.Dockerfile: -------------------------------------------------------------------------------- 1 | FROM alpine:3.9 2 | LABEL maintainer "contact@ipdb.global" 3 | 4 | ARG TM_VERSION=v0.31.5 5 | RUN mkdir -p /usr/src/app 6 | ENV HOME /root 7 | COPY bigchaindb-2.2.2 /usr/src/app/ 8 | WORKDIR /usr/src/app 9 | 10 | RUN apk --update add sudo bash vim openssh iproute2 iperf \ 11 | && apk --update add python3 openssl ca-certificates git \ 12 | && apk --update add --virtual build-dependencies python3-dev \ 13 | libffi-dev openssl-dev build-base jq \ 14 | && apk add --no-cache libstdc++ dpkg gnupg \ 15 | && pip3 install --upgrade pip cffi \ 16 | && pip install -e . 
\ 17 | && apk del build-dependencies \ 18 | && rm -f /var/cache/apk/* 19 | 20 | RUN ssh-keygen -f /etc/ssh/ssh_host_rsa_key -N "" 21 | RUN ssh-keygen -t rsa -N "" -f /root/.ssh/id_rsa && cd /root/.ssh && cp id_rsa.pub authorized_keys 22 | ADD id_rsa.pub / 23 | RUN cat /id_rsa.pub >> ~/.ssh/authorized_keys 24 | RUN echo "StrictHostKeyChecking no" > /root/.ssh/config 25 | RUN echo "PermitUserEnvironment yes" >> /etc/ssh/sshd_config 26 | RUN echo "root:newpass" | chpasswd 27 | 28 | # Install mongodb and monit 29 | RUN apk --update add mongodb monit 30 | 31 | # Install Tendermint 32 | RUN wget https://github.com/tendermint/tendermint/releases/download/${TM_VERSION}/tendermint_${TM_VERSION}_linux_amd64.zip \ 33 | && unzip tendermint_${TM_VERSION}_linux_amd64.zip \ 34 | && mv tendermint /usr/local/bin/ \ 35 | && rm tendermint_${TM_VERSION}_linux_amd64.zip 36 | 37 | ENV TMHOME=/tendermint 38 | 39 | # Set permissions required for mongodb 40 | RUN mkdir -p /data/db /data/configdb \ 41 | && chown -R mongodb:mongodb /data/db /data/configdb 42 | 43 | # BigchainDB enviroment variables 44 | ENV BIGCHAINDB_DATABASE_PORT 27017 45 | ENV BIGCHAINDB_DATABASE_BACKEND localmongodb 46 | ENV BIGCHAINDB_SERVER_BIND 0.0.0.0:9984 47 | ENV BIGCHAINDB_WSSERVER_HOST 0.0.0.0 48 | ENV BIGCHAINDB_WSSERVER_SCHEME ws 49 | 50 | ENV BIGCHAINDB_WSSERVER_ADVERTISED_HOST 0.0.0.0 51 | ENV BIGCHAINDB_WSSERVER_ADVERTISED_SCHEME ws 52 | ENV BIGCHAINDB_TENDERMINT_PORT 26657 53 | 54 | # VOLUME /data/db /data/configdb /tendermint 55 | 56 | EXPOSE 27017 28017 9984 9985 26656 26657 26658 57 | 58 | WORKDIR $HOME 59 | # ENTRYPOINT ["/usr/src/app/pkg/scripts/all-in-one.bash"] 60 | 61 | -------------------------------------------------------------------------------- /docker/bigchaindb/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM ubuntu:18.04 2 | ENV DEBIAN_FRONTEND=noninteractive 3 | 4 | RUN apt update 5 | RUN apt -y install build-essential git iperf wget 
iputils-ping net-tools libcurl4-openssl-dev libtool m4 automake openssh-server iproute2 psmisc vim dstat unzip 6 | # RUN apt -y install vim openssh iproute2 iperf python3 openssl ca-certificates git build-dependencies python3-dev libffi-dev openssl-dev build-base jq 7 | RUN apt -y install python3.6 python3-pip 8 | RUN wget https://fastdl.mongodb.org/linux/mongodb-linux-x86_64-ubuntu1804-4.4.4.tgz && tar xf mongodb-linux-x86_64-ubuntu1804-4.4.4.tgz && cp mongodb-linux-x86_64-ubuntu1804-4.4.4/bin/mongod /usr/bin 9 | 10 | # RUN ssh-keygen -f /etc/ssh/ssh_host_rsa_key -N "" 11 | RUN ssh-keygen -t rsa -N "" -f /root/.ssh/id_rsa && cd /root/.ssh && cp id_rsa.pub authorized_keys 12 | ADD id_rsa.pub / 13 | RUN cat /id_rsa.pub >> ~/.ssh/authorized_keys 14 | RUN echo "StrictHostKeyChecking no" > /root/.ssh/config 15 | RUN echo "PermitUserEnvironment yes" >> /etc/ssh/sshd_config 16 | RUN echo "root:newpass" | chpasswd 17 | 18 | # Install Tendermint 19 | ARG TM_VERSION=v0.31.5 20 | RUN wget https://github.com/tendermint/tendermint/releases/download/${TM_VERSION}/tendermint_${TM_VERSION}_linux_amd64.zip \ 21 | && unzip tendermint_${TM_VERSION}_linux_amd64.zip \ 22 | && mv tendermint /usr/local/bin/ \ 23 | && rm tendermint_${TM_VERSION}_linux_amd64.zip 24 | 25 | ENV TMHOME="/tendermint" 26 | 27 | # Set permissions required for mongodb 28 | RUN mkdir -p /data/db /data/configdb 29 | # && chown -R mongodb:mongodb /data/db /data/configdb 30 | 31 | # Setup BigchainDB 32 | RUN mkdir -p /usr/src/app 33 | COPY bigchaindb-2.2.2 /usr/src/app/ 34 | WORKDIR /usr/src/app 35 | ENV LC_CTYPE=C.UTF-8 36 | RUN python3 -m pip install --upgrade pip cffi 37 | RUN python3 -m pip install -e . 
# BigchainDB environment variables
-------------------------------------------------------------------------------- /docker/blockchaindb/Dockerfile: -------------------------------------------------------------------------------- 1 | #Dockerfile for BlockchainDB 2 | FROM ubuntu:21.04 3 | RUN apt update && apt-get -y install tzdata 4 | RUN apt update && apt -y install build-essential git jq libc6 python3-pip \ 5 | iperf wget iputils-ping net-tools libcurl4-openssl-dev libtool m4 automake openssh-server iproute2 psmisc \ 6 | vim python dstat unzip nodejs npm curl autoconf make g++ protobuf-compiler redis \ 7 | && pip3 install solc-select \ 8 | && solc-select install 0.4.24 \ 9 | && solc-select use 0.4.24 10 | 11 | ENV HOME /root 12 | 13 | RUN ssh-keygen -t rsa -N "" -f /root/.ssh/id_rsa && cd /root/.ssh && cp id_rsa.pub authorized_keys 14 | ADD id_rsa.pub / 15 | RUN cat /id_rsa.pub >> ~/.ssh/authorized_keys 16 | RUN echo "StrictHostKeyChecking no" > /root/.ssh/config 17 | RUN echo "PermitUserEnvironment yes" >> /etc/ssh/sshd_config 18 | RUN echo "root:newpass" | chpasswd 19 | RUN service ssh start 20 | CMD ["/usr/sbin/sshd","-D"] 21 | 22 | RUN wget https://dl.google.com/go/go1.16.linux-amd64.tar.gz 23 | RUN rm -rf /usr/local/go && tar -C /usr/local -xzf go1.16.linux-amd64.tar.gz 24 | ENV GOROOT /usr/local/go 25 | ENV GOPATH /go 26 | ENV PATH $PATH:/usr/local/go/bin 27 | RUN go version 28 | 29 | RUN mkdir -p /go/src/github.com/ethereum 30 | WORKDIR /go/src/github.com/ethereum 31 | RUN git clone https://github.com/ethereum/go-ethereum.git 32 | WORKDIR /go/src/github.com/ethereum/go-ethereum 33 | RUN git checkout v1.8.23 34 | ENV GO111MODULE=off 35 | RUN make geth 36 | RUN cp build/bin/geth /usr/local/go/bin/ 37 | RUN geth version 38 | RUN make devtools 39 | 40 | # RUN sed -i '/^bind 127\.0\.0\.1 ::1$/s/^/#/' /etc/redis/redis.conf 41 | # RUN sed -i '/protected-mode yes/c protected-mode no' /etc/redis/redis.conf 42 | # RUN service redis-server start 43 | 44 | RUN mkdir -p /Data/ 45 | RUN mkdir -p 
${HOME}/BlockchainDB/scripts/ 46 | RUN mkdir -p ${HOME}/BlockchainDB/config/ 47 | RUN mkdir -p ${HOME}/BlockchainDB/logs/ 48 | RUN mkdir -p ${HOME}/BlockchainDB/bin/ 49 | COPY .bin ${HOME}/BlockchainDB/bin 50 | WORKDIR ${HOME}/BlockchainDB/ 51 | 52 | -------------------------------------------------------------------------------- /docker/blockchaindb/build_docker.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | dir=$(dirname "$0") 4 | echo ${dir} 5 | 6 | if ! [ -d "${dir}/../../BlockchainDB/.bin" ]; then 7 | echo "Please build the binaries first! (cd BlockchainDB && make build)" 8 | exit 1 9 | fi 10 | 11 | rm -rf ${dir}/.bin ${dir}/.scripts 12 | cd ${dir}/../../BlockchainDB/ 13 | make build 14 | cd - 15 | cp -r ${dir}/../../BlockchainDB/.bin ${dir}/ 16 | 17 | 18 | if ! [ -f "${dir}/id_rsa.pub" ]; then 19 | if ! [ -f "$HOME/.ssh/id_rsa.pub" ]; then 20 | echo "You do not have a public SSH key. Please generate one! (ssh-keygen)" 21 | exit 1 22 | fi 23 | cp $HOME/.ssh/id_rsa.pub ${dir}/ 24 | fi 25 | 26 | docker build -f ${dir}/Dockerfile -t blockchaindb ${dir}/ 27 | 28 | rm -rf ${dir}/.bin ${dir}/.scripts ${dir}/id_rsa.pub -------------------------------------------------------------------------------- /docker/veritas/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM ubuntu:20.04 2 | ENV DEBIAN_FRONTEND=noninteractive 3 | RUN apt update && apt-get -y install tzdata 4 | RUN apt update && apt -y install build-essential git iperf wget iputils-ping net-tools libcurl4-openssl-dev libtool m4 automake openssh-server iproute2 psmisc vim python dstat default-jdk unzip 5 | RUN ssh-keygen -t rsa -N "" -f ~/.ssh/id_rsa && cd ~/.ssh && cp id_rsa.pub authorized_keys 6 | ADD id_rsa.pub / 7 | RUN cat /id_rsa.pub >> ~/.ssh/authorized_keys 8 | RUN echo "StrictHostKeyChecking no" > ~/.ssh/config 9 | RUN wget https://dl.google.com/go/go1.15.6.linux-amd64.tar.gz && tar xf 
go1.15.6.linux-amd64.tar.gz 10 | RUN mkdir gopath 11 | ENV GOROOT /go 12 | ENV GOPATH /gopath 13 | ENV PATH $PATH:/go/bin 14 | RUN mkdir /git 15 | RUN wget https://download.redis.io/releases/redis-6.0.9.tar.gz && tar -xzf redis-6.0.9.tar.gz && cd redis-6.0.9 && make && make install 16 | RUN wget https://archive.apache.org/dist/kafka/2.7.0/kafka_2.12-2.7.0.tgz && tar -xzf kafka_2.12-2.7.0.tgz 17 | RUN wget https://github.com/RedBeardLab/rediSQL/releases/download/v1.1.1/RediSQL_v1.1.1_9b110f__release.so && mv RediSQL_v1.1.1_9b110f__release.so redisql.so && chmod u+x redisql.so 18 | RUN wget https://fastdl.mongodb.org/linux/mongodb-linux-x86_64-ubuntu2004-4.4.4.tgz && tar xf mongodb-linux-x86_64-ubuntu2004-4.4.4.tgz 19 | COPY tendermint /usr/local/bin 20 | COPY bin /bin 21 | RUN echo "PermitUserEnvironment yes" >> /etc/ssh/sshd_config 22 | CMD ["bash", "service ssh start"] 23 | -------------------------------------------------------------------------------- /docker/veritas/build_docker.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | if ! [ -d "../../bin" ]; then 4 | echo "Please build the binaries first! (cd ../../scripts; ./build_binaries.sh)" 5 | exit 1 6 | fi 7 | 8 | rm -rf bin 9 | cp -r ../../bin . 10 | 11 | if ! [ -f "id_rsa.pub" ]; then 12 | if ! [ -f "$HOME/.ssh/id_rsa.pub" ]; then 13 | echo "You do not have a public SSH key. Please generate one! (ssh-keygen)" 14 | exit 1 15 | fi 16 | cp $HOME/.ssh/id_rsa.pub . 17 | fi 18 | 19 | docker build -f Dockerfile -t veritas . 
-------------------------------------------------------------------------------- /docker/veritas/tendermint: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nusdbsystem/Hybrid-Blockchain-Database-Systems/948f560f36f5af215c291e46f118340fc838eac1/docker/veritas/tendermint -------------------------------------------------------------------------------- /docker_compose.yml: -------------------------------------------------------------------------------- 1 | version: '2' 2 | services: 3 | zookeeper: 4 | image: wurstmeister/zookeeper 5 | ports: 6 | - "2181:2181" 7 | 8 | kafka-1: 9 | image: wurstmeister/kafka 10 | ports: 11 | - "9092:9092" 12 | environment: 13 | KAFKA_ADVERTISED_HOST_NAME: 10.0.0.4 14 | KAFKA_ADVERTISED_PORT: 9092 15 | KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181 16 | KAFKA_LOG_DIRS: /path/to/kafka1/logs 17 | KAFKA_BROKER_ID: 500 18 | KAFKA_offsets_topic_replication_factor: 3 19 | volumes: 20 | - /path/to/var1/run/docker.sock:/var/run/docker.sock 21 | - ${KAFKA_DATA}/500:/kafka 22 | kafka-2: 23 | image: wurstmeister/kafka 24 | ports: 25 | - "9092:9092" 26 | environment: 27 | KAFKA_ADVERTISED_HOST_NAME: 10.0.0.5 28 | KAFKA_ADVERTISED_PORT: 9092 29 | KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181 30 | KAFKA_LOG_DIRS: /path/to/kafka2/logs 31 | KAFKA_BROKER_ID: 501 32 | KAFKA_offsets_topic_replication_factor: 3 33 | volumes: 34 | - /path/to/var2/run/docker.sock:/var/run/docker.sock 35 | - ${KAFKA_DATA}/501:/kafka 36 | 37 | kafka-3: 38 | image: wurstmeister/kafka 39 | ports: 40 | - "9092:9092" 41 | environment: 42 | KAFKA_ADVERTISED_HOST_NAME: 10.0.0.6 43 | KAFKA_ADVERTISED_PORT: 9092 44 | KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181 45 | KAFKA_LOG_DIRS: /path/to/kafka3/logs 46 | KAFKA_BROKER_ID: 502 47 | KAFKA_offsets_topic_replication_factor: 3 48 | volumes: 49 | - /path/to/var3/run/docker.sock:/var/run/docker.sock 50 | - ${KAFKA_DATA}/502:/kafka 
-------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module hybrid 2 | 3 | go 1.15 4 | 5 | require ( 6 | github.com/Workiva/go-datastructures v1.0.53 // indirect 7 | github.com/adlio/schema v1.1.14 // indirect 8 | github.com/allegro/bigcache v1.2.1 // indirect 9 | github.com/aristanetworks/goarista v0.0.0-20211115193401-1e4971faf436 // indirect 10 | github.com/btcsuite/btcd v0.22.0-beta // indirect 11 | github.com/confluentinc/confluent-kafka-go v1.7.0 // indirect 12 | github.com/deckarep/golang-set v1.7.1 // indirect 13 | github.com/dgraph-io/badger v1.6.2 14 | github.com/dgraph-io/badger/v3 v3.2103.2 15 | github.com/ethereum/go-ethereum v1.8.23 16 | github.com/fortytw2/leaktest v1.3.0 // indirect 17 | github.com/go-kit/kit v0.12.0 // indirect 18 | github.com/go-redis/redis/v8 v8.11.4 19 | github.com/go-zookeeper/zk v1.0.2 20 | github.com/golang/protobuf v1.5.2 21 | github.com/golangci/golangci-lint v1.43.0 // indirect 22 | github.com/google/orderedcode v0.0.1 // indirect 23 | github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 // indirect 24 | github.com/hashicorp/raft v1.3.2 25 | github.com/hashicorp/raft-boltdb v0.0.0-20210422161416-485fa74b0b01 26 | github.com/json-iterator/go v1.1.12 27 | github.com/libp2p/go-buffer-pool v0.0.2 // indirect 28 | github.com/mediocregopher/radix/v3 v3.8.0 29 | github.com/mroth/weightedrand v0.4.1 // indirect 30 | github.com/oasisprotocol/curve25519-voi v0.0.0-20210609091139-0a56a4bca00b // indirect 31 | github.com/patrickmn/go-cache v2.1.0+incompatible // indirect 32 | github.com/pkg/errors v0.9.1 33 | github.com/pkg/profile v1.2.1 34 | github.com/rjeczalik/notify v0.9.2 // indirect 35 | github.com/rs/cors v1.8.0 // indirect 36 | github.com/rs/zerolog v1.26.0 // indirect 37 | github.com/sasha-s/go-deadlock v0.2.1-0.20190427202633-1595213edefa // indirect 38 | github.com/snikch/goodman 
v0.0.0-20171125024755-10e37e294daa // indirect 39 | github.com/spf13/pflag v1.0.5 40 | github.com/spf13/viper v1.9.0 41 | github.com/stretchr/testify v1.7.0 42 | github.com/tendermint/go-amino v0.16.0 // indirect 43 | github.com/tendermint/tendermint v0.35.0 44 | github.com/tendermint/tm-db v0.6.4 // indirect 45 | github.com/vektra/mockery/v2 v2.9.4 // indirect 46 | go.mongodb.org/mongo-driver v1.7.4 47 | go.uber.org/atomic v1.9.0 48 | golang.org/x/net v0.0.0-20211111160137-58aab5ef257a // indirect 49 | golang.org/x/sys v0.0.0-20211111213525-f221eed1c01e // indirect 50 | google.golang.org/genproto v0.0.0-20211111162719-482062a4217b // indirect 51 | google.golang.org/grpc v1.42.0 52 | google.golang.org/protobuf v1.27.1 53 | gopkg.in/alecthomas/kingpin.v2 v2.2.6 54 | gopkg.in/confluentinc/confluent-kafka-go.v1 v1.7.0 55 | pgregory.net/rapid v0.4.7 // indirect 56 | ) 57 | -------------------------------------------------------------------------------- /kafkarole/consumer.go: -------------------------------------------------------------------------------- 1 | package kafkarole 2 | 3 | import ( 4 | "gopkg.in/confluentinc/confluent-kafka-go.v1/kafka" 5 | ) 6 | 7 | func NewConsumer(serverAddr, groupId string, topics []string) (*kafka.Consumer, error) { 8 | c, err := kafka.NewConsumer(&kafka.ConfigMap{ 9 | "bootstrap.servers": serverAddr, 10 | "group.id": groupId, 11 | "auto.offset.reset": "earliest", 12 | }) 13 | if err != nil { 14 | return nil, err 15 | } 16 | 17 | if err := c.SubscribeTopics(topics, nil); err != nil { 18 | return nil, err 19 | } 20 | 21 | return c, nil 22 | } 23 | -------------------------------------------------------------------------------- /kafkarole/producer.go: -------------------------------------------------------------------------------- 1 | package kafkarole 2 | 3 | import ( 4 | "gopkg.in/confluentinc/confluent-kafka-go.v1/kafka" 5 | ) 6 | 7 | func NewProducer(serverAddr, topic string) (*kafka.Producer, error) { 8 | p, err := 
kafka.NewProducer(&kafka.ConfigMap{"bootstrap.servers": serverAddr}) 9 | if err != nil { 10 | return nil, err 11 | } 12 | return p, nil 13 | } 14 | -------------------------------------------------------------------------------- /proto/raftkv/raftkv.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | package kv; 3 | 4 | option go_package = "proto/raftkv"; 5 | 6 | service DB { 7 | rpc Set (SetRequest) returns (SetResponse); 8 | rpc Get (GetRequest) returns (GetResponse); 9 | rpc Verify(VerifyRequest) returns (VerifyResponse); 10 | rpc IsLeader (IsLeaderRequest) returns (IsLeaderResponse); 11 | rpc Join (JoinRequest) returns (JoinResponse); 12 | rpc Leave (LeaveRequest) returns (LeaveResponse); 13 | } 14 | 15 | message VerifyRequest { 16 | string key = 1; 17 | } 18 | 19 | message VerifyResponse { 20 | bytes root_digest = 1; 21 | repeated bytes side_nodes = 2; 22 | bytes non_membership_leaf_data = 3; 23 | } 24 | 25 | message SetRequest { 26 | string key = 1; 27 | string value = 2; 28 | } 29 | 30 | message SetResponse { 31 | // Empty 32 | } 33 | 34 | message Block { 35 | repeated SetRequest reqs = 1; 36 | } 37 | 38 | message JoinRequest { 39 | string peer_id = 1; 40 | string peer_addr = 2; 41 | } 42 | 43 | message JoinResponse { 44 | // Empty 45 | } 46 | 47 | message LeaveRequest { 48 | string peer_id = 1; 49 | } 50 | 51 | message LeaveResponse { 52 | // Empty 53 | } 54 | 55 | message GetRequest { 56 | string key = 1; 57 | } 58 | 59 | message GetResponse { 60 | string value = 1; 61 | } 62 | 63 | message IsLeaderRequest { 64 | // Empty. 
syntax = "proto3";
package controller;

option go_package = "proto/veritas";

// Node is the RPC surface of a single Veritas node: point reads/writes,
// batched writes, and retrieval of a sparse-Merkle-tree proof for a key.
service Node {
  rpc Get (GetRequest) returns (GetResponse);
  rpc Set (SetRequest) returns (SetResponse);
  rpc Verify(VerifyRequest) returns (VerifyResponse);
  rpc BatchSet (BatchSetRequest) returns (BatchSetResponse);
}

// Requests a proof for a single key.
message VerifyRequest {
  string key = 1;
}

// Proof material for the requested key. A non-empty
// non_membership_leaf_data indicates the key is absent.
message VerifyResponse {
  bytes root_digest = 1;
  repeated bytes side_nodes = 2;
  bytes non_membership_leaf_data = 3;
}

// Outcome tag carried by a Block.
enum MessageType {
  Approve = 0;
  Abort = 1;
}

// One entry of the shared log: a sequence number plus the writes it batches.
message SharedLog {
  int64 seq = 1;
  repeated SetRequest sets = 2;
}

// A block of shared-log entries with its outcome and signature.
message Block {
  repeated SharedLog txs = 1;
  MessageType type = 2;
  string signature = 3;
}

message GetRequest {
  string signature = 1;
  string key = 2;
}

message GetResponse {
  string value = 1;
}

// A single signed, versioned write.
message SetRequest {
  string signature = 1;
  string key = 2;
  string value = 3;
  int64 version = 4;
}

message SetResponse {
  string txid = 1;
}

// Several writes submitted under one signature.
message BatchSetRequest {
  string signature = 1;
  repeated SetRequest sets = 2;
}

message BatchSetResponse {
  // Empty.
}
66 | } 67 | -------------------------------------------------------------------------------- /raftkv/badgerkv.go: -------------------------------------------------------------------------------- 1 | package raftkv 2 | 3 | import ( 4 | "log" 5 | "os" 6 | 7 | "github.com/dgraph-io/badger/v3" 8 | ) 9 | 10 | var _ KV = (*BadgerKV)(nil) 11 | 12 | type BadgerKV struct { 13 | db *badger.DB 14 | logger *log.Logger 15 | } 16 | 17 | type KVItem struct { 18 | key []byte 19 | value []byte 20 | err error 21 | } 22 | 23 | func (i *KVItem) IsFinished() bool { 24 | return i.err == ErrSnapshotFinished 25 | } 26 | 27 | func NewBadgerKV(dir string) (KV, error) { 28 | opt := badger.DefaultOptions(dir) 29 | db, err := badger.Open(opt) 30 | if err != nil { 31 | return nil, err 32 | } 33 | 34 | return &BadgerKV{ 35 | db: db, 36 | logger: log.New(os.Stderr, "[kv_badger] ", log.LstdFlags), 37 | }, nil 38 | } 39 | 40 | func (b *BadgerKV) Get(key []byte) ([]byte, error) { 41 | var value []byte 42 | 43 | err := b.db.View(func(txn *badger.Txn) error { 44 | item, err := txn.Get(key) 45 | if err != nil { 46 | if err == badger.ErrKeyNotFound { 47 | return nil 48 | } 49 | return err 50 | } 51 | value, err = item.ValueCopy(nil) 52 | return err 53 | }) 54 | if err != nil { 55 | return nil, err 56 | } else { 57 | return value, nil 58 | } 59 | } 60 | 61 | func (b *BadgerKV) Set(key, value []byte) error { 62 | err := b.db.Update(func(txn *badger.Txn) error { 63 | if err1 := txn.Set(key, value); err1 != nil { 64 | return err1 65 | } 66 | return nil 67 | }) 68 | return err 69 | } 70 | 71 | func (b *BadgerKV) SnapshotItems() <-chan DataItem { 72 | // create a new channel 73 | ch := make(chan DataItem, 1024) 74 | 75 | // generate items from snapshot to channel 76 | go b.db.View(func(txn *badger.Txn) error { 77 | defer close(ch) 78 | opt := badger.DefaultIteratorOptions 79 | opt.PrefetchSize = 10 80 | it := txn.NewIterator(opt) 81 | defer it.Close() 82 | 83 | keyCount := 0 84 | for it.Rewind(); it.Valid(); 
it.Next() { 85 | item := it.Item() 86 | k := item.Key() 87 | v, err := item.ValueCopy(nil) 88 | 89 | kvi := &KVItem{ 90 | key: append([]byte{}, k...), 91 | value: append([]byte{}, v...), 92 | err: err, 93 | } 94 | 95 | // write kvItem to channel with last error 96 | ch <- kvi 97 | keyCount++ 98 | 99 | if err != nil { 100 | return err 101 | } 102 | } 103 | 104 | // just use nil kvItem to mark the end 105 | kvi := &KVItem{ 106 | key: nil, 107 | value: nil, 108 | err: ErrSnapshotFinished, 109 | } 110 | ch <- kvi 111 | 112 | b.logger.Printf("Snapshot total %d keys", keyCount) 113 | 114 | return nil 115 | }) 116 | // return channel to persist 117 | return ch 118 | } 119 | 120 | func (b *BadgerKV) Close() { 121 | b.db.Close() 122 | } 123 | -------------------------------------------------------------------------------- /raftkv/command.go: -------------------------------------------------------------------------------- 1 | package raftkv 2 | 3 | type command struct { 4 | Op string `json:"op,omitempty"` 5 | Key string `json:"key,omitempty"` 6 | Value string `json:"value,omitempty"` 7 | } 8 | 9 | func NewSetCommand(key, value string) *command { 10 | return &command{ 11 | Op: "set", 12 | Key: key, 13 | Value: value, 14 | } 15 | } 16 | -------------------------------------------------------------------------------- /raftkv/config.go: -------------------------------------------------------------------------------- 1 | package raftkv 2 | 3 | type Config struct { 4 | Id string 5 | RaftDir string 6 | RaftBind string 7 | RaftJoin string 8 | BlockSize int 9 | } 10 | 11 | func NewConfig(id, raftDir, raftBind, join string, blkSize int) *Config { 12 | return &Config{ 13 | Id: id, 14 | RaftDir: raftDir, 15 | RaftBind: raftBind, 16 | RaftJoin: join, 17 | BlockSize: blkSize, 18 | } 19 | } 20 | -------------------------------------------------------------------------------- /raftkv/fsmsnapshot.go: -------------------------------------------------------------------------------- 1 | package 
raftkv 2 | 3 | import ( 4 | "log" 5 | 6 | "github.com/golang/protobuf/proto" 7 | "github.com/hashicorp/raft" 8 | 9 | pb "hybrid/proto/raftkv" 10 | ) 11 | 12 | type fsmSnapshot struct { 13 | kv KV 14 | logger *log.Logger 15 | } 16 | 17 | func (f *fsmSnapshot) Persist(sink raft.SnapshotSink) error { 18 | f.logger.Printf("Persist action in fsmSnapshot") 19 | 20 | ch := f.kv.SnapshotItems() 21 | 22 | keyCount := 0 23 | 24 | for { 25 | buff := proto.NewBuffer([]byte{}) 26 | 27 | dataItem := <-ch 28 | item := dataItem.(*KVItem) 29 | 30 | if item.IsFinished() { 31 | break 32 | } 33 | 34 | protoKVItem := &pb.KVItem{ 35 | Key: item.key, 36 | Value: item.value, 37 | } 38 | 39 | keyCount++ 40 | 41 | buff.EncodeMessage(protoKVItem) 42 | 43 | if _, err := sink.Write(buff.Bytes()); err != nil { 44 | return err 45 | } 46 | } 47 | f.logger.Printf("Persist total %d keys", keyCount) 48 | 49 | return nil 50 | } 51 | 52 | func (f *fsmSnapshot) Release() { 53 | f.logger.Printf("Release action in fsmSnapshot") 54 | } 55 | -------------------------------------------------------------------------------- /raftkv/kv.go: -------------------------------------------------------------------------------- 1 | package raftkv 2 | 3 | import "errors" 4 | 5 | var ErrSnapshotFinished = errors.New("snapshot finished successfully") 6 | 7 | type KV interface { 8 | Get(key []byte) ([]byte, error) 9 | Set(key, value []byte) error 10 | 11 | SnapshotItems() <-chan DataItem 12 | 13 | Close() 14 | } 15 | 16 | type DataItem interface{} 17 | -------------------------------------------------------------------------------- /raftkv/rediskv.go: -------------------------------------------------------------------------------- 1 | package raftkv 2 | 3 | import ( 4 | "context" 5 | "hybrid/dbconn" 6 | 7 | "github.com/go-redis/redis/v8" 8 | ) 9 | 10 | var _ KV = (*RedisKV)(nil) 11 | 12 | type RedisKV struct { 13 | cli *redis.Client 14 | } 15 | 16 | func NewRedisKV(addr, pwd string, db int) (KV, error) { 17 | cli, err := 
dbconn.NewRedisConn(addr, pwd, db) 18 | if err != nil { 19 | return nil, err 20 | } 21 | return &RedisKV{cli: cli}, nil 22 | } 23 | 24 | func (r *RedisKV) Get(key []byte) ([]byte, error) { 25 | val, err := r.cli.Get(context.Background(), string(key)).Result() 26 | if err != nil { 27 | return nil, err 28 | } 29 | return []byte(val), nil 30 | } 31 | 32 | func (r *RedisKV) Set(key, value []byte) error { 33 | return r.cli.Set(context.Background(), string(key), string(value), 0).Err() 34 | } 35 | 36 | func (r *RedisKV) SnapshotItems() <-chan DataItem { 37 | ch := make(chan DataItem, 1024) 38 | 39 | go func() { 40 | defer close(ch) 41 | iter := r.cli.Scan(context.Background(), 0, "*", 0).Iterator() 42 | for iter.Next(context.Background()) { 43 | key := iter.Val() 44 | val, err := r.cli.Get(context.Background(), key).Result() 45 | if err != nil { 46 | panic(err) 47 | } 48 | kvi := &KVItem{ 49 | key: append([]byte{}, []byte(key)...), 50 | value: append([]byte{}, []byte(val)...), 51 | err: nil, 52 | } 53 | ch <- kvi 54 | } 55 | if err := iter.Err(); err != nil { 56 | panic(err) 57 | } 58 | kvi := &KVItem{ 59 | key: nil, 60 | value: nil, 61 | err: ErrSnapshotFinished, 62 | } 63 | ch <- kvi 64 | }() 65 | 66 | return ch 67 | } 68 | 69 | func (r *RedisKV) Close() { 70 | r.cli.Close() 71 | } 72 | -------------------------------------------------------------------------------- /scripts/build_binaries.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | cd .. 
#!/bin/bash
# Build every Veritas / benchmark binary into ../bin.
#
# Patched variants (ZooKeeper TSO, RediSQL, tx-delay, MongoDB) are produced
# by applying a patch from scripts/, building, and then restoring the
# working tree with `git checkout` so the next variant starts clean.

cd ..
mkdir -p bin
cd bin

set -x

go version

# Stock builds
go build -o veritas-kafka ../cmd/veritas/main.go
go build -o veritas-tso ../cmd/tso/main.go
go build -o veritas-kafka-bench ../veritas/benchmark/ycsbbench/main.go
go build -o veritas-tendermint ../cmd/veritastm/main.go
go build -o veritas-tendermint-bench ../veritastm/benchmark/main.go

# DB benchmarks
go build -o db-redis ../cmd/redis/main.go
go build -o db-redisql ../cmd/redisql/main.go
go build -o db-mongodb ../cmd/mongodb/main.go

# build Veritas Kafka + ZooKeeper TSO
cd ..
git apply scripts/veritas-tso-zk.patch
cd bin
go build -o veritas-kafka-zk ../cmd/veritas/main.go
go build -o veritas-kafka-zk-bench ../veritas/benchmark/ycsbbench/main.go
# revert the patched file so later builds start from a clean tree
git checkout ../veritas/driver/driver.go

# build Veritas Kafka + RediSQL
cd ..
git apply scripts/veritas-redisql.patch
cd bin
go build -o veritas-kafka-redisql ../cmd/veritas-redisql/main.go
git checkout ../veritas/server.go

# build Veritas Kafka + Transaction Delay
cd ..
git apply scripts/veritas-txdelay.patch
cd bin
go build -o veritas-kafka-txdelay ../cmd/veritas/main.go
go build -o veritas-kafka-bench-txdelay ../veritas/benchmark/ycsbbench/main.go
go build -o veritas-tendermint-txdelay ../cmd/veritastm/main.go
go build -o veritas-tendermint-bench-txdelay ../veritastm/benchmark/main.go
git checkout ../cmd/veritas/main.go ../cmd/veritastm/main.go ../veritas/benchmark/ycsbbench/main.go ../veritas/config.go ../veritas/driver/driver.go ../veritas/server.go ../veritastm/benchmark/main.go ../veritastm/config.go ../veritastm/driver.go ../veritastm/server.go

# build Veritas Tendermint + MongoDB
cd ..
git apply scripts/veritas-tendermint-mongodb.patch
cd bin
go build -o veritas-tendermint-mongodb ../cmd/veritastm-mongodb/main.go
git checkout ../veritastm/ledgerapp.go ../veritastm/server.go
#!/bin/bash
# Report Kafka shared-log offsets per cluster size: the summed consumer
# reads (CURRENT-OFFSET) and the per-node average of writes (LOG-END-OFFSET)
# parsed from each run's kafka-counters.log.

. ./env.sh

if [ $# -lt 1 ]; then
    # Bug fix: the usage line previously omitted the argument name.
    echo "Usage: $0 <logs-dir>"
    exit 1
fi

LOGSD=$1
# NOTE(review): the header names four columns but the loop prints three
# fields ($N $SUMR $W); kept as-is so downstream parsers are unaffected.
echo "# Server Nodes Reads Writes"
for N in $NODES; do
    # LOG-END-OFFSET
    WRITES=`cat $LOGSD/veritas-nodes-$N-logs/kafka-counters.log | grep shared-log | tr -s ' ' | cut -d ' ' -f 5`
    # CURRENT-OFFSET
    READS=`cat $LOGSD/veritas-nodes-$N-logs/kafka-counters.log | grep shared-log | tr -s ' ' | cut -d ' ' -f 4`
    NW=`cat $LOGSD/veritas-nodes-$N-logs/kafka-counters.log | grep shared-log | tr -s ' ' | cut -d ' ' -f 5 | wc -l`
    NR=`cat $LOGSD/veritas-nodes-$N-logs/kafka-counters.log | grep shared-log | tr -s ' ' | cut -d ' ' -f 4 | wc -l`
    # Sanity check: expect exactly one counter record per node.
    if [[ $N -ne $NW ]] || [[ $N -ne $NR ]]; then
        echo "Invalid number of counter records"
    fi
    SUMR=`echo $READS | tr ' ' '+' | bc -l`
    SUMW=`echo $WRITES | tr ' ' '+'`
    W=`echo "scale=2;($SUMW+0)/$NW" | bc -l`
    echo "$N $SUMR $W"
done
#!/bin/bash
# Install all host dependencies: Docker, OVS/tooling, the BigchainDB Python
# driver, and Go 1.15.6 (with GOROOT/GOPATH exported via ~/.bashrc).

MYUSER=`whoami`

# Docker - https://docs.docker.com/engine/install/ubuntu/
sudo apt-get update
sudo apt-get install \
    ca-certificates \
    curl \
    gnupg \
    lsb-release
curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo gpg --dearmor -o /usr/share/keyrings/docker-archive-keyring.gpg
echo \
  "deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/docker-archive-keyring.gpg] https://download.docker.com/linux/ubuntu \
  $(lsb_release -cs) stable" | sudo tee /etc/apt/sources.list.d/docker.list > /dev/null
sudo apt-get update
sudo apt-get -y install docker-ce docker-ce-cli containerd.io
sudo adduser $MYUSER docker

# OpenVSwitch, KafkaCat, jq, make, gcc, java, pip
sudo apt -y install openvswitch-switch kafkacat jq make gcc default-jdk python3-pip

# BigchainDB driver
pip3 install bigchaindb_driver==0.6.2
# update timeout from 20s to 120s
# NOTE(review): hard-codes python3.6 site-packages and assumes cwd is $HOME
# sibling of the repo — verify on the target machine.
cp ../BigchainDB/driver.py .local/lib/python3.6/site-packages/bigchaindb_driver/driver.py

# Go 1.15.6
mkdir -p temp
cd temp
wget https://golang.org/dl/go1.15.6.linux-amd64.tar.gz
tar xf go1.15.6.linux-amd64.tar.gz
mkdir gopath
GOROOT=`pwd`/go
GOPATH=`pwd`/gopath
echo "" >> /home/$MYUSER/.bashrc
echo "export GOROOT=$GOROOT" >> /home/$MYUSER/.bashrc
echo "export GOPATH=$GOPATH" >> /home/$MYUSER/.bashrc
# Bug fix: $PATH must stay unexpanded (\$PATH) so .bashrc extends the
# runtime PATH; previously the installer's PATH was baked in verbatim.
echo "export PATH=\$PATH:$GOROOT/bin" >> /home/$MYUSER/.bashrc

echo "*** Please log out or reboot your system!"
#!/bin/bash
#
# Kill and remove the Docker containers of the Veritas cluster.
#

IMGNAME="veritas"
PREFIX="veritas"

idx=1
for id in $(docker ps | grep "$PREFIX" | cut -d ' ' -f 1); do
    echo "$PREFIX$idx"
    idx=$((idx + 1))
    docker kill "$id"
    docker rm "$id"
done
]" 18 | exit 19 | fi 20 | 21 | rand=$RANDOM 22 | hosts="" 23 | domain="" 24 | for i in `seq $START $END`; do 25 | hosts="$hosts $IPPREFIX.$i" 26 | done 27 | 28 | #echo "Running $@ on $hosts" 29 | 30 | for host in $hosts; do 31 | echo -e "\033[31m$host\e[0m"> qres-$host-$rand.log ; 32 | ssh -o StrictHostKeyChecking=no root@$host$domain $@ >> qres-$host-$rand.log & 33 | done 34 | 35 | wait 36 | 37 | for host in $hosts ; do 38 | cat qres-$host-$rand.log 39 | rm qres-$host-$rand.log & 40 | done 41 | 42 | wait 43 | 44 | reset_term_color -------------------------------------------------------------------------------- /scripts/parse_bigchaindb_blocksize.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | if [ $# -lt 1 ]; then 4 | echo "Usage: $0 " 5 | exit 1 6 | fi 7 | LOGS=$1 8 | 9 | . ./env.sh 10 | 11 | NODES=$DEFAULT_NODES 12 | 13 | for CLI in $THREADS; do 14 | AVG=0 15 | for IDX in `seq 1 $NODES`; do 16 | SUM=`cat $LOGS/logs-bigchaindb-clients-$CLI/node-$IDX/tendermint.log | grep validTxs | tr -s ' ' | cut -d ' ' -f 6 | cut -d '=' -f 2 | tr '\n' '+'` 17 | N=`cat $LOGS/logs-bigchaindb-clients-$CLI/node-$IDX/tendermint.log | grep validTxs | wc -l` 18 | AVG=`echo "$AVG+("$SUM"0)/$N" | bc -l` 19 | done 20 | THR=`cat $LOGS/bigchaindb-clients-$CLI.txt | grep Throughput | cut -d ' ' -f 5` 21 | LAT=`cat $LOGS/bigchaindb-clients-$CLI.txt | grep Latency | cut -d ' ' -f 2` 22 | AVGBLKSIZE=`echo "$AVG/$NODES.0" | bc -l` 23 | echo "$CLI;$THR;$LAT;$AVGBLKSIZE" 24 | done 25 | -------------------------------------------------------------------------------- /scripts/parse_bigchaindb_profiles.py: -------------------------------------------------------------------------------- 1 | import pstats 2 | import sys 3 | 4 | if len(sys.argv) < 2: 5 | print("Usage: {} ".format(sys.argv[0])) 6 | sys.exit(-1) 7 | 8 | 9 | p = pstats.Stats(sys.argv[1]) 10 | # p.strip_dirs().sort_stats('cumulative').print_stats(30) 11 | 
"""Print the top-30 entries of a cProfile dump, sorted by cumulative time."""
import pstats
import sys

if len(sys.argv) < 2:
    # Bug fix: the usage line previously omitted the argument name.
    print("Usage: {} <profile-file>".format(sys.argv[0]))
    sys.exit(-1)


p = pstats.Stats(sys.argv[1])
# Alternative view with directory prefixes stripped:
# p.strip_dirs().sort_stats('cumulative').print_stats(30)
p.sort_stats('cumulative').print_stats(30)
#!/bin/bash
# Client-scaling benchmark: run the BigchainDB YCSB bench once per thread
# count in $THREADS (from env.sh), collecting per-node logs after each run.

. ./env.sh

set -x

N=$DEFAULT_NODES
WORKLOAD_FILE="$DEFAULT_WORKLOAD_PATH/$DEFAULT_WORKLOAD".dat
WORKLOAD_RUN_FILE="$DEFAULT_WORKLOAD_PATH/run_$DEFAULT_WORKLOAD".dat

# Generate server addresses. BigchainDB port is 9984
ADDRS="http://$IPPREFIX.2:9984"
for IDX in `seq 3 $(($N+1))`; do
    ADDRS="$ADDRS,http://$IPPREFIX.$IDX:9984"
done

TSTAMP=`date +%F-%H-%M-%S`
LOGSD="logs-clients-bigchaindb-$TSTAMP"
mkdir $LOGSD

cd ..
RDIR=`pwd`
cd scripts

# Pull BigchainDB / MongoDB / Tendermint logs from every node into $1.
function copy_logs {
    DEST=$1
    mkdir -p $DEST
    for IDX in `seq 2 $(($N+1))`; do
        DEST_NODE=$DEST/node-$(($IDX-1))
        mkdir -p $DEST_NODE
        scp root@$IPPREFIX.$IDX:bigchaindb* $DEST_NODE/
        scp root@$IPPREFIX.$IDX:mongodb.log $DEST_NODE/
        scp root@$IPPREFIX.$IDX:tendermint.log $DEST_NODE/
    done
}

# Threads list is defined in env.sh
for TH in $THREADS; do
    ./restart_cluster_bigchaindb.sh
    ./start_bigchaindb.sh
    sleep 10
    python3 $RDIR/BigchainDB/bench.py $WORKLOAD_FILE $WORKLOAD_RUN_FILE $ADDRS $TH 2>&1 | tee $LOGSD/bigchaindb-clients-$TH.txt
    copy_logs $LOGSD/logs-bigchaindb-clients-$TH
    ./stop_bigchaindb.sh
done
# ./restart_cluster_bigchaindb.sh
#!/bin/bash
# Key-distribution benchmark: run the BigchainDB YCSB bench once each with
# uniform, latest, and zipfian request distributions.

. ./env.sh

set -x

TSTAMP=`date +%F-%H-%M-%S`
LOGSD="logs-distribution-bigchain-$TSTAMP"
mkdir $LOGSD

N=$DEFAULT_NODES
THREADS=$DEFAULT_THREADS_BIGCHAINDB

# Generate server addresses. BigchainDB port is 9984
ADDRS="http://$IPPREFIX.2:9984"
for IDX in `seq 3 $(($N+1))`; do
    ADDRS="$ADDRS,http://$IPPREFIX.$IDX:9984"
done

cd ..
RDIR=`pwd`
cd scripts

# Pull BigchainDB / MongoDB / Tendermint logs from every node into $1.
function copy_logs {
    DEST=$1
    mkdir -p $DEST
    for IDX in `seq 2 $(($N+1))`; do
        DEST_NODE=$DEST/node-$(($IDX-1))
        mkdir -p $DEST_NODE
        scp root@$IPPREFIX.$IDX:bigchaindb* $DEST_NODE/
        scp root@$IPPREFIX.$IDX:mongodb.log $DEST_NODE/
        scp root@$IPPREFIX.$IDX:tendermint.log $DEST_NODE/
    done
}

# Uniform
WORKLOAD_FILE="$DEFAULT_WORKLOAD_PATH/$DEFAULT_WORKLOAD".dat
WORKLOAD_RUN_FILE="$DEFAULT_WORKLOAD_PATH/run_$DEFAULT_WORKLOAD".dat

./restart_cluster_bigchaindb.sh
./start_bigchaindb.sh
sleep 5
python3 $RDIR/BigchainDB/bench.py $WORKLOAD_FILE $WORKLOAD_RUN_FILE $ADDRS $THREADS 2>&1 | tee $LOGSD/bigchaindb-uniform.txt
copy_logs $LOGSD/logs-bigchaindb-uniform

# Latest
WORKLOAD_FILE="$DEFAULT_WORKLOAD_PATH""_latest/$DEFAULT_WORKLOAD".dat
WORKLOAD_RUN_FILE="$DEFAULT_WORKLOAD_PATH""_latest/run_$DEFAULT_WORKLOAD".dat

./restart_cluster_bigchaindb.sh
./start_bigchaindb.sh
sleep 5
python3 $RDIR/BigchainDB/bench.py $WORKLOAD_FILE $WORKLOAD_RUN_FILE $ADDRS $THREADS 2>&1 | tee $LOGSD/bigchaindb-latest.txt
copy_logs $LOGSD/logs-bigchaindb-latest

# Zipfian
WORKLOAD_FILE="$DEFAULT_WORKLOAD_PATH""_zipfian/$DEFAULT_WORKLOAD".dat
WORKLOAD_RUN_FILE="$DEFAULT_WORKLOAD_PATH""_zipfian/run_$DEFAULT_WORKLOAD".dat

./restart_cluster_bigchaindb.sh
./start_bigchaindb.sh
sleep 5
python3 $RDIR/BigchainDB/bench.py $WORKLOAD_FILE $WORKLOAD_RUN_FILE $ADDRS $THREADS 2>&1 | tee $LOGSD/bigchaindb-zipfian.txt
copy_logs $LOGSD/logs-bigchaindb-zipfian
./stop_bigchaindb.sh
#!/bin/bash
# Networking benchmark: sweep bandwidth limits x RTTs, applying OVS rate
# limiting and tc delay, then run the BigchainDB YCSB bench per combination.

. ./env.sh

set -x

# Bandwidth limits (kbit/s; "NoLimit" skips rate limiting) and RTTs to test.
BWS="NoLimit 10000 1000 100"
RTTS="5ms 10ms 20ms 30ms 40ms 50ms 60ms"

TSTAMP=`date +%F-%H-%M-%S`
LOGS="logs-networking-bigchaindb-$TSTAMP"
mkdir $LOGS

N=$DEFAULT_NODES
THREADS=$DEFAULT_THREADS_BIGCHAINDB
WORKLOAD_FILE="$DEFAULT_WORKLOAD_PATH/$DEFAULT_WORKLOAD".dat
WORKLOAD_RUN_FILE="$DEFAULT_WORKLOAD_PATH/run_$DEFAULT_WORKLOAD".dat

# Generate server addresses. BigchainDB port is 9984
ADDRS="http://$IPPREFIX.2:9984"
for IDX in `seq 3 $(($N+1))`; do
    ADDRS="$ADDRS,http://$IPPREFIX.$IDX:9984"
done

cd ..
RDIR=`pwd`
cd scripts

for BW in $BWS; do
    for RTT in $RTTS; do
        LOGSD="$LOGS/logs-$BW-$RTT"
        mkdir -p $LOGSD
        ./restart_cluster_bigchaindb.sh
        if [[ "$BW" != "NoLimit" ]]; then
            sudo ./set_ovs_bw_limit.sh $BW 1
        fi
        ./set_tc.sh $RTT
        sleep 3
        ./start_bigchaindb.sh
        # Record the effective bandwidth/latency alongside the benchmark.
        ./run_iperf_ping.sh 2>&1 | tee $LOGSD/net.txt
        sleep 3
        python3 $RDIR/BigchainDB/bench.py $WORKLOAD_FILE $WORKLOAD_RUN_FILE $ADDRS $THREADS 2>&1 | tee $LOGS/bigchaindb-$BW-$RTT.txt
    done
done
#!/bin/bash
# Node-scaling benchmark: run the BigchainDB YCSB bench once per cluster
# size in $NODES (from env.sh).

# Bug fix: every sibling script sources "./env.sh" (env.sh lives next to
# this script); this one wrongly used "../env.sh".
. ./env.sh

set -x

THREADS=$DEFAULT_THREADS_BIGCHAINDB
WORKLOAD_FILE="$DEFAULT_WORKLOAD_PATH/$DEFAULT_WORKLOAD".dat
WORKLOAD_RUN_FILE="$DEFAULT_WORKLOAD_PATH/run_$DEFAULT_WORKLOAD".dat

TSTAMP=`date +%F-%H-%M-%S`
LOGSD="logs-nodes-bigchaindb-$TSTAMP"
mkdir $LOGSD

cd ..
RDIR=`pwd`
cd scripts

for N in $NODES; do
    ./restart_cluster_bigchaindb.sh $N
    ./start_bigchaindb.sh $N

    # Generate server addresses. BigchainDB port is 9984.
    # Bug fix: the first address was "http://$IPPREFIX:9984" — missing the
    # ".2" host octet that every other benchmark script uses.
    ADDRS="http://$IPPREFIX.2:9984"
    for I in `seq 3 $(($N+1))`; do
        ADDRS="$ADDRS,http://$IPPREFIX.$I:9984"
    done

    python3 $RDIR/BigchainDB/bench.py $WORKLOAD_FILE $WORKLOAD_RUN_FILE $ADDRS $THREADS 2>&1 | tee $LOGSD/bigchaindb-nodes-$N.txt
done
#!/bin/bash
# Record-size benchmark: run the BigchainDB YCSB bench once per transaction
# size in $TXSIZES (from env.sh), each with its own pre-generated workload.

. ./env.sh

set -x

TSTAMP=`date +%F-%H-%M-%S`
LOGSD="logs-txsizes-bigchaindb-$TSTAMP"
mkdir $LOGSD

N=$DEFAULT_NODES
THREADS=$DEFAULT_THREADS_BIGCHAINDB

# Generate server addresses. BigchainDB port is 9984
ADDRS="http://$IPPREFIX.2:9984"
for IDX in `seq 3 $(($N+1))`; do
    ADDRS="$ADDRS,http://$IPPREFIX.$IDX:9984"
done

cd ..
RDIR=`pwd`
cd scripts

for TXSIZE in $TXSIZES; do
    ./restart_cluster_bigchaindb.sh
    ./start_bigchaindb.sh
    sleep 5
    # Workload directories are suffixed with the record size, e.g. "_2kB".
    WORKLOAD_FILE="$DEFAULT_WORKLOAD_PATH"_"$TXSIZE/$DEFAULT_WORKLOAD".dat
    WORKLOAD_RUN_FILE="$DEFAULT_WORKLOAD_PATH"_"$TXSIZE/run_$DEFAULT_WORKLOAD".dat
    python3 $RDIR/BigchainDB/bench.py $WORKLOAD_FILE $WORKLOAD_RUN_FILE $ADDRS $THREADS 2>&1 | tee $LOGSD/bigchaindb-txsize-$TXSIZE.txt
done
./stop_bigchaindb.sh
#!/bin/bash
# Workload benchmark: run the BigchainDB YCSB bench once per workload in
# $WORKLOADS (from env.sh).

. ./env.sh

set -x

TSTAMP=`date +%F-%H-%M-%S`
LOGSD="logs-workload-bigchaindb-$TSTAMP"
mkdir $LOGSD

N=$DEFAULT_NODES
THREADS=$DEFAULT_THREADS_BIGCHAINDB
WORKLOAD_FILE="$DEFAULT_WORKLOAD_PATH/$DEFAULT_WORKLOAD".dat
WORKLOAD_RUN_FILE="$DEFAULT_WORKLOAD_PATH/run_$DEFAULT_WORKLOAD".dat

# Generate server addresses. BigchainDB port is 9984
ADDRS="http://$IPPREFIX.2:9984"
for IDX in `seq 3 $(($N+1))`; do
    ADDRS="$ADDRS,http://$IPPREFIX.$IDX:9984"
done

cd ..
RDIR=`pwd`
cd scripts

for W in $WORKLOADS; do
    ./restart_cluster_bigchaindb.sh
    ./start_bigchaindb.sh
    sleep 5
    # Override the default workload paths with the current workload's files.
    WORKLOAD_FILE="$DEFAULT_WORKLOAD_PATH/$W".dat
    WORKLOAD_RUN_FILE="$DEFAULT_WORKLOAD_PATH/run_$W".dat
    python3 $RDIR/BigchainDB/bench.py $WORKLOAD_FILE $WORKLOAD_RUN_FILE $ADDRS $THREADS 2>&1 | tee $LOGSD/bigchaindb-workload-$W.txt
done
./stop_bigchaindb.sh
26 | echo "Hint: " 27 | echo " Please build binaries by run command: " 28 | echo " cd ../BlockchainDB" 29 | echo " make build " 30 | echo " make docker (if never build blockchaindb image before)" 31 | echo " cd -" 32 | echo "exit 1 " 33 | exit 1 34 | fi 35 | 36 | for (( c=2; c<=${size}; c++ )) 37 | do 38 | defaultAddrs="${defaultAddrs},192.168.20.$((1+ ${c})):50001" 39 | done 40 | echo "start test with bcdbnode addrs: ${defaultAddrs}" 41 | 42 | # group 1 43 | #nDURATIONS="1 5 10 15" 44 | #nGASLIMITS="10000000 100000000" 45 | # group 2 46 | nDURATIONS="5" 47 | nGASLIMITS="100000000 80000000 60000000 40000000 20000000 10000000" 48 | 49 | for GAS in $nGASLIMITS; do 50 | for TH in $nDURATIONS; do 51 | echo "Test start with node size: ${size}, client size: ${clients}, workload${workload}" 52 | ./restart_cluster_blockchaindb.sh 53 | ./start_blockchaindb.sh ${shards} ${size} ${TH} ${GAS} 54 | sleep 6 55 | $bin --load-path=$loadPath --run-path=$runPath --ndrivers=$ndrivers --nthreads=$nthreads --server-addrs=${defaultAddrs} > $LOGSD/blockchaindb-blk-duration-${GAS}-${TH}.txt 2>&1 56 | done 57 | done 58 | 59 | -------------------------------------------------------------------------------- /scripts/run_benchmark_blockchaindb_clients.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | TSTAMP=`date +%F-%H-%M-%S` 4 | LOGSD="logs-clients-blockchaindb-$TSTAMP" 5 | mkdir $LOGSD 6 | 7 | set -x 8 | 9 | size=${1:-4} 10 | clients=${2:-256} 11 | workload=${3:-a} 12 | distribution=${4:-ycsb_data} 13 | shards=${5:-1} 14 | ndrivers=${size} 15 | 16 | dir=$(pwd) 17 | echo $dir 18 | bin="$dir/../BlockchainDB/.bin/benchmark_bcdb" 19 | defaultAddrs="192.168.20.2:50001" 20 | loadPath="$dir/temp/${distribution}/workload${workload}.dat" 21 | runPath="$dir/temp/${distribution}/run_workload${workload}.dat" 22 | 23 | if [ ! -f ${bin} ]; then 24 | echo "Binary file ${bin} not found!" 
25 | echo "Hint: " 26 | echo " Please build binaries by run command: " 27 | echo " cd ../BlockchainDB" 28 | echo " make build " 29 | echo " make docker (if never build blockchaindb image before)" 30 | echo " cd -" 31 | echo "exit 1 " 32 | exit 1 33 | fi 34 | 35 | for (( c=2; c<=${size}; c++ )) 36 | do 37 | defaultAddrs="${defaultAddrs},192.168.20.$((1+ ${c})):50001" 38 | done 39 | echo "start test with bcdbnode addrs: ${defaultAddrs}" 40 | 41 | 42 | nCLIENTS="4 8 16 32 64 128 192 256" 43 | 44 | for TH in $nCLIENTS; do 45 | echo "Test start with node size: ${size}, client size: ${TH}, workload${workload}" 46 | nthreads=$(( ${TH} / ${ndrivers} )) 47 | ./restart_cluster_blockchaindb.sh 48 | ./start_blockchaindb.sh 49 | sleep 6 50 | $bin --load-path=$loadPath --run-path=$runPath --ndrivers=$ndrivers --nthreads=$nthreads --server-addrs=${defaultAddrs} > $LOGSD/blockchaindb-clients-$TH.txt 2>&1 51 | done 52 | 53 | -------------------------------------------------------------------------------- /scripts/run_benchmark_blockchaindb_distribution.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | TSTAMP=`date +%F-%H-%M-%S` 4 | LOGSD="logs-distribution-blockchaindb-$TSTAMP" 5 | mkdir $LOGSD 6 | 7 | set -x 8 | 9 | size=${1:-4} 10 | clients=${2:-256} 11 | workload=${3:-a} 12 | distribution=${4:-ycsb_data} 13 | shards=${5:-1} 14 | ndrivers=${size} 15 | 16 | dir=$(pwd) 17 | echo $dir 18 | bin="$dir/../BlockchainDB/.bin/benchmark_bcdb" 19 | defaultAddrs="192.168.20.2:50001" 20 | nthreads=$(( ${clients} / ${ndrivers} )) 21 | 22 | if [ ! -f ${bin} ]; then 23 | echo "Binary file ${bin} not found!" 
24 | echo "Hint: " 25 | echo " Please build binaries by run command: " 26 | echo " cd ../BlockchainDB" 27 | echo " make build " 28 | echo " make docker (if never build blockchaindb image before)" 29 | echo " cd -" 30 | echo "exit 1 " 31 | exit 1 32 | fi 33 | 34 | for (( c=2; c<=${size}; c++ )) 35 | do 36 | defaultAddrs="${defaultAddrs},192.168.20.$((1+ ${c})):50001" 37 | done 38 | echo "start test with bcdbnode addrs: ${defaultAddrs}" 39 | 40 | 41 | nDISTRIBUTIONS="ycsb_data ycsb_data_latest ycsb_data_zipfian" 42 | 43 | for TH in $nDISTRIBUTIONS; do 44 | echo "Test start with node size: ${size}, client size: ${clients}, workload${workload}, distribution: ${TH}" 45 | loadPath="$dir/temp/${TH}/workload${workload}.dat" 46 | runPath="$dir/temp/${TH}/run_workload${workload}.dat" 47 | ./restart_cluster_blockchaindb.sh 48 | ./start_blockchaindb.sh 49 | sleep 6 50 | $bin --load-path=$loadPath --run-path=$runPath --ndrivers=$ndrivers --nthreads=$nthreads --server-addrs=${defaultAddrs} > $LOGSD/blockchaindb-distribution-$TH.txt 2>&1 51 | done 52 | 53 | -------------------------------------------------------------------------------- /scripts/run_benchmark_blockchaindb_networking.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | TSTAMP=`date +%F-%H-%M-%S` 4 | LOGSD="logs-networking-blockchaindb-$TSTAMP" 5 | mkdir $LOGSD 6 | 7 | set -x 8 | 9 | size=${1:-4} 10 | clients=${2:-256} 11 | workload=${3:-a} 12 | distribution=${4:-ycsb_data} 13 | shards=${5:-1} 14 | ndrivers=${size} 15 | DURATION=5 16 | GAS=10000000 17 | 18 | dir=$(pwd) 19 | echo $dir 20 | bin="$dir/../BlockchainDB/.bin/benchmark_bcdb" 21 | defaultAddrs="192.168.20.2:50001" 22 | nthreads=$(( ${clients} / ${ndrivers} )) 23 | loadPath="$dir/temp/${distribution}/workload${workload}.dat" 24 | runPath="$dir/temp/${distribution}/run_workload${workload}.dat" 25 | 26 | if [ ! -f ${bin} ]; then 27 | echo "Binary file ${bin} not found!" 
28 | echo "Hint: " 29 | echo " Please build binaries by run command: " 30 | echo " cd ../BlockchainDB" 31 | echo " make build " 32 | echo " make docker (if never build blockchaindb image before)" 33 | echo " cd -" 34 | echo "exit 1 " 35 | exit 1 36 | fi 37 | 38 | for (( c=2; c<=${size}; c++ )) 39 | do 40 | defaultAddrs="${defaultAddrs},192.168.20.$((1+ ${c})):50001" 41 | done 42 | echo "start test with bcdbnode addrs: ${defaultAddrs}" 43 | 44 | BWS="NoLimit 10000 1000 100" 45 | RTTS="5ms 10ms 20ms 30ms 40ms 50ms 60ms" 46 | 47 | for BW in $BWS; do 48 | for RTT in $RTTS; do 49 | LOGSDD="$LOGSD/logs-$BW-$RTT" 50 | mkdir $LOGSDD 51 | echo "Test start with node size: ${size}, client size: ${clients}, workload${workload}, TxSize: ${TH}" 52 | ./restart_cluster_blockchaindb.sh 53 | if [[ "$BW" != "NoLimit" ]]; then 54 | sudo ./set_ovs_bw_limit.sh $BW 1 55 | fi 56 | ./set_tc.sh $RTT 57 | sleep 3 58 | ./start_blockchaindb.sh 59 | ./run_iperf_ping.sh 2>&1 | tee $LOGSDD/net.txt 60 | sleep 3 61 | $bin --load-path=$loadPath --run-path=$runPath --ndrivers=$ndrivers --nthreads=$nthreads --server-addrs=${defaultAddrs} > $LOGSD/blockchaindb--$BW-$RTT.txt 2>&1 62 | done 63 | done 64 | 65 | -------------------------------------------------------------------------------- /scripts/run_benchmark_blockchaindb_nodes.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | TSTAMP=`date +%F-%H-%M-%S` 4 | LOGSD="logs-nodes-blockchaindb-$TSTAMP" 5 | mkdir $LOGSD 6 | 7 | set -x 8 | 9 | size=${1:-4} 10 | clients=${2:-256} 11 | workload=${3:-a} 12 | distribution=${4:-ycsb_data} 13 | shards=${5:-1} 14 | ndrivers=${size} 15 | 16 | dir=$(pwd) 17 | echo $dir 18 | bin="$dir/../BlockchainDB/.bin/benchmark_bcdb" 19 | defaultAddrs="192.168.20.2:50001" 20 | loadPath="$dir/temp/${distribution}/workload${workload}.dat" 21 | runPath="$dir/temp/${distribution}/run_workload${workload}.dat" 22 | 23 | if [ ! 
-f ${bin} ]; then 24 | echo "Binary file ${bin} not found!" 25 | echo "Hint: " 26 | echo " Please build binaries by run command: " 27 | echo " cd ../BlockchainDB" 28 | echo " make build " 29 | echo " make docker (if never build blockchaindb image before)" 30 | echo " cd -" 31 | echo "exit 1 " 32 | exit 1 33 | fi 34 | 35 | 36 | echo "start test with bcdbnode addrs: ${defaultAddrs}" 37 | 38 | 39 | nNODES="4 8 16 32 64" 40 | 41 | for TH in $nNODES; do 42 | size=${TH} 43 | # init 44 | defaultAddrs="192.168.20.2:50001" 45 | for (( c=2; c<=${size}; c++ )) 46 | do 47 | defaultAddrs="${defaultAddrs},192.168.20.$((1+ ${c})):50001" 48 | done 49 | 50 | echo "Test start with node size: ${size}, client size: ${clients}, workload${workload}" 51 | ndrivers=${TH} 52 | nthreads=$(( ${clients} / ${ndrivers} )) 53 | ./restart_cluster_blockchaindb.sh ${TH} 54 | ./start_blockchaindb.sh ${shards} ${TH} 55 | sleep 10 56 | $bin --load-path=$loadPath --run-path=$runPath --ndrivers=$ndrivers --nthreads=$nthreads --server-addrs=${defaultAddrs} > $LOGSD/blockchaindb-nodes-$TH.txt 2>&1 57 | done 58 | 59 | -------------------------------------------------------------------------------- /scripts/run_benchmark_blockchaindb_proctime.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | TSTAMP=`date +%F-%H-%M-%S` 4 | LOGSD="logs-txdelay-blockchaindb-$TSTAMP" 5 | mkdir $LOGSD 6 | 7 | set -x 8 | 9 | size=${1:-4} 10 | clients=${2:-256} 11 | workload=${3:-a} 12 | distribution=${4:-ycsb_data} 13 | shards=${5:-1} 14 | ndrivers=${size} 15 | DURATION=5 16 | GAS=10000000 17 | 18 | dir=$(pwd) 19 | echo $dir 20 | bin="$dir/../BlockchainDB/.bin/benchmark_bcdb" 21 | defaultAddrs="192.168.20.2:50001" 22 | nthreads=$(( ${clients} / ${ndrivers} )) 23 | loadPath="$dir/temp/${distribution}/workload${workload}.dat" 24 | runPath="$dir/temp/${distribution}/run_workload${workload}.dat" 25 | 26 | if [ ! -f ${bin} ]; then 27 | echo "Binary file ${bin} not found!" 
28 | echo "Hint: " 29 | echo " Please build binaries by run command: " 30 | echo " cd ../BlockchainDB" 31 | echo " make build " 32 | echo " make docker (if never build blockchaindb image before)" 33 | echo " cd -" 34 | echo "exit 1 " 35 | exit 1 36 | fi 37 | 38 | for (( c=2; c<=${size}; c++ )) 39 | do 40 | defaultAddrs="${defaultAddrs},192.168.20.$((1+ ${c})):50001" 41 | done 42 | echo "start test with bcdbnode addrs: ${defaultAddrs}" 43 | 44 | 45 | TXDELAYS="0 10 100 1000" 46 | 47 | for TH in $TXDELAYS; do 48 | echo "Test start with node size: ${size}, client size: ${clients}, workload${workload}, TxSize: ${TH}" 49 | ./restart_cluster_blockchaindb.sh 50 | ./start_blockchaindb.sh ${shards} ${size} ${DURATION} ${GAS} ${TH} 51 | sleep 10 52 | $bin --load-path=$loadPath --run-path=$runPath --ndrivers=$ndrivers --nthreads=$nthreads --server-addrs=${defaultAddrs} > $LOGSD/blockchaindb-txdelay-$TH.txt 2>&1 53 | done 54 | 55 | -------------------------------------------------------------------------------- /scripts/run_benchmark_blockchaindb_recordsize.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | TSTAMP=`date +%F-%H-%M-%S` 4 | LOGSD="logs-txsize-blockchaindb-$TSTAMP" 5 | mkdir $LOGSD 6 | 7 | set -x 8 | 9 | size=${1:-4} 10 | clients=${2:-256} 11 | workload=${3:-a} 12 | distribution=${4:-ycsb_data} 13 | shards=${5:-1} 14 | ndrivers=${size} 15 | 16 | dir=$(pwd) 17 | echo $dir 18 | bin="$dir/../BlockchainDB/.bin/benchmark_bcdb" 19 | defaultAddrs="192.168.20.2:50001" 20 | nthreads=$(( ${clients} / ${ndrivers} )) 21 | 22 | if [ ! -f ${bin} ]; then 23 | echo "Binary file ${bin} not found!" 
24 | echo "Hint: " 25 | echo " Please build binaries by run command: " 26 | echo " cd ../BlockchainDB" 27 | echo " make build " 28 | echo " make docker (if never build blockchaindb image before)" 29 | echo " cd -" 30 | echo "exit 1 " 31 | exit 1 32 | fi 33 | 34 | for (( c=2; c<=${size}; c++ )) 35 | do 36 | defaultAddrs="${defaultAddrs},192.168.20.$((1+ ${c})):50001" 37 | done 38 | echo "start test with bcdbnode addrs: ${defaultAddrs}" 39 | 40 | 41 | DURATION=5 42 | # default gas_limit is 10 000 000, records of "ycsb_data_32kB ycsb_data_128kB" exceed limits 43 | GAS=10000000 44 | nTXSIZES="ycsb_data_512B ycsb_data_2kB ycsb_data_8kB" 45 | 46 | # increase gas_limit for tx_size tests 47 | #GAS=100000000 48 | #nTXSIZES="ycsb_data_512B ycsb_data_2kB ycsb_data_8kB ycsb_data_32kB ycsb_data_128kB" 49 | 50 | for TH in $nTXSIZES; do 51 | echo "Test start with node size: ${size}, client size: ${clients}, workload${workload}, TxSize: ${TH}" 52 | loadPath="$dir/temp/${TH}/workload${workload}.dat" 53 | runPath="$dir/temp/${TH}/run_workload${workload}.dat" 54 | ./restart_cluster_blockchaindb.sh 55 | ./start_blockchaindb.sh ${shards} ${size} ${DURATION} ${GAS} 56 | sleep 10 57 | $bin --load-path=$loadPath --run-path=$runPath --ndrivers=$ndrivers --nthreads=$nthreads --server-addrs=${defaultAddrs} > $LOGSD/blockchaindb-txsize-$TH.txt 2>&1 58 | done 59 | 60 | -------------------------------------------------------------------------------- /scripts/run_benchmark_blockchaindb_sharding.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | TSTAMP=`date +%F-%H-%M-%S` 4 | LOGSD="logs-sharding-blockchaindb-$TSTAMP" 5 | mkdir $LOGSD 6 | 7 | set -x 8 | 9 | size=${1:-4} 10 | clients=${2:-256} 11 | workload=${3:-a} 12 | distribution=${4:-ycsb_data} 13 | 14 | dir=$(pwd) 15 | echo $dir 16 | bin="$dir/../BlockchainDB/.bin/benchmark_bcdb" 17 | defaultAddrs="192.168.20.2:50001" 18 | loadPath="$dir/temp/${distribution}/workload${workload}.dat" 19 
| runPath="$dir/temp/${distribution}/run_workload${workload}.dat" 20 | 21 | if [ ! -f ${bin} ]; then 22 | echo "Binary file ${bin} not found!" 23 | echo "Hint: " 24 | echo " Please build binaries by run command: " 25 | echo " cd ../BlockchainDB" 26 | echo " make build " 27 | echo " make docker (if never build blockchaindb image before)" 28 | echo " cd -" 29 | echo "exit 1 " 30 | exit 1 31 | fi 32 | 33 | 34 | echo "start test with bcdbnode addrs: ${defaultAddrs}" 35 | 36 | 37 | nSHARDS="1 2 4 8 16" 38 | 39 | for TH in $nSHARDS; do 40 | shardnodes=$((${size} * ${TH})) 41 | ndrivers=${shardnodes} 42 | nthreads=$(( ${clients} / ${ndrivers} )) 43 | # init 44 | defaultAddrs="192.168.20.2:50001" 45 | for (( c=2; c<=${shardnodes}; c++ )) 46 | do 47 | defaultAddrs="${defaultAddrs},192.168.20.$((1+ ${c})):50001" 48 | done 49 | 50 | echo "Test start with node size: ${size}, client size: ${clients}, workload${workload}" 51 | ./restart_cluster_blockchaindb.sh ${shardnodes} 52 | sleep 4 53 | ./start_blockchaindb.sh ${TH} ${size} 54 | 55 | sleep 10 56 | $bin --load-path=$loadPath --run-path=$runPath --ndrivers=$ndrivers --nthreads=$nthreads --server-addrs=${defaultAddrs} > $LOGSD/blockchaindb-sharding-$TH.txt 2>&1 57 | done 58 | -------------------------------------------------------------------------------- /scripts/run_benchmark_blockchaindb_workload.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | TSTAMP=`date +%F-%H-%M-%S` 4 | LOGSD="logs-workload-blockchaindb-$TSTAMP" 5 | mkdir $LOGSD 6 | 7 | set -x 8 | 9 | size=${1:-4} 10 | clients=${2:-256} 11 | workload=${3:-a} 12 | distribution=${4:-ycsb_data} 13 | shards=${5:-1} 14 | ndrivers=${size} 15 | 16 | dir=$(pwd) 17 | echo $dir 18 | bin="$dir/../BlockchainDB/.bin/benchmark_bcdb" 19 | defaultAddrs="192.168.20.2:50001" 20 | nthreads=$(( ${clients} / ${ndrivers} )) 21 | 22 | if [ ! -f ${bin} ]; then 23 | echo "Binary file ${bin} not found!" 
24 | echo "Hint: " 25 | echo " Please build binaries by run command: " 26 | echo " cd ../BlockchainDB" 27 | echo " make build " 28 | echo " make docker (if never build blockchaindb image before)" 29 | echo " cd -" 30 | echo "exit 1 " 31 | exit 1 32 | fi 33 | 34 | for (( c=2; c<=${size}; c++ )) 35 | do 36 | defaultAddrs="${defaultAddrs},192.168.20.$((1+ ${c})):50001" 37 | done 38 | echo "start test with bcdbnode addrs: ${defaultAddrs}" 39 | 40 | 41 | nDISTRIBUTIONS="a b c" 42 | 43 | for TH in $nDISTRIBUTIONS; do 44 | echo "Test start with node size: ${size}, client size: ${clients}, workload${TH}" 45 | loadPath="$dir/temp/${distribution}/workload${TH}.dat" 46 | runPath="$dir/temp/${distribution}/run_workload${TH}.dat" 47 | ./restart_cluster_blockchaindb.sh 48 | ./start_blockchaindb.sh 49 | sleep 6 50 | $bin --load-path=$loadPath --run-path=$runPath --ndrivers=$ndrivers --nthreads=$nthreads --server-addrs=${defaultAddrs} > $LOGSD/blockchaindb-workload-$TH.txt 2>&1 51 | done 52 | 53 | -------------------------------------------------------------------------------- /scripts/run_benchmark_db.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # Run benchmark (YCSB Workload A) locally on Redis, RediSQL, and MongoDB 4 | # 5 | 6 | if ! [ -x redis-server ]; then 7 | echo "Make sure you have redis-server binary in this folder" 8 | exit 1 9 | fi 10 | if ! [ -f redisql.so ]; then 11 | echo "Make sure you have redis-server library in this folder" 12 | exit 1 13 | fi 14 | if ! 
[ -x mongod ]; then 15 | echo "Make sure you have mongod binary in this folder" 16 | exit 1 17 | fi 18 | 19 | # Start servers 20 | ./redis-server --port 7777 --loadmodule ./redisql.so > redis.log 2>&1 & 21 | mkdir -p data 22 | rm -r data/* 23 | ./mongod --dbpath data > mongodb.log 2>&1 & 24 | sleep 3 25 | 26 | # Redis 27 | echo "*** Redis Workload A" 28 | ../bin/db-redis --load-path temp/ycsb_data/workloada.dat --run-path temp/ycsb_data/run_workloada.dat --nthreads 6 --redis-addr 127.0.0.1:7777 --redis-db 0 29 | echo "*** Redis Workload B" 30 | ../bin/db-redis --load-path temp/ycsb_data/workloadb.dat --run-path temp/ycsb_data/run_workloadb.dat --nthreads 6 --redis-addr 127.0.0.1:7777 --redis-db 0 31 | echo "*** Redis Workload C" 32 | ../bin/db-redis --load-path temp/ycsb_data/workloadc.dat --run-path temp/ycsb_data/run_workloadc.dat --nthreads 6 --redis-addr 127.0.0.1:7777 --redis-db 0 33 | 34 | # RedisQL 35 | echo "" 36 | echo "*** RediSQL Workload A" 37 | ../bin/db-redisql --load-path temp/ycsb_data/workloada.dat --run-path temp/ycsb_data/run_workloada.dat --nthreads 6 --redis-addr 127.0.0.1:7777 --redis-db 0 38 | echo "*** RediSQL Workload B" 39 | ../bin/db-redisql --load-path temp/ycsb_data/workloadb.dat --run-path temp/ycsb_data/run_workloadb.dat --nthreads 6 --redis-addr 127.0.0.1:7777 --redis-db 0 40 | echo "*** RediSQL Workload C" 41 | ../bin/db-redisql --load-path temp/ycsb_data/workloadc.dat --run-path temp/ycsb_data/run_workloadc.dat --nthreads 6 --redis-addr 127.0.0.1:7777 --redis-db 0 42 | 43 | # MongoDB 44 | echo "" 45 | echo "*** MongoDB Workload A" 46 | ../bin/db-mongodb --load-path temp/ycsb_data/workloada.dat --run-path temp/ycsb_data/run_workloada.dat --nthreads 6 --mongo-addr 127.0.0.1 --mongo-port 27017 47 | echo "*** MongoDB Workload B" 48 | ../bin/db-mongodb --load-path temp/ycsb_data/workloadb.dat --run-path temp/ycsb_data/run_workloadb.dat --nthreads 6 --mongo-addr 127.0.0.1 --mongo-port 27017 49 | echo "*** MongoDB Workload C" 50 | 
../bin/db-mongodb --load-path temp/ycsb_data/workloadc.dat --run-path temp/ycsb_data/run_workloadc.dat --nthreads 6 --mongo-addr 127.0.0.1 --mongo-port 27017 51 | 52 | killall -9 redis-server 53 | killall -9 mongod 54 | -------------------------------------------------------------------------------- /scripts/run_benchmark_veritas_kafka_blocksize.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | . ./env.sh 4 | 5 | set -x 6 | 7 | N=$(($DEFAULT_NODES + 1)) 8 | DRIVERS=$DEFAULT_DRIVERS_VERITAS_KAFKA 9 | THREADS=$DEFAULT_THREADS_VERITAS_KAFKA 10 | WORKLOAD_FILE="$DEFAULT_WORKLOAD_PATH/$DEFAULT_WORKLOAD".dat 11 | WORKLOAD_RUN_FILE="$DEFAULT_WORKLOAD_PATH/run_$DEFAULT_WORKLOAD".dat 12 | 13 | # Generate server addresses. Veritas port is 1990 14 | ADDRS="$IPPREFIX.2:1990" 15 | for IDX in `seq 3 $N`; do 16 | ADDRS="$ADDRS,$IPPREFIX.$IDX:1990" 17 | done 18 | 19 | TSTAMP=`date +%F-%H-%M-%S` 20 | LOGS="logs-blksize-veritas-kafka-$TSTAMP" 21 | mkdir $LOGS 22 | 23 | for BLK in $BLKSIZES; do 24 | ./restart_cluster_veritas.sh 25 | ./start_veritas_kafka.sh 5 $BLK 26 | sleep 30 27 | ../bin/veritas-kafka-bench --load-path=$WORKLOAD_FILE --run-path=$WORKLOAD_RUN_FILE --ndrivers=$DRIVERS --nthreads=$THREADS --veritas-addrs=$ADDRS --tso-addr=:7070 2>&1 | tee $LOGS/veritas-kafka-blksize-$BLK.txt 28 | done 29 | ./stop_veritas_kafka.sh 30 | -------------------------------------------------------------------------------- /scripts/run_benchmark_veritas_kafka_clients.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | . 
./env.sh 4 | 5 | set -x 6 | 7 | TSTAMP=`date +%F-%H-%M-%S` 8 | LOGS="logs-clients-veritas-kafka-$TSTAMP" 9 | mkdir $LOGS 10 | 11 | N=$(($DEFAULT_NODES + 1)) 12 | DRIVERS=$DEFAULT_DRIVERS_VERITAS_KAFKA 13 | WORKLOAD_FILE="$DEFAULT_WORKLOAD_PATH/$DEFAULT_WORKLOAD".dat 14 | WORKLOAD_RUN_FILE="$DEFAULT_WORKLOAD_PATH/run_$DEFAULT_WORKLOAD".dat 15 | 16 | # Generate server addresses. Veritas port is 1990 17 | ADDRS="$IPPREFIX.2:1990" 18 | for IDX in `seq 3 $N`; do 19 | ADDRS="$ADDRS,$IPPREFIX.$IDX:1990" 20 | done 21 | 22 | for TH in $THREADS; do 23 | ./restart_cluster_veritas.sh 24 | ./start_veritas_kafka.sh 25 | ../bin/veritas-kafka-bench --load-path=$WORKLOAD_FILE --run-path=$WORKLOAD_RUN_FILE --ndrivers=$DRIVERS --nthreads=$TH --veritas-addrs=$ADDRS --tso-addr=:7070 2>&1 | tee $LOGS/veritas-clients-$TH.txt 26 | # copy logs 27 | SLOGS="$LOGS/veritas-clients-$TH-logs" 28 | mkdir -p $SLOGS 29 | for I in `seq 2 5`; do 30 | IDX=$(($I-1)) 31 | scp -o StrictHostKeyChecking=no root@$IPPREFIX.$I:/veritas-$IDX.log $SLOGS/ 32 | done 33 | scp -o StrictHostKeyChecking=no root@$IPPREFIX.6:/kafka_2.12-2.7.0/zookeeper.log $SLOGS/ 34 | scp -o StrictHostKeyChecking=no root@$IPPREFIX.6:/kafka_2.12-2.7.0/kafka.log $SLOGS/ 35 | done 36 | ./restart_cluster_veritas.sh 37 | -------------------------------------------------------------------------------- /scripts/run_benchmark_veritas_kafka_clients_tso_zk.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | . ./env.sh 4 | 5 | set -x 6 | 7 | TSTAMP=`date +%F-%H-%M-%S` 8 | LOGS="logs-clients-veritas-kafka-tso-zk-$TSTAMP" 9 | mkdir $LOGS 10 | 11 | N=$(($DEFAULT_NODES + 1)) 12 | DRIVERS=$DEFAULT_DRIVERS_VERITAS_KAFKA 13 | WORKLOAD_FILE="$DEFAULT_WORKLOAD_PATH/$DEFAULT_WORKLOAD".dat 14 | WORKLOAD_RUN_FILE="$DEFAULT_WORKLOAD_PATH/run_$DEFAULT_WORKLOAD".dat 15 | 16 | # Generate server addresses. 
Veritas port is 1990 17 | ADDRS="$IPPREFIX.2:1990" 18 | for IDX in `seq 3 $N`; do 19 | ADDRS="$ADDRS,$IPPREFIX.$IDX:1990" 20 | done 21 | TSO_ADDR="$IPPREFIX.6:2181" 22 | 23 | for TH in $THREADS; do 24 | ./restart_cluster_veritas.sh 25 | ./start_veritas_kafka_tso_zk.sh 26 | ../bin/veritas-kafka-zk-bench --load-path=$WORKLOAD_FILE --run-path=$WORKLOAD_RUN_FILE --ndrivers=$DRIVERS --nthreads=$TH --veritas-addrs=$ADDRS --tso-addr=$TSO_ADDR 2>&1 | tee $LOGS/veritas-clients-$TH.txt 27 | # copy logs 28 | SLOGS="$LOGS/veritas-clients-$TH-logs" 29 | mkdir -p $SLOGS 30 | for I in `seq 2 5`; do 31 | IDX=$(($I-1)) 32 | scp -o StrictHostKeyChecking=no root@$IPPREFIX.$I:/veritas-$IDX.log $SLOGS/ 33 | done 34 | scp -o StrictHostKeyChecking=no root@$IPPREFIX.6:/kafka_2.12-2.7.0/zookeeper.log $SLOGS/ 35 | scp -o StrictHostKeyChecking=no root@$IPPREFIX.6:/kafka_2.12-2.7.0/kafka.log $SLOGS/ 36 | done 37 | ./restart_cluster_veritas.sh 38 | -------------------------------------------------------------------------------- /scripts/run_benchmark_veritas_kafka_database.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | . ./env.sh 4 | 5 | TSTAMP=`date +%F-%H-%M-%S` 6 | LOGS="logs-database-veritas-kafka-$TSTAMP" 7 | mkdir $LOGS 8 | 9 | N=$(($DEFAULT_NODES + 1)) 10 | DRIVERS=$DEFAULT_DRIVERS_VERITAS_KAFKA 11 | THREADS=$DEFAULT_THREADS_VERITAS_KAFKA 12 | 13 | # Generate server addresses. 
Veritas port is 1990 14 | ADDRS="$IPPREFIX.2:1990" 15 | for IDX in `seq 3 $N`; do 16 | ADDRS="$ADDRS,$IPPREFIX.$IDX:1990" 17 | done 18 | 19 | # Redis KV 20 | for W in $WORKLOADS; do 21 | ./restart_cluster_veritas.sh 22 | ./start_veritas_kafka.sh 23 | sleep 10 24 | ../bin/veritas-kafka-bench --load-path=$DEFAULT_WORKLOAD_PATH/$W.dat --run-path=$DEFAULT_WORKLOAD_PATH/run_$W.dat --ndrivers=$DRIVERS --nthreads=$THREADS --veritas-addrs=$ADDRS --tso-addr=:7070 2>&1 | tee $LOGS/veritas-rediskv-$W.txt 25 | mkdir -p $LOGS/logs-rediskv-$W 26 | for IDX in `seq 2 5`; do 27 | IDXX=$(($IDX-1)) 28 | scp -o StrictHostKeyChecking=no root@$IPPREFIX.$IDX:/veritas-$IDXX.log $LOGS/logs-rediskv-$W/ 29 | done 30 | IDX=6 31 | scp -o StrictHostKeyChecking=no root@$IPPREFIX.$IDX:/kafka_2.12-2.7.0/kafka.log $LOGS/logs-rediskv-$W/ 32 | done 33 | ./stop_veritas_kafka.sh 34 | 35 | # Redis SQL 36 | for W in $WORKLOADS; do 37 | ./restart_cluster_veritas.sh 38 | ./start_veritas_kafka_redisql.sh 39 | sleep 10 40 | ../bin/veritas-kafka-bench --load-path=$DEFAULT_WORKLOAD_PATH/$W.dat --run-path=$DEFAULT_WORKLOAD_PATH/run_$W.dat --ndrivers=$DRIVERS --nthreads=$THREADS --veritas-addrs=$ADDRS --tso-addr=:7070 2>&1 | tee $LOGS/veritas-redisql-$W.txt 41 | mkdir -p $LOGS/logs-redisql-$W 42 | for IDX in `seq 2 5`; do 43 | IDXX=$(($IDX-1)) 44 | scp -o StrictHostKeyChecking=no root@$IPPREFIX.$IDX:/veritas-$IDXX.log $LOGS/logs-redisql-$W/ 45 | done 46 | IDX=6 47 | scp -o StrictHostKeyChecking=no root@$IPPREFIX.$IDX:/kafka_2.12-2.7.0/kafka.log $LOGS/logs-redisql-$W/ 48 | done 49 | ./stop_veritas_kafka_redisql.sh 50 | -------------------------------------------------------------------------------- /scripts/run_benchmark_veritas_kafka_distribution.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | . 
./env.sh 4 | 5 | set -x 6 | 7 | TSTAMP=`date +%F-%H-%M-%S` 8 | LOGS="logs-distribution-veritas-kafka-$TSTAMP" 9 | mkdir $LOGS 10 | 11 | N=$(($DEFAULT_NODES + 1)) 12 | DRIVERS=$DEFAULT_DRIVERS_VERITAS_KAFKA 13 | THREADS=$DEFAULT_THREADS_VERITAS_KAFKA 14 | WORKLOAD_FILE="$DEFAULT_WORKLOAD_PATH/$DEFAULT_WORKLOAD".dat 15 | WORKLOAD_RUN_FILE="$DEFAULT_WORKLOAD_PATH/run_$DEFAULT_WORKLOAD".dat 16 | 17 | # Generate server addresses. Veritas port is 1990 18 | ADDRS="$IPPREFIX.2:1990" 19 | for IDX in `seq 3 $N`; do 20 | ADDRS="$ADDRS,$IPPREFIX.$IDX:1990" 21 | done 22 | 23 | function copy_logs { 24 | LOGSDIR=$1 25 | mkdir -p $LOGSDIR 26 | for I in `seq 2 5`; do 27 | IDX=$(($I-1)) 28 | scp -o StrictHostKeyChecking=no root@$IPPREFIX.$I:/veritas-$IDX.log $LOGSDIR/ 29 | done 30 | scp -o StrictHostKeyChecking=no root@$IPPREFIX.6:/kafka_2.12-2.7.0/zookeeper.log $LOGSDIR/ 31 | scp -o StrictHostKeyChecking=no root@$IPPREFIX.6:/kafka_2.12-2.7.0/kafka.log $LOGSDIR/ 32 | } 33 | 34 | # Uniform 35 | ./restart_cluster_veritas.sh 36 | ./start_veritas_kafka.sh 37 | sleep 5 38 | ../bin/veritas-kafka-bench --load-path=$WORKLOAD_FILE --run-path=$WORKLOAD_RUN_FILE --ndrivers=$DRIVERS --nthreads=$THREADS --veritas-addrs=$ADDRS --tso-addr=:7070 2>&1 | tee $LOGS/veritas-uniform.txt 39 | copy_logs "$LOGS/veritas-uniform-logs" 40 | 41 | # Latest 42 | ./restart_cluster_veritas.sh 43 | ./start_veritas_kafka.sh 44 | sleep 5 45 | ../bin/veritas-kafka-bench --load-path=$DEFAULT_WORKLOAD_PATH"_latest/"$DEFAULT_WORKLOAD".dat" --run-path=$DEFAULT_WORKLOAD_PATH"_latest/run_"$DEFAULT_WORKLOAD".dat" --ndrivers=$DRIVERS --nthreads=$THREADS --veritas-addrs=$ADDRS --tso-addr=:7070 2>&1 | tee $LOGS/veritas-latest.txt 46 | copy_logs "$LOGS/veritas-latest-logs" 47 | 48 | # Zipfian 49 | ./restart_cluster_veritas.sh 50 | ./start_veritas_kafka.sh 51 | sleep 5 52 | ../bin/veritas-kafka-bench --load-path=$DEFAULT_WORKLOAD_PATH"_zipfian/"$DEFAULT_WORKLOAD".dat" 
--run-path=$DEFAULT_WORKLOAD_PATH"_zipfian/run_"$DEFAULT_WORKLOAD".dat" --ndrivers=$DRIVERS --nthreads=$THREADS --veritas-addrs=$ADDRS --tso-addr=:7070 2>&1 | tee $LOGS/veritas-zipfian.txt 53 | copy_logs "$LOGS/veritas-zipfian-logs" 54 | 55 | ./stop_veritas_kafka.sh 56 | -------------------------------------------------------------------------------- /scripts/run_benchmark_veritas_kafka_networking.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | . ./env.sh 4 | 5 | set -x 6 | 7 | BWS="NoLimit 10000 1000 100" 8 | RTTS="5ms 10ms 20ms 30ms 40ms 50ms 60ms" 9 | 10 | TSTAMP=`date +%F-%H-%M-%S` 11 | LOGS="logs-networking-veritas-kafka-$TSTAMP" 12 | mkdir $LOGS 13 | 14 | N=$(($DEFAULT_NODES + 1)) 15 | DRIVERS=$DEFAULT_DRIVERS_VERITAS_KAFKA 16 | THREADS=$DEFAULT_THREADS_VERITAS_KAFKA 17 | WORKLOAD_FILE="$DEFAULT_WORKLOAD_PATH/$DEFAULT_WORKLOAD".dat 18 | WORKLOAD_RUN_FILE="$DEFAULT_WORKLOAD_PATH/run_$DEFAULT_WORKLOAD".dat 19 | 20 | # Generate server addresses. Veritas port is 1990 21 | ADDRS="$IPPREFIX.2:1990" 22 | for IDX in `seq 3 $N`; do 23 | ADDRS="$ADDRS,$IPPREFIX.$IDX:1990" 24 | done 25 | 26 | for BW in $BWS; do 27 | for RTT in $RTTS; do 28 | LOGSD="$LOGS/logs-$BW-$RTT" 29 | mkdir $LOGSD 30 | ./restart_cluster_veritas.sh 31 | if [[ "$BW" != "NoLimit" ]]; then 32 | sudo ./set_ovs_bw_limit.sh $BW 1 33 | fi 34 | ./set_tc.sh $RTT 35 | sleep 3 36 | ./start_veritas_kafka.sh 37 | ./run_iperf_ping.sh 2>&1 | tee $LOGSD/net.txt 38 | sleep 3 39 | ../bin/veritas-kafka-bench --load-path=$WORKLOAD_FILE --run-path=$WORKLOAD_RUN_FILE --ndrivers=$DRIVERS --nthreads=$THREADS --veritas-addrs=$ADDRS --tso-addr=:7070 2>&1 | tee $LOGS/veritas-$BW-$RTT.txt 40 | done 41 | done 42 | -------------------------------------------------------------------------------- /scripts/run_benchmark_veritas_kafka_nodes.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | . 
./env.sh 4 | 5 | set -x 6 | 7 | TSTAMP=`date +%F-%H-%M-%S` 8 | LOGS="logs-nodes-veritas-kafka-$TSTAMP" 9 | mkdir $LOGS 10 | 11 | DRIVERS=$DEFAULT_DRIVERS_VERITAS_KAFKA 12 | THREADS=$DEFAULT_THREADS_VERITAS_KAFKA 13 | WORKLOAD_FILE="$DEFAULT_WORKLOAD_PATH/$DEFAULT_WORKLOAD".dat 14 | WORKLOAD_RUN_FILE="$DEFAULT_WORKLOAD_PATH/run_$DEFAULT_WORKLOAD".dat 15 | 16 | for N in $NODES; do 17 | ./restart_cluster_veritas.sh $(($N+1)) 18 | ./start_veritas_kafka.sh $(($N+1)) 19 | 20 | # Generate server addresses. Veritas port is 1990 21 | ADDRS="$IPPREFIX.2:1990" 22 | for IDX in `seq 3 $(($N+1))`; do 23 | ADDRS="$ADDRS,$IPPREFIX.$IDX:1990" 24 | done 25 | 26 | ../bin/veritas-kafka-bench --load-path=$WORKLOAD_FILE --run-path=$WORKLOAD_RUN_FILE --ndrivers=$DRIVERS --nthreads=$THREADS --veritas-addrs=$ADDRS --tso-addr=:7070 2>&1 | tee $LOGS/veritas-nodes-$N.txt 27 | 28 | sleep 10 29 | SLOGS=$LOGS/veritas-nodes-$N-logs 30 | mkdir -p $SLOGS 31 | for I in `seq 2 $(($N+1))`; do 32 | IDX=$(($I-1)) 33 | scp -o StrictHostKeyChecking=no root@$IPPREFIX.$I:/veritas-$IDX.log $SLOGS/ 34 | done 35 | KAFKA_HOST="$IPPREFIX.$(($N+2))" 36 | scp -o StrictHostKeyChecking=no root@$KAFKA_HOST:/kafka_2.12-2.7.0/zookeeper.log $SLOGS/ 37 | scp -o StrictHostKeyChecking=no root@$KAFKA_HOST:/kafka_2.12-2.7.0/kafka.log $SLOGS/ 38 | ssh -o StrictHostKeyChecking=no root@$KAFKA_HOST "cd /kafka_2.12-2.7.0 && ./bin/kafka-consumer-groups.sh --bootstrap-server localhost:9092 --list" | tee -a $SLOGS/kafka-counters.log 39 | for I in `seq 1 $N`; do 40 | ssh -o StrictHostKeyChecking=no root@$KAFKA_HOST "cd /kafka_2.12-2.7.0 && ./bin/kafka-consumer-groups.sh --bootstrap-server localhost:9092 --describe --group $I" | tee -a $SLOGS/kafka-counters.log 41 | done 42 | done 43 | sudo ./unset_ovs_veritas.sh 44 | ./kill_containers_veritas.sh 45 | -------------------------------------------------------------------------------- /scripts/run_benchmark_veritas_kafka_proctime.sh: 
-------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | . ./env.sh 4 | 5 | set -x 6 | 7 | TSTAMP=`date +%F-%H-%M-%S` 8 | LOGS="logs-txdelay-veritas-kafka-$TSTAMP" 9 | mkdir $LOGS 10 | 11 | N=$(($DEFAULT_NODES + 1)) 12 | DRIVERS=$DEFAULT_DRIVERS_VERITAS_KAFKA 13 | THREADS=$DEFAULT_THREADS_VERITAS_KAFKA 14 | BLKSIZE=$DEFAULT_BLOCK_SIZE 15 | WORKLOAD_FILE="$DEFAULT_WORKLOAD_PATH/$DEFAULT_WORKLOAD".dat 16 | WORKLOAD_RUN_FILE="$DEFAULT_WORKLOAD_PATH/run_$DEFAULT_WORKLOAD".dat 17 | 18 | # Generate server addresses. Veritas port is 1990 19 | ADDRS="$IPPREFIX.2:1990" 20 | for IDX in `seq 3 $N`; do 21 | ADDRS="$ADDRS,$IPPREFIX.$IDX:1990" 22 | done 23 | 24 | for TXD in $TXDELAYS; do 25 | ./restart_cluster_veritas.sh 26 | ./start_veritas_kafka_delay.sh $N $BLKSIZE $TXD 27 | sleep 30 28 | ../bin/veritas-kafka-bench-txdelay --load-path=$WORKLOAD_FILE --run-path=$WORKLOAD_RUN_FILE --ndrivers=$DRIVERS --nthreads=$THREADS --veritas-addrs=$ADDRS --tso-addr=:7070 2>&1 | tee $LOGS/veritas-kafka-txdelay-$TXD.txt 29 | done 30 | ./stop_veritas_kafka.sh 31 | -------------------------------------------------------------------------------- /scripts/run_benchmark_veritas_kafka_recordsize.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | . ./env.sh 4 | 5 | set -x 6 | 7 | TSTAMP=`date +%F-%H-%M-%S` 8 | LOGS="logs-txsizes-veritas-kafka-$TSTAMP" 9 | mkdir $LOGS 10 | 11 | N=$(($DEFAULT_NODES + 1)) 12 | DRIVERS=$DEFAULT_DRIVERS_VERITAS_KAFKA 13 | THREADS=$DEFAULT_THREADS_VERITAS_KAFKA 14 | BLKSIZE=$DEFAULT_BLOCK_SIZE 15 | WORKLOAD_FILE="$DEFAULT_WORKLOAD_PATH/$DEFAULT_WORKLOAD".dat 16 | WORKLOAD_RUN_FILE="$DEFAULT_WORKLOAD_PATH/run_$DEFAULT_WORKLOAD".dat 17 | 18 | # Generate server addresses. 
Veritas port is 1990 19 | ADDRS="$IPPREFIX.2:1990" 20 | for IDX in `seq 3 $N`; do 21 | ADDRS="$ADDRS,$IPPREFIX.$IDX:1990" 22 | done 23 | 24 | for TXSIZE in $TXSIZES; do 25 | ./restart_cluster_veritas.sh 26 | ./start_veritas_kafka.sh 27 | sleep 30 28 | ../bin/veritas-kafka-bench --load-path=$DEFAULT_WORKLOAD_PATH"_$TXSIZE/"$DEFAULT_WORKLOAD".dat" --run-path=$DEFAULT_WORKLOAD_PATH"_$TXSIZE/run_"$DEFAULT_WORKLOAD".dat" --ndrivers=$DRIVERS --nthreads=$THREADS --veritas-addrs=$ADDRS --tso-addr=:7070 2>&1 | tee $LOGS/veritas-kafka-txsize-$TXSIZE.txt 29 | done 30 | ./stop_veritas_kafka.sh 31 | -------------------------------------------------------------------------------- /scripts/run_benchmark_veritas_kafka_redisql_clients.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | . ./env.sh 4 | 5 | set -x 6 | 7 | TSTAMP=`date +%F-%H-%M-%S` 8 | LOGS="logs-clients-veritas-kafka-redisql-$TSTAMP" 9 | mkdir $LOGS 10 | 11 | N=$(($DEFAULT_NODES + 1)) 12 | DRIVERS=$DEFAULT_DRIVERS_VERITAS_KAFKA 13 | THREADS=$DEFAULT_THREADS_VERITAS_KAFKA 14 | BLKSIZE=$DEFAULT_BLOCK_SIZE 15 | WORKLOAD_FILE="$DEFAULT_WORKLOAD_PATH/$DEFAULT_WORKLOAD".dat 16 | WORKLOAD_RUN_FILE="$DEFAULT_WORKLOAD_PATH/run_$DEFAULT_WORKLOAD".dat 17 | 18 | # Generate server addresses. 
Veritas port is 1990 19 | ADDRS="$IPPREFIX.2:1990" 20 | for IDX in `seq 3 $N`; do 21 | ADDRS="$ADDRS,$IPPREFIX.$IDX:1990" 22 | done 23 | 24 | 25 | for TH in $THREADS; do 26 | ./restart_cluster_veritas.sh 27 | ./start_veritas_kafka_redisql.sh 28 | ../bin/veritas-kafka-bench --load-path=$WORKLOAD_FILE --run-path=$WORKLOAD_RUN_FILE --ndrivers=$DRIVERS --nthreads=$TH --veritas-addrs=$ADDRS --tso-addr=:7070 2>&1 | tee $LOGS/veritas-clients-$TH.txt 29 | 30 | # copy logs 31 | SLOGS="$LOGS/veritas-clients-$TH-logs" 32 | mkdir -p $SLOGS 33 | for I in `seq 2 5`; do 34 | IDX=$(($I-1)) 35 | scp -o StrictHostKeyChecking=no root@$IPPREFIX.$I:/veritas-$IDX.log $SLOGS/ 36 | done 37 | scp -o StrictHostKeyChecking=no root@$IPPREFIX.6:/kafka_2.12-2.7.0/zookeeper.log $SLOGS/ 38 | scp -o StrictHostKeyChecking=no root@$IPPREFIX.6:/kafka_2.12-2.7.0/kafka.log $SLOGS/ 39 | done 40 | # ./restart_cluster_veritas.sh 41 | -------------------------------------------------------------------------------- /scripts/run_benchmark_veritas_kafka_redisql_workload.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | . ./env.sh 4 | 5 | set -x 6 | 7 | TSTAMP=`date +%F-%H-%M-%S` 8 | LOGS="logs-workload-veritas-kafka-redisql-$TSTAMP" 9 | mkdir $LOGS 10 | 11 | N=$(($DEFAULT_NODES + 1)) 12 | DRIVERS=$DEFAULT_DRIVERS_VERITAS_KAFKA 13 | THREADS=$DEFAULT_THREADS_VERITAS_KAFKA 14 | BLKSIZE=$DEFAULT_BLOCK_SIZE 15 | WORKLOAD_FILE="$DEFAULT_WORKLOAD_PATH/$DEFAULT_WORKLOAD".dat 16 | WORKLOAD_RUN_FILE="$DEFAULT_WORKLOAD_PATH/run_$DEFAULT_WORKLOAD".dat 17 | 18 | # Generate server addresses. 
Veritas port is 1990 19 | ADDRS="$IPPREFIX.2:1990" 20 | for IDX in `seq 3 $N`; do 21 | ADDRS="$ADDRS,$IPPREFIX.$IDX:1990" 22 | done 23 | 24 | # Redis KV 25 | for W in $WORKLOADS; do 26 | ./restart_cluster_veritas.sh 27 | ./start_veritas_kafka_redisql.sh 28 | sleep 30 29 | ../bin/veritas-kafka-bench --load-path=$DEFAULT_WORKLOAD_PATH/$W.dat --run-path=$DEFAULT_WORKLOAD_PATH/run_$W.dat --ndrivers=$DRIVERS --nthreads=$THREADS --veritas-addrs=$ADDRS --tso-addr=:7070 2>&1 | tee $LOGS/veritas-redisql-$W.txt 30 | SLOGS="$LOGS/veritas-redisql-$W-logs" 31 | mkdir -p $SLOGS 32 | for I in `seq 2 5`; do 33 | IDX=$(($I-1)) 34 | scp -o StrictHostKeyChecking=no root@$IPPREFIX.$I:/veritas-$IDX.log $SLOGS/ 35 | done 36 | scp -o StrictHostKeyChecking=no root@$IPPREFIX.6:/kafka_2.12-2.7.0/zookeeper.log $SLOGS/ 37 | scp -o StrictHostKeyChecking=no root@$IPPREFIX.6:/kafka_2.12-2.7.0/kafka.log $SLOGS/ 38 | done 39 | ./stop_veritas_kafka.sh 40 | -------------------------------------------------------------------------------- /scripts/run_benchmark_veritas_kafka_workload.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | . ./env.sh 4 | 5 | set -x 6 | 7 | TSTAMP=`date +%F-%H-%M-%S` 8 | LOGS="logs-workload-veritas-kafka-$TSTAMP" 9 | mkdir $LOGS 10 | 11 | N=$(($DEFAULT_NODES + 1)) 12 | DRIVERS=$DEFAULT_DRIVERS_VERITAS_KAFKA 13 | THREADS=$DEFAULT_THREADS_VERITAS_KAFKA 14 | BLKSIZE=$DEFAULT_BLOCK_SIZE 15 | WORKLOAD_FILE="$DEFAULT_WORKLOAD_PATH/$DEFAULT_WORKLOAD".dat 16 | WORKLOAD_RUN_FILE="$DEFAULT_WORKLOAD_PATH/run_$DEFAULT_WORKLOAD".dat 17 | 18 | # Generate server addresses. 
Veritas port is 1990 19 | ADDRS="$IPPREFIX.2:1990" 20 | for IDX in `seq 3 $N`; do 21 | ADDRS="$ADDRS,$IPPREFIX.$IDX:1990" 22 | done 23 | 24 | # Redis KV 25 | for W in $WORKLOADS; do 26 | ./restart_cluster_veritas.sh 27 | ./start_veritas_kafka.sh 28 | sleep 30 29 | ../bin/veritas-kafka-bench --load-path=$DEFAULT_WORKLOAD_PATH/$W.dat --run-path=$DEFAULT_WORKLOAD_PATH/run_$W.dat --ndrivers=$DRIVERS --nthreads=$THREADS --veritas-addrs=$ADDRS --tso-addr=:7070 2>&1 | tee $LOGS/veritas-kafka-$W.txt 30 | SLOGS="$LOGS/veritas-kafka-$W-logs" 31 | mkdir -p $SLOGS 32 | for I in `seq 2 5`; do 33 | IDX=$(($I-1)) 34 | scp -o StrictHostKeyChecking=no root@$IPPREFIX.$I:/veritas-$IDX.log $SLOGS/ 35 | done 36 | scp -o StrictHostKeyChecking=no root@$IPPREFIX.6:/kafka_2.12-2.7.0/zookeeper.log $SLOGS/ 37 | scp -o StrictHostKeyChecking=no root@$IPPREFIX.6:/kafka_2.12-2.7.0/kafka.log $SLOGS/ 38 | done 39 | ./stop_veritas_kafka.sh 40 | -------------------------------------------------------------------------------- /scripts/run_benchmark_veritas_tendermint_clients.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | . ./env.sh 4 | 5 | set -x 6 | 7 | TSTAMP=`date +%F-%H-%M-%S` 8 | LOGS="logs-clients-veritas-tendermint-$TSTAMP" 9 | mkdir $LOGS 10 | 11 | N=DEFAULT_NODES 12 | DRIVERS=$DEFAULT_DRIVERS_VERITAS_TM 13 | WORKLOAD_FILE="$DEFAULT_WORKLOAD_PATH/$DEFAULT_WORKLOAD".dat 14 | WORKLOAD_RUN_FILE="$DEFAULT_WORKLOAD_PATH/run_$DEFAULT_WORKLOAD".dat 15 | 16 | # Generate server addresses. 
Veritas port is 1990 17 | ADDRS="$IPPREFIX.2:1990" 18 | for IDX in `seq 3 $(($N+1))`; do 19 | ADDRS="$ADDRS,$IPPREFIX.$IDX:1990" 20 | done 21 | 22 | for TH in $THREADS; do 23 | ./restart_cluster_veritas.sh 24 | ./start_veritas_tendermint.sh 25 | ../bin/veritas-tendermint-bench --load-path=$WORKLOAD_FILE --run-path=$WORKLOAD_RUN_FILE --ndrivers=$DRIVERS --nthreads=$TH --veritas-addrs=$ADDRS | tee $LOGS/veritas-clients-$TH.txt 26 | # copy logs 27 | SLOGS="$LOGS/veritas-clients-$TH-logs" 28 | mkdir -p $SLOGS 29 | for I in `seq 2 $(($N+1))`; do 30 | IDX=$(($I-1)) 31 | scp -o StrictHostKeyChecking=no root@$IPPREFIX.$I:/veritas-tm-$IDX.log $SLOGS/ 32 | scp -o StrictHostKeyChecking=no root@$IPPREFIX.$I:/root/tendermint.log $SLOGS/tendermint-$IDX.log 33 | done 34 | done 35 | ./restart_cluster_veritas.sh 36 | -------------------------------------------------------------------------------- /scripts/run_benchmark_veritas_tendermint_distribution.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | . ./env.sh 4 | 5 | set -x 6 | 7 | TSTAMP=`date +%F-%H-%M-%S` 8 | LOGS="logs-distribution-veritas-tendermint-$TSTAMP" 9 | mkdir $LOGS 10 | 11 | N=$DEFAULT_NODES 12 | DRIVERS=$DEFAULT_DRIVERS_VERITAS_TM 13 | THREADS=$DEFAULT_THREADS_VERITAS_TM 14 | WORKLOAD_FILE="$DEFAULT_WORKLOAD_PATH/$DEFAULT_WORKLOAD".dat 15 | WORKLOAD_RUN_FILE="$DEFAULT_WORKLOAD_PATH/run_$DEFAULT_WORKLOAD".dat 16 | 17 | # Generate server addresses. 
Veritas port is 1990 18 | ADDRS="$IPPREFIX.2:1990" 19 | for IDX in `seq 3 $(($N+1))`; do 20 | ADDRS="$ADDRS,$IPPREFIX.$IDX:1990" 21 | done 22 | 23 | # Uniform 24 | ./restart_cluster_veritas.sh 25 | ./start_veritas_tendermint.sh 26 | sleep 5 27 | ../bin/veritas-tendermint-bench --load-path=$WORKLOAD_FILE --run-path=$WORKLOAD_RUN_FILE --ndrivers=$DRIVERS --nthreads=$THREADS --veritas-addrs=$ADDRS | tee $LOGS/veritas-uniform.txt 28 | 29 | # Latest 30 | ./restart_cluster_veritas.sh 31 | ./start_veritas_tendermint.sh 32 | sleep 5 33 | ../bin/veritas-tendermint-bench --load-path=$DEFAULT_WORKLOAD_PATH"_latest/"$DEFAULT_WORKLOAD".dat" --run-path=$DEFAULT_WORKLOAD_PATH"_latest/run_"$DEFAULT_WORKLOAD".dat" --ndrivers=$DRIVERS --nthreads=$THREADS --veritas-addrs=$ADDRS | tee $LOGS/veritas-latest.txt 34 | 35 | # Zipfian 36 | ./restart_cluster_veritas.sh 37 | ./start_veritas_tendermint.sh 38 | sleep 5 39 | ../bin/veritas-tendermint-bench --load-path=$DEFAULT_WORKLOAD_PATH"_zipfian/"$DEFAULT_WORKLOAD".dat" --run-path=$DEFAULT_WORKLOAD_PATH"_zipfian/run_"$DEFAULT_WORKLOAD".dat" --ndrivers=$DRIVERS --nthreads=$THREADS --veritas-addrs=$ADDRS | tee $LOGS/veritas-zipfian.txt 40 | 41 | ./stop_veritas_tendermint.sh 42 | -------------------------------------------------------------------------------- /scripts/run_benchmark_veritas_tendermint_mongodb_clients.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | . ./env.sh 4 | 5 | set -x 6 | 7 | TSTAMP=`date +%F-%H-%M-%S` 8 | LOGS="logs-clients-veritas-tendermint-mongodb-$TSTAMP" 9 | mkdir $LOGS 10 | 11 | N=$DEFAULT_NODES 12 | DRIVERS=$DEFAULT_DRIVERS_VERITAS_TM 13 | THREADS=$DEFAULT_THREADS_VERITAS_TM 14 | WORKLOAD_FILE="$DEFAULT_WORKLOAD_PATH/$DEFAULT_WORKLOAD".dat 15 | WORKLOAD_RUN_FILE="$DEFAULT_WORKLOAD_PATH/run_$DEFAULT_WORKLOAD".dat 16 | 17 | # Generate server addresses. 
Veritas port is 1990 18 | ADDRS="$IPPREFIX.2:1990" 19 | for IDX in `seq 3 $(($N+1))`; do 20 | ADDRS="$ADDRS,$IPPREFIX.$IDX:1990" 21 | done 22 | 23 | for TH in $THREADS; do 24 | ./restart_cluster_veritas.sh 25 | ./start_veritas_tendermint_mongodb.sh 26 | ../bin/veritas-tendermint-bench --load-path=$WORKLOAD_FILE --run-path=$WORKLOAD_RUN_FILE --ndrivers=$DRIVERS --nthreads=$TH --veritas-addrs=$ADDRS | tee $LOGS/veritas-clients-$TH.txt 27 | # copy logs 28 | SLOGS="$LOGS/veritas-clients-$TH-logs" 29 | mkdir -p $SLOGS 30 | for I in `seq 2 $(($N+1))`; do 31 | IDX=$(($I-1)) 32 | scp -o StrictHostKeyChecking=no root@$IPPREFIX.$I:/veritas-tm-$IDX.log $SLOGS/ 33 | scp -o StrictHostKeyChecking=no root@$IPPREFIX.$I:/root/tendermint.log $SLOGS/tendermint-$IDX.log 34 | done 35 | done 36 | #./restart_cluster_veritas.sh 37 | -------------------------------------------------------------------------------- /scripts/run_benchmark_veritas_tendermint_networking.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | . ./env.sh 4 | 5 | set -x 6 | 7 | BWS="NoLimit 10000 1000 100" 8 | RTTS="5ms 10ms 20ms 30ms 40ms 50ms 60ms" 9 | 10 | TSTAMP=`date +%F-%H-%M-%S` 11 | LOGS="logs-networking-veritas-tendermint-$TSTAMP" 12 | mkdir $LOGS 13 | 14 | N=$DEFAULT_NODES 15 | DRIVERS=$DEFAULT_DRIVERS_VERITAS_TM 16 | THREADS=$DEFAULT_THREADS_VERITAS_TM 17 | WORKLOAD_FILE="$DEFAULT_WORKLOAD_PATH/$DEFAULT_WORKLOAD".dat 18 | WORKLOAD_RUN_FILE="$DEFAULT_WORKLOAD_PATH/run_$DEFAULT_WORKLOAD".dat 19 | 20 | # Generate server addresses. 
Veritas port is 1990 21 | ADDRS="$IPPREFIX.2:1990" 22 | for IDX in `seq 3 $(($N+1))`; do 23 | ADDRS="$ADDRS,$IPPREFIX.$IDX:1990" 24 | done 25 | 26 | for BW in $BWS; do 27 | for RTT in $RTTS; do 28 | LOGSD="$LOGS/logs-$BW-$RTT" 29 | mkdir $LOGSD 30 | ./restart_cluster_veritas.sh 31 | if [[ "$BW" != "NoLimit" ]]; then 32 | sudo ./set_ovs_bw_limit.sh $BW 1 33 | fi 34 | ./set_tc.sh $RTT 35 | sleep 3 36 | ./start_veritas_tendermint.sh 37 | ./run_iperf_ping.sh 2>&1 | tee $LOGSD/net.txt 38 | sleep 3 39 | ../bin/veritas-tendermint-bench --load-path=$WORKLOAD_FILE --run-path=$WORKLOAD_RUN_FILE --ndrivers=$DRIVERS --nthreads=$THREADS --veritas-addrs=$ADDRS 2>&1 | tee $LOGS/veritas-net-$BW-$RTT.txt 40 | done 41 | done 42 | -------------------------------------------------------------------------------- /scripts/run_benchmark_veritas_tendermint_nodes.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | . ./env.sh 4 | 5 | set -x 6 | 7 | TSTAMP=`date +%F-%H-%M-%S` 8 | LOGS="logs-nodes-veritas-tendermint-$TSTAMP" 9 | mkdir $LOGS 10 | 11 | DRIVERS=$DEFAULT_DRIVERS_VERITAS_TM 12 | THREADS=$DEFAULT_THREADS_VERITAS_TM 13 | WORKLOAD_FILE="$DEFAULT_WORKLOAD_PATH/$DEFAULT_WORKLOAD".dat 14 | WORKLOAD_RUN_FILE="$DEFAULT_WORKLOAD_PATH/run_$DEFAULT_WORKLOAD".dat 15 | 16 | for N in $NODES; do 17 | ./restart_cluster_veritas.sh $(($N+1)) 18 | ./start_veritas_tendermint.sh $N 19 | 20 | ADDRS="$IPPREFIX.2:1990" 21 | for I in `seq 3 $(($N+1))`; do 22 | ADDRS="$ADDRS,$IPPREFIX.$I:1990" 23 | done 24 | 25 | ../bin/veritas-tendermint-bench --load-path=$WORKLOAD_FILE --run-path=$WORKLOAD_RUN_FILE --ndrivers=$DRIVERS --nthreads=$THREADS --veritas-addrs=$ADDRS | tee $LOGS/veritas-nodes-$N.txt 26 | done 27 | sudo ./unset_ovs_veritas.sh 28 | ./kill_containers_veritas.sh 29 | -------------------------------------------------------------------------------- /scripts/run_benchmark_veritas_tendermint_proctime.sh: 
-------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | . ./env.sh 4 | 5 | set -x 6 | 7 | TSTAMP=`date +%F-%H-%M-%S` 8 | LOGS="logs-txdelay-veritas-tendermint-$TSTAMP" 9 | mkdir $LOGS 10 | 11 | N=$DEFAULT_NODES 12 | DRIVERS=$DEFAULT_DRIVERS_VERITAS_TM 13 | THREADS=$DEFAULT_THREADS_VERITAS_TM 14 | WORKLOAD_FILE="$DEFAULT_WORKLOAD_PATH/$DEFAULT_WORKLOAD".dat 15 | WORKLOAD_RUN_FILE="$DEFAULT_WORKLOAD_PATH/run_$DEFAULT_WORKLOAD".dat 16 | 17 | # Generate server addresses. Veritas port is 1990 18 | ADDRS="$IPPREFIX.2:1990" 19 | for IDX in `seq 3 $(($N+1))`; do 20 | ADDRS="$ADDRS,$IPPREFIX.$IDX:1990" 21 | done 22 | 23 | for TXD in $TXDELAYS; do 24 | ./restart_cluster_veritas.sh 25 | ./start_veritas_tendermint_delay.sh 4 $TXD 26 | sleep 30 27 | ../bin/veritas-tendermint-bench-txdelay --load-path=$WORKLOAD_FILE --run-path=$WORKLOAD_RUN_FILE --ndrivers=$DRIVERS --nthreads=$THREADS --veritas-addrs=$ADDRS 2>&1 | tee $LOGS/veritas-tendermint-txdelay-$TXD.txt 28 | done 29 | ./stop_veritas_tendermint.sh 30 | -------------------------------------------------------------------------------- /scripts/run_benchmark_veritas_tendermint_recordsize.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | . ./env.sh 4 | 5 | set -x 6 | 7 | TSTAMP=`date +%F-%H-%M-%S` 8 | LOGS="logs-txsizes-veritas-tendermint-$TSTAMP" 9 | mkdir $LOGS 10 | 11 | N=$DEFAULT_NODES 12 | DRIVERS=$DEFAULT_DRIVERS_VERITAS_TM 13 | THREADS=$DEFAULT_THREADS_VERITAS_TM 14 | WORKLOAD_FILE="$DEFAULT_WORKLOAD_PATH/$DEFAULT_WORKLOAD".dat 15 | WORKLOAD_RUN_FILE="$DEFAULT_WORKLOAD_PATH/run_$DEFAULT_WORKLOAD".dat 16 | 17 | # Generate server addresses. 
Veritas port is 1990 18 | ADDRS="$IPPREFIX.2:1990" 19 | for IDX in `seq 3 $(($N+1))`; do 20 | ADDRS="$ADDRS,$IPPREFIX.$IDX:1990" 21 | done 22 | 23 | for TXSIZE in $TXSIZES; do 24 | ./restart_cluster_veritas.sh 25 | ./start_veritas_tendermint.sh 26 | sleep 10 27 | ../bin/veritas-tendermint-bench --load-path=$DEFAULT_WORKLOAD_PATH"_$TXSIZE/"$DEFAULT_WORKLOAD".dat" --run-path=$DEFAULT_WORKLOAD_PATH"_$TXSIZE/run_"$DEFAULT_WORKLOAD".dat" --ndrivers=$DRIVERS --nthreads=$THREADS --veritas-addrs=$ADDRS 2>&1 | tee $LOGS/veritas-tendermint-txsize-$TXSIZE.txt 28 | done 29 | ./stop_veritas_tendermint.sh 30 | -------------------------------------------------------------------------------- /scripts/run_benchmark_veritas_tendermint_workload.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | . ./env.sh 4 | 5 | set -x 6 | 7 | TSTAMP=`date +%F-%H-%M-%S` 8 | LOGS="logs-workload-veritas-tendermint-$TSTAMP" 9 | mkdir $LOGS 10 | 11 | N=$DEFAULT_NODES 12 | DRIVERS=$DEFAULT_DRIVERS_VERITAS_TM 13 | THREADS=$DEFAULT_THREADS_VERITAS_TM 14 | WORKLOAD_FILE="$DEFAULT_WORKLOAD_PATH/$DEFAULT_WORKLOAD".dat 15 | WORKLOAD_RUN_FILE="$DEFAULT_WORKLOAD_PATH/run_$DEFAULT_WORKLOAD".dat 16 | 17 | # Generate server addresses. 
Veritas port is 1990 18 | ADDRS="$IPPREFIX.2:1990" 19 | for IDX in `seq 3 $(($N+1))`; do 20 | ADDRS="$ADDRS,$IPPREFIX.$IDX:1990" 21 | done 22 | 23 | # Redis KV 24 | for W in $WORKLOADS; do 25 | ./restart_cluster_veritas.sh 26 | ./start_veritas_tendermint.sh 27 | sleep 10 28 | ../bin/veritas-tendermint-bench --load-path=$DEFAULT_WORKLOAD_PATH/$W.dat --run-path=$DEFAULT_WORKLOAD_PATH/run_$W.dat --ndrivers=$DRIVERS --nthreads=$THREADS --veritas-addrs=$ADDRS | tee $LOGS/veritas-workload-$W.txt 29 | done 30 | ./stop_veritas_tendermint.sh 31 | -------------------------------------------------------------------------------- /scripts/run_iperf_ping.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | . ./env.sh 4 | 5 | HOST1="$IPPREFIX.2" 6 | HOST2="$IPPREFIX.3" 7 | 8 | ssh -o StrictHostKeyChecking=no root@$HOST1 "killall -9 iperf" 9 | sleep 1 10 | ssh -o StrictHostKeyChecking=no root@$HOST1 "iperf -s" & 11 | sleep 3 12 | ssh -o StrictHostKeyChecking=no root@$HOST2 "iperf -c $HOST1" 13 | sleep 3 14 | ssh -o StrictHostKeyChecking=no root@$HOST2 "ping -c 10 $HOST1" 15 | ssh -o StrictHostKeyChecking=no root@$HOST1 "killall -9 iperf" 16 | -------------------------------------------------------------------------------- /scripts/run_redis_benchmark.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | go build -o redis-bench ../cmd/redis/main.go 4 | go build -o redisql-bench ../cmd/redisql/main.go 5 | 6 | if ! [ -f "RediSQL.so" ]; then 7 | wget https://github.com/RedBeardLab/rediSQL/releases/download/v1.1.1/RediSQL_v1.1.1_9b110f__release.so 8 | mv RediSQL_v1.1.1_9b110f__release.so RediSQL.so 9 | chmod u+x RediSQL.so 10 | fi 11 | killall -9 redis-server 12 | sleep 5 13 | redis-server --loadmodule `pwd`/RediSQL.so --port 6500 > redis.log 2>&1 & 14 | sleep 5 15 | 16 | echo "Redis ..." 
17 | echo "FLUSHALL" | redis-cli -p 6500 18 | ./redis-bench --load-path temp/ycsb_data/workloada.dat --run-path temp/ycsb_data/run_workloada.dat --nthreads 6 --redis-addr 127.0.0.1:6500 --redis-db 0 19 | 20 | echo "RediSQL ..." 21 | echo "FLUSHALL" | redis-cli -p 6500 22 | ./redisql-bench --load-path temp/ycsb_data/workloada.dat --run-path temp/ycsb_data/run_workloada.dat --nthreads 6 --redis-addr 127.0.0.1:6500 --redis-db 0 23 | -------------------------------------------------------------------------------- /scripts/set_ovs_bigchaindb.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | if [ $EUID -ne 0 ]; then 4 | echo "This script must be run as root!" 5 | exit 1 6 | fi 7 | 8 | . ./env.sh 9 | 10 | N=$DEFAULT_NODES 11 | if [ $# -ge 1 ]; then 12 | N=$1 13 | fi 14 | PREFIX="bigchaindb" 15 | 16 | ovs-vsctl add-br ovs-br1 17 | ifconfig ovs-br1 $IPPREFIX.1 netmask 255.255.255.0 up 18 | for idx in `seq 1 $N`; do 19 | idx2=$(($idx+1)) 20 | ovs-docker add-port ovs-br1 eth1 $PREFIX$idx --ipaddress=$IPPREFIX.$idx2/24 21 | done 22 | -------------------------------------------------------------------------------- /scripts/set_ovs_blockchaindb.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -ex 3 | 4 | if [ $EUID -ne 0 ]; then 5 | echo "This script must be run as root!" 
6 | exit 1 7 | fi 8 | 9 | N=${1:-4} 10 | shard=${2:-1} 11 | PREFIX="blockchaindb" 12 | NET_PREFIX="192.168.20" 13 | 14 | ovs-vsctl add-br ovs-br1 15 | ifconfig ovs-br1 $NET_PREFIX.1 netmask 255.255.255.0 up 16 | for idx in `seq 1 $N`; do 17 | idx2=$(($idx+1)) 18 | ovs-docker add-port ovs-br1 eth1 $PREFIX$idx --ipaddress=$NET_PREFIX.${idx2}/24 19 | done 20 | # ovs-docker add-port ovs-br1 eth1 redis-shard${shard} --ipaddress=$NET_PREFIX.$((${idx2}+${shard}))/24 21 | -------------------------------------------------------------------------------- /scripts/set_ovs_bw_limit.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | if [ $EUID -ne 0 ]; then 4 | echo "This script must be run as root!" 5 | exit 1 6 | fi 7 | 8 | if [ $# -lt 2 ]; then 9 | echo "Usage: $0 " 10 | exit 1 11 | fi 12 | 13 | BW=$(($1*1000000)) 14 | LAT=$2 15 | PORTS=`ovs-appctl dpif/show | tail +3 | head -n 20 | cut -d ' ' -f 1 | tr -d '\t'` 16 | for PORT in $PORTS; do 17 | echo $PORT 18 | ovs-vsctl -- set port $PORT qos=@newqos -- --id=@newqos create qos type=linux-htb other-config:max-rate=$BW queues=0=@q0 -- --id=@q0 create queue other-config:min-rate=$BW other-config:max-rate=$BW 19 | done -------------------------------------------------------------------------------- /scripts/set_ovs_veritas.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | if [ $EUID -ne 0 ]; then 3 | echo "This script must be run as root!" 4 | exit 1 5 | fi 6 | 7 | . 
./env.sh 8 | 9 | # +1 node for Kafka 10 | N=$(($DEFAULT_NODES+1)) 11 | if [ $# -gt 0 ]; then 12 | N=$1 13 | else 14 | echo -e "Usage: $0 <# containers>" 15 | echo -e "\tDefault: $N containers" 16 | fi 17 | 18 | PREFIX="veritas" 19 | 20 | ovs-vsctl add-br ovs-br1 21 | ifconfig ovs-br1 $IPPREFIX.1 netmask 255.255.255.0 up 22 | for idx in `seq 1 $N`; do 23 | idx2=$(($idx+1)) 24 | ovs-docker add-port ovs-br1 eth1 $PREFIX$idx --ipaddress=$IPPREFIX.$idx2/24 25 | done 26 | -------------------------------------------------------------------------------- /scripts/set_tc.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # Set latency 4 | # 5 | 6 | if [ $# -ge 1 ]; then 7 | NETWORK=$1 8 | else 9 | echo "Usage: $0 " 10 | exit 1 11 | fi 12 | 13 | ./multi_node.sh "tc qdisc del dev eth1 root" 14 | echo "Network: $NETWORK" 15 | case $NETWORK in 16 | "1ms") 17 | ./multi_node.sh "tc qdisc add dev eth1 root netem delay 0.98ms" 18 | ;; 19 | "5ms") 20 | ./multi_node.sh "tc qdisc add dev eth1 root netem delay 2.48ms" 21 | ;; 22 | "10ms") 23 | ./multi_node.sh "tc qdisc add dev eth1 root netem delay 4.98ms" 24 | ;; 25 | "20ms") 26 | ./multi_node.sh "tc qdisc add dev eth1 root netem delay 9.98ms" 27 | ;; 28 | "30ms") 29 | ./multi_node.sh "tc qdisc add dev eth1 root netem delay 14.98ms" 30 | ;; 31 | "40ms") 32 | ./multi_node.sh "tc qdisc add dev eth1 root netem delay 19.99ms" 33 | ;; 34 | "50ms") 35 | ./multi_node.sh "tc qdisc add dev eth1 root netem delay 24.98ms" 36 | ;; 37 | "60ms") 38 | ./multi_node.sh "tc qdisc add dev eth1 root netem delay 29.98ms" 39 | ;; 40 | "default") 41 | exit 0 42 | ;; 43 | *) 44 | ./multi_node.sh "sudo tc qdisc del dev eth0 root" 45 | echo "$NETWORK -> reset latency" 46 | esac -------------------------------------------------------------------------------- /scripts/start_bigchaindb.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | . 
./env.sh 4 | 5 | N=$DEFAULT_NODES 6 | 7 | set -x 8 | 9 | if [ $# -ge 1 ]; then 10 | N=$1 11 | fi 12 | END_IDX=$(($N+1)) 13 | 14 | for idx in `seq 2 $END_IDX`; do 15 | ssh -o StrictHostKeyChecking=no root@$IPPREFIX.$idx "export LC_CTYPE=C.UTF-8 && export BIGCHAINDB_WSSERVER_HOST=0.0.0.0 && export BIGCHAINDB_SERVER_BIND=0.0.0.0:9984 && export BIGCHAINDB_SERVER_WORKERS=4 && /usr/src/app/scripts/start-all.sh" 16 | sleep 5 17 | ssh -o StrictHostKeyChecking=no root@$IPPREFIX.$idx "killall -9 tendermint; rm -r .tendermint; /usr/local/bin/tendermint init" 18 | for jdx in `seq 2 $END_IDX`; do 19 | if [ $idx -ne $jdx ]; then 20 | echo "," >> ids_$jdx.txt 21 | ssh -o StrictHostKeyChecking=no root@$IPPREFIX.$idx "/usr/local/bin/tendermint show_node_id" >> ids_$jdx.txt 22 | echo "," >> ips_$jdx.txt 23 | echo $IPPREFIX.$idx >> ips_$jdx.txt 24 | fi 25 | done 26 | echo "," >> validators.txt 27 | ssh -o StrictHostKeyChecking=no root@$IPPREFIX.$idx "cat .tendermint/config/genesis.json" | jq .validators[0] >> validators.txt 28 | GENESIS=`ssh -o StrictHostKeyChecking=no root@$IPPREFIX.$idx "cat .tendermint/config/genesis.json" | jq .genesis_time` 29 | echo "," >> power.txt 30 | echo "default" >> power.txt 31 | done 32 | VALIDATORS=`tail +2 validators.txt | tr -d '\n' | base64 | tr -d '\n'` 33 | POWERS=`tail +2 power.txt | tr -d '\n'` 34 | 35 | for idx in `seq 2 $END_IDX`; do 36 | IDS=`tail +2 ids_$idx.txt | tr -d '\n'` 37 | IPS=`tail +2 ips_$idx.txt | tr -d '\n'` 38 | scp -o StrictHostKeyChecking=no tendermint_config_bigchaindb.py root@$IPPREFIX.$idx:/usr/src/app/scripts/tendermint_config.py 39 | ssh -o StrictHostKeyChecking=no root@$IPPREFIX.$idx "cd /usr/src/app/scripts && ./tendermint_config.py root $GENESIS generate $VALIDATORS $POWERS $IDS $IPS" 40 | done 41 | 42 | rm validators.txt power.txt ids*.txt ips*.txt 43 | 44 | for idx in `seq 2 $END_IDX`; do 45 | ssh -o StrictHostKeyChecking=no root@$IPPREFIX.$idx "killall -9 tendermint; sleep 1; /usr/local/bin/tendermint node 
--p2p.laddr 'tcp://0.0.0.0:26656' --proxy_app='tcp://0.0.0.0:26658' --consensus.create_empty_blocks=false --p2p.pex=false > tendermint.log 2>&1 &" 46 | done 47 | -------------------------------------------------------------------------------- /scripts/start_containers_bigchaindb.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | . ./env.sh 4 | 5 | N=$DEFAULT_NODES 6 | if [ $# -gt 0 ]; then 7 | N=$1 8 | else 9 | echo -e "Usage: $0 <# containers>" 10 | echo -e "\tDefault: $N containers" 11 | fi 12 | 13 | IMGNAME="bigchaindb:latest" 14 | PREFIX="bigchaindb" 15 | 16 | CPUS_PER_CONTAINER=4 17 | 18 | DFILE=dockers.txt 19 | rm -rf $DFILE 20 | 21 | cat /dev/null > $HOME/.ssh/known_hosts 22 | 23 | set -x 24 | 25 | for idx in `seq 1 $N`; do 26 | CPUID=$(($idx*$CPUS_PER_CONTAINER+1)) 27 | CPUIDS=$CPUID 28 | for jdx in `seq 1 $(($CPUS_PER_CONTAINER-1))`; do 29 | CPUIDS="$CPUIDS,$(($CPUID+$jdx))" 30 | done 31 | docker run -d --publish-all=true --cap-add=SYS_ADMIN --cap-add=NET_ADMIN --security-opt seccomp:unconfined --cpuset-cpus=$CPUIDS --name=$PREFIX$idx $IMGNAME tail -f /dev/null 2>&1 >> $DFILE 32 | done 33 | while read ID; do 34 | # For Alpine: 35 | # docker exec $ID "/usr/sbin/sshd" 36 | # For Ubuntu: 37 | docker exec $ID service ssh start 38 | done < $DFILE 39 | -------------------------------------------------------------------------------- /scripts/start_containers_blockchaindb.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -ex 3 | 4 | nodes=${1:-4} 5 | shard=${2:-1} 6 | IMGNAME="blockchaindb" 7 | PREFIX="blockchaindb" 8 | 9 | CPUS_PER_CONTAINER=1 10 | 11 | DFILE=dockers.txt 12 | rm -rf $DFILE 13 | 14 | for idx in `seq 1 ${nodes}`; do 15 | CPUID=$(($idx*$CPUS_PER_CONTAINER)) 16 | CPUIDS=$CPUID 17 | for jdx in `seq 1 $(($CPUS_PER_CONTAINER-1))`; do 18 | CPUIDS="$CPUIDS,$(($CPUID+$jdx))" 19 | done 20 | docker run -d --publish-all=true --cap-add=SYS_ADMIN 
--cap-add=NET_ADMIN --security-opt seccomp:unconfined --cpuset-cpus=$CPUIDS --name=$PREFIX$idx $IMGNAME tail -f /dev/null 2>&1 >> $DFILE 21 | done 22 | # docker run -d --publish-all=true --cap-add=SYS_ADMIN --cap-add=NET_ADMIN --security-opt seccomp:unconfined --cpuset-cpus=$CPUIDS --name=redis-shard${shard} redis tail -f /dev/null 2>&1 >> $DFILE 23 | while read ID; do 24 | docker exec $ID "/usr/sbin/sshd" 25 | done < $DFILE -------------------------------------------------------------------------------- /scripts/start_containers_veritas.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | . ./env.sh 4 | 5 | N=$(($DEFAULT_NODES + 1)) 6 | CDIR=`pwd` 7 | 8 | if [ $# -gt 0 ]; then 9 | N=$1 10 | else 11 | echo -e "Usage: $0 <# containers>" 12 | echo -e "\tDefault: $N containers" 13 | fi 14 | 15 | IMGNAME="veritas:latest" 16 | PREFIX="veritas" 17 | 18 | DFILE=dockers.txt 19 | rm -rf $DFILE 20 | 21 | for idx in `seq 1 $N`; do 22 | CPUID=$(($idx+0)) 23 | docker run -d --publish-all=true --cap-add=SYS_ADMIN --cap-add=NET_ADMIN --security-opt seccomp:unconfined --cpuset-cpus=$CPUID --name=$PREFIX$idx $IMGNAME tail -f /dev/null 2>&1 >> $DFILE 24 | done 25 | while read ID; do 26 | docker exec $ID service ssh start 27 | done < $DFILE -------------------------------------------------------------------------------- /scripts/start_veritas_kafka.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | . 
./env.sh 4 | 5 | set -x 6 | 7 | N=$(($DEFAULT_NODES + 1)) 8 | BLKSIZE=100 9 | if [ $# -gt 0 ]; then 10 | N=$1 11 | else 12 | echo -e "Usage: $0 <# containers> " 13 | echo -e "\tDefault: $N containers" 14 | echo -e "\tDefault: $BLKSIZE block size" 15 | fi 16 | if [ $# -gt 1 ]; then 17 | BLKSIZE=$2 18 | else 19 | echo -e "Usage: $0 <# containers> " 20 | echo -e "\tDefault: $N containers" 21 | echo -e "\tDefault: $BLKSIZE block size" 22 | fi 23 | 24 | #TSO 25 | ../bin/veritas-tso --addr=":7070" > tso.log 2>&1 & 26 | 27 | # Kafka 28 | KAFKA_ADDR=$IPPREFIX".$(($N+1))" 29 | ssh -o StrictHostKeyChecking=no root@$KAFKA_ADDR "cd /kafka_2.12-2.7.0/config && echo 'advertised.listeners=PLAINTEXT://$KAFKA_ADDR:9092' >> server.properties" 30 | ssh -o StrictHostKeyChecking=no root@$KAFKA_ADDR "cd /kafka_2.12-2.7.0; bin/zookeeper-server-start.sh config/zookeeper.properties > zookeeper.log 2>&1 &" 31 | sleep 10s 32 | ssh -o StrictHostKeyChecking=no root@$KAFKA_ADDR "cd /kafka_2.12-2.7.0; bin/kafka-server-start.sh config/server.properties > kafka.log 2>&1 &" 33 | sleep 10s 34 | ssh -o StrictHostKeyChecking=no root@$KAFKA_ADDR "cd /kafka_2.12-2.7.0; bin/kafka-topics.sh --create --topic shared-log --bootstrap-server 0.0.0.0:9092" 35 | sleep 10 36 | 37 | # Nodes 38 | NODES=node1 39 | for I in `seq 2 $(($N-1))`; do 40 | NODES="$NODES,node$I" 41 | done 42 | 43 | # Start 44 | for I in `seq 1 $(($N-1))`; do 45 | ADDR=$IPPREFIX".$(($I+1))" 46 | ssh -o StrictHostKeyChecking=no root@$ADDR "cd /; redis-server > redis.log 2>&1 &" 47 | ssh -o StrictHostKeyChecking=no root@$ADDR "cd /; nohup /bin/veritas-kafka --signature=node$I --parties=${NODES} --blk-size=$BLKSIZE --addr=:1990 --kafka-addr=$KAFKA_ADDR:9092 --kafka-group=$I --kafka-topic=shared-log --redis-addr=0.0.0.0:6379 --redis-db=0 --ledger-path=veritas$I > veritas-$I.log 2>&1 &" 48 | done 49 | -------------------------------------------------------------------------------- /scripts/start_veritas_kafka_delay.sh: 
-------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | . ./env.sh 4 | 5 | set -x 6 | 7 | N=$(($DEFAULT_NODES + 1)) 8 | BLKSIZE=100 9 | TXDELAY=0 10 | if [ $# -gt 0 ]; then 11 | N=$1 12 | else 13 | echo -e "Usage: $0 <# containers> " 14 | echo -e "\tDefault: $N containers" 15 | echo -e "\tDefault: $BLKSIZE block size" 16 | echo -e "\tDefault: $TXDELAY block size" 17 | fi 18 | if [ $# -gt 1 ]; then 19 | BLKSIZE=$2 20 | else 21 | echo -e "Usage: $0 <# containers> " 22 | echo -e "\tDefault: $N containers" 23 | echo -e "\tDefault: $BLKSIZE block size" 24 | echo -e "\tDefault: $TXDELAY block size" 25 | fi 26 | if [ $# -gt 2 ]; then 27 | TXDELAY=$3 28 | else 29 | echo -e "Usage: $0 <# containers> " 30 | echo -e "\tDefault: $N containers" 31 | echo -e "\tDefault: $BLKSIZE block size" 32 | echo -e "\tDefault: $TXDELAY block size" 33 | fi 34 | 35 | #TSO 36 | ../bin/veritas-tso --addr=":7070" > tso.log 2>&1 & 37 | 38 | # Kafka 39 | KAFKA_ADDR=$IPPREFIX".$(($N+1))" 40 | ssh -o StrictHostKeyChecking=no root@$KAFKA_ADDR "cd /kafka_2.12-2.7.0/config && echo 'advertised.listeners=PLAINTEXT://$KAFKA_ADDR:9092' >> server.properties" 41 | ssh -o StrictHostKeyChecking=no root@$KAFKA_ADDR "cd /kafka_2.12-2.7.0; bin/zookeeper-server-start.sh config/zookeeper.properties > zookeeper.log 2>&1 &" 42 | sleep 10s 43 | ssh -o StrictHostKeyChecking=no root@$KAFKA_ADDR "cd /kafka_2.12-2.7.0; bin/kafka-server-start.sh config/server.properties > kafka.log 2>&1 &" 44 | sleep 10s 45 | ssh -o StrictHostKeyChecking=no root@$KAFKA_ADDR "cd /kafka_2.12-2.7.0; bin/kafka-topics.sh --create --topic shared-log --bootstrap-server 0.0.0.0:9092" 46 | sleep 10 47 | 48 | # Nodes 49 | NODES=node1 50 | for I in `seq 2 $(($N-1))`; do 51 | NODES="$NODES,node$I" 52 | done 53 | 54 | # Start 55 | for I in `seq 1 $(($N-1))`; do 56 | ADDR=$IPPREFIX".$(($I+1))" 57 | ssh -o StrictHostKeyChecking=no root@$ADDR "cd /; redis-server > redis.log 2>&1 &" 58 | ssh -o 
StrictHostKeyChecking=no root@$ADDR "cd /; nohup /bin/veritas-kafka-txdelay --signature=node$I --parties=${NODES} --tx-delay=$TXDELAY --blk-size=$BLKSIZE --addr=:1990 --kafka-addr=$KAFKA_ADDR:9092 --kafka-group=$I --kafka-topic=shared-log --redis-addr=0.0.0.0:6379 --redis-db=0 --ledger-path=veritas$I > veritas-$I.log 2>&1 &" 59 | done 60 | -------------------------------------------------------------------------------- /scripts/start_veritas_kafka_redisql.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | . ./env.sh 4 | 5 | set -x 6 | 7 | N=$(($DEFAULT_NODES + 1)) 8 | if [ $# -gt 0 ]; then 9 | N=$1 10 | else 11 | echo -e "Usage: $0 <# containers>" 12 | echo -e "\tDefault: 5 containers" 13 | fi 14 | 15 | #TSO 16 | ../bin/veritas-tso --addr=":7070" > tso.log 2>&1 & 17 | 18 | # Kafka 19 | KAFKA_ADDR=$IPPREFIX".$(($N+1))" 20 | ssh -o StrictHostKeyChecking=no root@$KAFKA_ADDR "cd /kafka_2.12-2.7.0/config && echo 'advertised.listeners=PLAINTEXT://$KAFKA_ADDR:9092' >> server.properties" 21 | ssh -o StrictHostKeyChecking=no root@$KAFKA_ADDR "cd /kafka_2.12-2.7.0; bin/zookeeper-server-start.sh config/zookeeper.properties > zookeeper.log 2>&1 &" 22 | sleep 10s 23 | ssh -o StrictHostKeyChecking=no root@$KAFKA_ADDR "cd /kafka_2.12-2.7.0; bin/kafka-server-start.sh config/server.properties > kafka.log 2>&1 &" 24 | sleep 10s 25 | ssh -o StrictHostKeyChecking=no root@$KAFKA_ADDR "cd /kafka_2.12-2.7.0; bin/kafka-topics.sh --create --topic shared-log --bootstrap-server 0.0.0.0:9092" 26 | 27 | # Nodes 28 | NODES=node1 29 | for I in `seq 1 $(($N-1))`; do 30 | NODES="$NODES,node$I" 31 | done 32 | 33 | # Start 34 | for I in `seq 1 $(($N-1))`; do 35 | ADDR=$IPPREFIX".$(($I+1))" 36 | ssh -o StrictHostKeyChecking=no root@$ADDR "cd /; redis-server --loadmodule /redisql.so > redis.log 2>&1 &" 37 | ssh -o StrictHostKeyChecking=no root@$ADDR "cd /; nohup /bin/veritas-kafka-redisql --signature=node$I --parties=${NODES} --addr=:1990 
--kafka-addr=$KAFKA_ADDR:9092 --kafka-group=$I --kafka-topic=shared-log --redis-addr=0.0.0.0:6379 --redis-db=0 --ledger-path=veritas$I > veritas-$I.log 2>&1 &" 38 | done 39 | -------------------------------------------------------------------------------- /scripts/start_veritas_kafka_tso_zk.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | . ./env.sh 4 | 5 | set -x 6 | 7 | N=$(($DEFAULT_NODES + 1)) 8 | if [ $# -gt 0 ]; then 9 | N=$1 10 | else 11 | echo -e "Usage: $0 <# containers>" 12 | echo -e "\tDefault: 5 containers" 13 | fi 14 | 15 | # TSO via ZooKeeper 16 | # Kafka 17 | KAFKA_ADDR=$IPPREFIX".$(($N+1))" 18 | ssh -o StrictHostKeyChecking=no root@$KAFKA_ADDR "cd /kafka_2.12-2.7.0/config && echo 'advertised.listeners=PLAINTEXT://$KAFKA_ADDR:9092' >> server.properties" 19 | ssh -o StrictHostKeyChecking=no root@$KAFKA_ADDR "cd /kafka_2.12-2.7.0; bin/zookeeper-server-start.sh config/zookeeper.properties > zookeeper.log 2>&1 &" 20 | sleep 10s 21 | ssh -o StrictHostKeyChecking=no root@$KAFKA_ADDR "cd /kafka_2.12-2.7.0; bin/kafka-server-start.sh config/server.properties > kafka.log 2>&1 &" 22 | sleep 10s 23 | ssh -o StrictHostKeyChecking=no root@$KAFKA_ADDR "cd /kafka_2.12-2.7.0; bin/kafka-topics.sh --create --topic shared-log --bootstrap-server 0.0.0.0:9092" 24 | sleep 10 25 | 26 | # Nodes 27 | NODES=node1 28 | for I in `seq 1 $(($N-1))`; do 29 | NODES="$NODES,node$I" 30 | done 31 | 32 | # Start 33 | for I in `seq 1 $(($N-1))`; do 34 | ADDR=$IPPREFIX".$(($I+1))" 35 | ssh -o StrictHostKeyChecking=no root@$ADDR "cd /; redis-server > redis.log 2>&1 &" 36 | ssh -o StrictHostKeyChecking=no root@$ADDR "cd /; nohup /bin/veritas-kafka-zk --signature=node$I --parties=${NODES} --addr=:1990 --kafka-addr=$KAFKA_ADDR:9092 --kafka-group=$I --kafka-topic=shared-log --redis-addr=0.0.0.0:6379 --redis-db=0 --ledger-path=veritas$I > veritas-$I.log 2>&1 &" 37 | done 38 | 
-------------------------------------------------------------------------------- /scripts/stop_bigchaindb.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | ./multi_node.sh "killall -9 tendermint; killall -SIGINT bigchaindb; killall -9 bigchaindb_ws; killall -9 bigchaindb_exchange; killall -9 mongod; killall -9 gunicorn" 4 | ./multi_node.sh "rm -rf /data/db/*; rm -rf /root/.tendermint; rm -f /root/*.log" 5 | -------------------------------------------------------------------------------- /scripts/stop_blockchaindb.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | . ./env.sh 4 | 5 | nodes=${1:-4} 6 | # Nodes 7 | for (( c=1; c<=$nodes; c++ )); do 8 | ADDR=$IPPREFIX".$(($c+1))" 9 | ssh -o StrictHostKeyChecking=no root@$ADDR "killall -9 bcdbnode; killall -9 geth" 10 | done 11 | -------------------------------------------------------------------------------- /scripts/stop_veritas_kafka.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | . ./env.sh 4 | 5 | set -x 6 | 7 | N=$(($DEFAULT_NODES + 1)) 8 | if [ $# -gt 0 ]; then 9 | N=$1 10 | else 11 | echo -e "Usage: $0 <# containers>" 12 | echo -e "\tDefault: 5 containers" 13 | fi 14 | 15 | #TSO 16 | killall -9 veritas-tso 17 | 18 | # Kafka 19 | KAFKA_ADDR=$IPPREFIX".$(($N+1))" 20 | RES=`kafkacat -b $KAFKA_ADDR:9092 -L 2>&1` 21 | if [[ "$RES" =~ "% ERROR" ]]; then 22 | echo "Kafka is down." 
23 | else 24 | ssh -o StrictHostKeyChecking=no root@$KAFKA_ADDR "cd /kafka_2.12-2.7.0; bin/kafka-topics.sh --delete --topic shared-log --bootstrap-server 0.0.0.0:9092" 25 | sleep 10 26 | ssh -o StrictHostKeyChecking=no root@$KAFKA_ADDR "cd /kafka_2.12-2.7.0; bin/kafka-server-stop.sh" 27 | sleep 5 28 | ssh -o StrictHostKeyChecking=no root@$KAFKA_ADDR "cd /kafka_2.12-2.7.0; bin/zookeeper-server-stop.sh" 29 | fi 30 | 31 | # Nodes 32 | for I in `seq 1 $(($N-1))`; do 33 | ADDR=$IPPREFIX".$(($I+1))" 34 | ssh -o StrictHostKeyChecking=no root@$ADDR "redis-cli flushdb; killall -9 redis-server; killall -9 veritas-kafka" 35 | done 36 | -------------------------------------------------------------------------------- /scripts/stop_veritas_kafka_redisql.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | . ./env.sh 4 | 5 | set -x 6 | 7 | N=$(($DEFAULT_NODES + 1)) 8 | if [ $# -gt 0 ]; then 9 | N=$1 10 | else 11 | echo -e "Usage: $0 <# containers>" 12 | echo -e "\tDefault: 5 containers" 13 | fi 14 | 15 | #TSO 16 | killall -9 veritas-tso 17 | 18 | # Kafka 19 | KAFKA_ADDR=$IPPREFIX".$(($N+1))" 20 | RES=`kafkacat -b $KAFKA_ADDR:9092 -L 2>&1` 21 | if [[ "$RES" =~ "% ERROR" ]]; then 22 | echo "Kafka is down." 
23 | else 24 | ssh -o StrictHostKeyChecking=no root@$KAFKA_ADDR "cd /kafka_2.12-2.7.0; bin/kafka-topics.sh --delete --topic shared-log --bootstrap-server 0.0.0.0:9092" 25 | sleep 30 26 | ssh -o StrictHostKeyChecking=no root@$KAFKA_ADDR "cd /kafka_2.12-2.7.0; bin/kafka-server-stop.sh" 27 | sleep 5 28 | fi 29 | ssh -o StrictHostKeyChecking=no root@$KAFKA_ADDR "cd /kafka_2.12-2.7.0; bin/zookeeper-server-stop.sh" 30 | 31 | # Nodes 32 | for I in `seq 1 $(($N-1))`; do 33 | ADDR=$IPPREFIX".$(($I+1))" 34 | ssh -o StrictHostKeyChecking=no root@$ADDR "redis-cli flushdb; killall -9 redis-server; killall -9 veritas-kafka-redisql" 35 | done 36 | -------------------------------------------------------------------------------- /scripts/stop_veritas_kafka_tso_zk.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | . ./env.sh 4 | 5 | set -x 6 | 7 | N=$(($DEFAULT_NODES + 1)) 8 | if [ $# -gt 0 ]; then 9 | N=$1 10 | else 11 | echo -e "Usage: $0 <# containers>" 12 | echo -e "\tDefault: 5 containers" 13 | fi 14 | 15 | # Kafka 16 | KAFKA_ADDR=$IPPREFIX".$(($N+1))" 17 | RES=`kafkacat -b $KAFKA_ADDR:9092 -L 2>&1` 18 | if [[ "$RES" =~ "% ERROR" ]]; then 19 | echo "Kafka is down." 
20 | else 21 | ssh -o StrictHostKeyChecking=no root@$KAFKA_ADDR "cd /kafka_2.12-2.7.0; bin/kafka-topics.sh --delete --topic shared-log --bootstrap-server 0.0.0.0:9092" 22 | sleep 10 23 | ssh -o StrictHostKeyChecking=no root@$KAFKA_ADDR "cd /kafka_2.12-2.7.0; bin/kafka-server-stop.sh" 24 | sleep 5 25 | ssh -o StrictHostKeyChecking=no root@$KAFKA_ADDR "cd /kafka_2.12-2.7.0; bin/zookeeper-server-stop.sh" 26 | fi 27 | 28 | # Nodes 29 | for I in `seq 1 $(($N-1))`; do 30 | ADDR=$IPPREFIX".$(($I+1))" 31 | ssh -o StrictHostKeyChecking=no root@$ADDR "redis-cli flushdb; killall -9 redis-server; killall -9 veritas-kafka-zk" 32 | done -------------------------------------------------------------------------------- /scripts/stop_veritas_tendermint.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | ./multi_node.sh "killall -9 redis-server; killall -9 mongod; killall -9 tendermint; killall -9 veritas-tendermint" 4 | ./multi_node.sh "rm -r /veritas/data/*" -------------------------------------------------------------------------------- /scripts/unset_ovs_bigchaindb.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | if [ $EUID -ne 0 ]; then 4 | echo "This script must be run as root!" 5 | exit 1 6 | fi 7 | 8 | . ./env.sh 9 | 10 | N=$DEFAULT_NODES 11 | if [ $# -gt 0 ]; then 12 | N=$1 13 | else 14 | echo -e "Usage: $0 <# containers>" 15 | echo -e "\tDefault: $N containers" 16 | fi 17 | 18 | PREFIX="bigchaindb" 19 | 20 | for idx in `seq 1 $N`; do 21 | idx2=$(($idx+1)) 22 | ovs-docker del-port ovs-br1 eth1 $PREFIX$idx 23 | done 24 | ovs-vsctl del-br ovs-br1 -------------------------------------------------------------------------------- /scripts/unset_ovs_blockchaindb.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | if [ $EUID -ne 0 ]; then 4 | echo "This script must be run as root!" 
5 | exit 1 6 | fi 7 | 8 | N=${1:-4} 9 | shard=${2:-1} 10 | PREFIX="blockchaindb" 11 | 12 | for idx in `seq 1 $N`; do 13 | idx2=$(($idx+1)) 14 | ovs-docker del-port ovs-br1 eth1 $PREFIX$idx 15 | done 16 | # ovs-docker del-port ovs-br1 eth1 redis-shard${shard} 17 | ovs-vsctl del-br ovs-br1 -------------------------------------------------------------------------------- /scripts/unset_ovs_veritas.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | if [ $EUID -ne 0 ]; then 4 | echo "This script must be run as root!" 5 | exit 1 6 | fi 7 | 8 | . ./env.sh 9 | 10 | N=$(($DEFAULT_NODES + 1)) 11 | if [ $# -gt 0 ]; then 12 | N=$1 13 | else 14 | echo -e "Usage: $0 <# containers>" 15 | echo -e "\tDefault: $N containers" 16 | fi 17 | 18 | PREFIX="veritas" 19 | 20 | for idx in `seq 1 $N`; do 21 | idx2=$(($idx+1)) 22 | ovs-docker del-port ovs-br1 eth1 $PREFIX$idx 23 | done 24 | ovs-vsctl del-br ovs-br1 -------------------------------------------------------------------------------- /scripts/veritas-pprof.patch: -------------------------------------------------------------------------------- 1 | diff --git a/cmd/veritas/main.go b/cmd/veritas/main.go 2 | index 717959c..d0017f3 100644 3 | --- a/cmd/veritas/main.go 4 | +++ b/cmd/veritas/main.go 5 | @@ -7,6 +7,8 @@ import ( 6 | "os/signal" 7 | "strings" 8 | 9 | + "github.com/pkg/profile" 10 | + 11 | "google.golang.org/grpc" 12 | "gopkg.in/alecthomas/kingpin.v2" 13 | 14 | @@ -37,6 +39,8 @@ func check(err error) { 15 | } 16 | 17 | func main() { 18 | + defer profile.Start(profile.CPUProfile, profile.NoShutdownHook, profile.ProfilePath("/tmp")).Stop() 19 | + 20 | kingpin.Parse() 21 | 22 | r, err := dbconn.NewRedisConn(*redisAddr, *redisPwd, *redisDb) 23 | -------------------------------------------------------------------------------- /scripts/veritas-tso-zk.patch: -------------------------------------------------------------------------------- 1 | diff --git 
a/veritas/driver/driver.go b/veritas/driver/driver.go 2 | index a91d6ff..b929d65 100644 3 | --- a/veritas/driver/driver.go 4 | +++ b/veritas/driver/driver.go 5 | @@ -2,11 +2,12 @@ package driver 6 | 7 | import ( 8 | "context" 9 | + "fmt" 10 | 11 | "google.golang.org/grpc" 12 | 13 | pbv "hybrid/proto/veritas" 14 | - "hybrid/tso" 15 | + tso "hybrid/tso_zookeeper" 16 | "hybrid/veritas/db" 17 | ) 18 | 19 | @@ -18,6 +19,8 @@ type Driver struct { 20 | } 21 | 22 | func Open(serverAddr, tsoAddr, signature string) (*Driver, error) { 23 | + fmt.Println("Using Zookeeper TSO ...") 24 | + 25 | cc, err := grpc.Dial(serverAddr, grpc.WithInsecure()) 26 | if err != nil { 27 | return nil, err 28 | -------------------------------------------------------------------------------- /tso/README.md: -------------------------------------------------------------------------------- 1 | # Timestamp Oracle 2 | 3 | Timestamp Oracle is a golang implementation of timestamp service. It only support simple TS request function, which returns an auto-increment logic timestamp. 4 | 5 | The TS oracle can handle crash recovery by writing WAL to durable disk. 6 | 7 | The TS oracle has high performance by batching TS requests. Each client maintains only one in-flight TS request RPC. TS oracle also allocate TSs in batch, which reduces the WAL IO cost. 8 | 9 | ## Usage 10 | 11 | * Start Oracle Server 12 | ``` 13 | go run tso/examples/server.go --addr=":7070" 14 | ``` 15 | 16 | * Client stub 17 | 18 | The client stub is thread-safe. 19 | 20 | ```go 21 | client, err := tso.NewClient(":7070") 22 | if err != nil { 23 | log.Fatalln(err) 24 | } 25 | derfer client.Close() 26 | if ts, err := client.TS(); err != nil { 27 | log.Println("ts error") 28 | } else { 29 | ... 
30 | } 31 | ``` 32 | -------------------------------------------------------------------------------- /tso/examples/benchmark.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "fmt" 5 | "hybrid/tso" 6 | "sync" 7 | "time" 8 | 9 | "gopkg.in/alecthomas/kingpin.v2" 10 | ) 11 | 12 | var ( 13 | tsoAddr = kingpin.Flag("addr", "tso server address").Default(":7070").String() 14 | concurrency = kingpin.Flag("concurrency", "client num").Default("20").Int() 15 | reqNum = kingpin.Flag("req-num", "request num").Default("1000000").Int() 16 | ) 17 | 18 | func main() { 19 | kingpin.Parse() 20 | 21 | avaReqNum := *reqNum / (*concurrency) 22 | wg := &sync.WaitGroup{} 23 | for i := 0; i < *concurrency; i++ { 24 | wg.Add(1) 25 | go func() { 26 | defer wg.Done() 27 | 28 | cli, err := tso.NewClient(*tsoAddr) 29 | if err != nil { 30 | panic(err) 31 | } 32 | defer cli.Close() 33 | 34 | for j := 0; j < avaReqNum; j++ { 35 | if _, err := cli.TS(); err != nil { 36 | panic(err) 37 | } 38 | } 39 | }() 40 | } 41 | 42 | start := time.Now() 43 | wg.Wait() 44 | fmt.Printf("%v clients %v requests: %v req/s\n", *concurrency, *reqNum, int64(float64(*reqNum)/time.Since(start).Seconds())) 45 | } 46 | -------------------------------------------------------------------------------- /tso/examples/client.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "fmt" 5 | 6 | "gopkg.in/alecthomas/kingpin.v2" 7 | 8 | "hybrid/tso" 9 | ) 10 | 11 | var ( 12 | addr = kingpin.Flag("addr", "tso server address").Default(":7070").String() 13 | ) 14 | 15 | func main() { 16 | kingpin.Parse() 17 | 18 | client, err := tso.NewClient(*addr) 19 | if err != nil { 20 | panic(err) 21 | } 22 | if ts, err := client.TS(); err != nil { 23 | fmt.Println(err) 24 | } else { 25 | fmt.Println(ts) 26 | } 27 | } 28 | -------------------------------------------------------------------------------- 
/tso/examples/server.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "log" 5 | "os" 6 | "os/signal" 7 | "runtime" 8 | "runtime/pprof" 9 | 10 | "gopkg.in/alecthomas/kingpin.v2" 11 | 12 | "hybrid/tso" 13 | ) 14 | 15 | var ( 16 | cpuProfile = kingpin.Flag("cpuProfile", "write cpu profile to file").Default("").String() 17 | address = kingpin.Flag("addr", "listen address").Default(":7070").String() 18 | ) 19 | 20 | func main() { 21 | kingpin.Parse() 22 | 23 | runtime.GOMAXPROCS(runtime.NumCPU()) 24 | log.SetFlags(log.LstdFlags | log.Lshortfile) 25 | 26 | if *cpuProfile != "" { 27 | f, err := os.Create(*cpuProfile) 28 | if err != nil { 29 | log.Fatal(err) 30 | } 31 | pprof.StartCPUProfile(f) 32 | interrupt := make(chan os.Signal, 1) 33 | signal.Notify(interrupt) 34 | go catchKill(interrupt) 35 | } 36 | 37 | log.Println("Timestamp Oracle Started") 38 | orc := tso.NewOracle(*address, 100000) 39 | orc.Recover() 40 | orc.WaitForClientConnections() 41 | } 42 | 43 | func catchKill(interrupt chan os.Signal) { 44 | <-interrupt 45 | if *cpuProfile != "" { 46 | pprof.StopCPUProfile() 47 | } 48 | log.Fatalln("Caught Signal") 49 | } 50 | -------------------------------------------------------------------------------- /tso/oracleclient.go: -------------------------------------------------------------------------------- 1 | package tso 2 | 3 | import ( 4 | "bufio" 5 | "errors" 6 | "log" 7 | "net" 8 | ) 9 | 10 | type Client struct { 11 | shutdown bool 12 | req chan chan int64 13 | conn net.Conn 14 | reader *bufio.Reader 15 | writer *bufio.Writer 16 | } 17 | 18 | func NewClient(address string) (*Client, error) { 19 | log.SetFlags(log.LstdFlags | log.Lshortfile) 20 | conn, err := net.Dial("tcp", address) 21 | if err != nil { 22 | return nil, err 23 | } 24 | 25 | cl := &Client{ 26 | shutdown: false, 27 | conn: conn, 28 | writer: bufio.NewWriter(conn), 29 | reader: bufio.NewReader(conn), 30 | req: make(chan chan 
int64, 100000), 31 | } 32 | go cl.start() 33 | 34 | return cl, nil 35 | } 36 | 37 | // Close the client after all TS responses are returned 38 | func (c *Client) Close() { 39 | c.shutdown = true 40 | } 41 | 42 | func (c *Client) TS() (int64, error) { 43 | if c.shutdown { 44 | return -1, errors.New("close") 45 | } 46 | ch := make(chan int64) 47 | c.req <- ch 48 | if ts := <-ch; ts >= 0 { 49 | return ts, nil 50 | } else { 51 | return -1, errors.New("invalid ts") 52 | } 53 | } 54 | 55 | func (c *Client) GetTS(num int32) (int64, error) { 56 | if c.shutdown { 57 | return -1, errors.New("already close") 58 | } 59 | 60 | getTs := &GetTS{num} 61 | 62 | c.writer.WriteByte(byte(GET)) 63 | getTs.Marshal(c.writer) 64 | c.writer.Flush() 65 | 66 | msgType, err := c.reader.ReadByte() 67 | if err != nil { 68 | return -1, err 69 | } 70 | switch uint8(msgType) { 71 | case REPLY: 72 | replyTs := new(ReplyTS) 73 | if err := replyTs.Unmarshal(c.reader); err != nil { 74 | return -1, err 75 | } 76 | return replyTs.Timestamp, nil 77 | default: 78 | return -1, errors.New("unknown msg type") 79 | } 80 | } 81 | 82 | func (c *Client) start() { 83 | for !c.shutdown { 84 | ch := <-c.req 85 | l := len(c.req) 86 | // batch count 87 | // log.Println(l) 88 | ts, err := c.GetTS(int32(l + 1)) 89 | if err != nil { 90 | log.Println("get ts error", err) 91 | c.shutdown = true 92 | c.conn.Close() 93 | break 94 | } 95 | ch <- ts 96 | for i := 1; i <= l; i++ { 97 | ch = <-c.req 98 | ch <- ts - int64(i) 99 | } 100 | } 101 | 102 | close(c.req) 103 | } 104 | -------------------------------------------------------------------------------- /tso/utils.go: -------------------------------------------------------------------------------- 1 | package tso 2 | 3 | const ( 4 | GET uint8 = iota 5 | REPLY 6 | ) 7 | 8 | type GetTS struct { 9 | Num int32 10 | } 11 | 12 | type ReplyTS struct { 13 | Timestamp int64 14 | } 15 | 16 | type LogTS struct { 17 | crc uint32 18 | ts int64 19 | } 20 | 
-------------------------------------------------------------------------------- /tso_zookeeper/tso.go: -------------------------------------------------------------------------------- 1 | package tso 2 | 3 | import ( 4 | // "fmt" 5 | "fmt" 6 | "sync" 7 | "time" 8 | 9 | "github.com/go-zookeeper/zk" 10 | ) 11 | 12 | const PathPrefix = "/veritasts" 13 | 14 | type Client struct { 15 | conn *zk.Conn 16 | lock *sync.Mutex 17 | maxTS int64 18 | curTS int64 19 | path string 20 | } 21 | 22 | var singleton *Client 23 | var once sync.Once 24 | 25 | func NewClient(address string) (*Client, error) { 26 | once.Do(func() { 27 | zk_servers := make([]string, 1) 28 | zk_servers[0] = address 29 | conn, _, err := zk.Connect(zk_servers, 10000000000) 30 | 31 | if err != nil { 32 | singleton = nil 33 | panic(err) 34 | } 35 | 36 | // create a new unique path 37 | t := time.Now() 38 | path := fmt.Sprintf("%s-%s", PathPrefix, t.Format("20211010090930")) 39 | conn.Create(path, []byte{1, 2, 3, 4}, 0, zk.WorldACL(zk.PermAll)) 40 | 41 | singleton = &Client{ 42 | conn: conn, 43 | lock: &sync.Mutex{}, 44 | maxTS: 0, 45 | curTS: 0, 46 | path: path, 47 | } 48 | 49 | // async function for look-ahead counter 50 | go func() { 51 | for true { 52 | _, stat, err := conn.Get(path) 53 | if err != nil { 54 | fmt.Printf("TSO ZK Get error %v\n", err) 55 | continue 56 | } 57 | stat, err = conn.Set(path, []byte{1, 2, 3, 4}, stat.Version) 58 | if err != nil { 59 | fmt.Printf("TSO ZK Set error %v\n", err) 60 | continue 61 | } 62 | singleton.maxTS += 100 * int64(stat.Version) 63 | // fmt.Printf("Max TS %d Cur TS %d\n", singleton.maxTS, singleton.curTS) 64 | time.Sleep(750 * time.Millisecond) 65 | } 66 | }() 67 | }) 68 | 69 | return singleton, nil 70 | } 71 | 72 | // Close the client after all TS responses are returned 73 | func (c *Client) Close() { 74 | c.conn.Close() 75 | } 76 | 77 | // naive implementation 78 | func (c *Client) TSX() (int64, error) { 79 | c.lock.Lock() 80 | defer c.lock.Unlock() 81 | 82 | _, 
stat, err := c.conn.Get(c.path) 83 | if err != nil { 84 | fmt.Printf("TSO ZK Get error %v\n", err) 85 | return -1, err 86 | } 87 | 88 | stat, err = c.conn.Set(c.path, []byte{1, 2, 3, 4}, stat.Version) 89 | if err != nil { 90 | fmt.Printf("TSO ZK Set error %v\n", err) 91 | return -1, err 92 | } 93 | 94 | return int64(stat.Version), nil 95 | } 96 | 97 | // faster implementation 98 | func (c *Client) TS() (int64, error) { 99 | c.lock.Lock() 100 | defer c.lock.Unlock() 101 | 102 | if c.curTS < c.maxTS { 103 | ret := c.curTS 104 | c.curTS += 1 105 | return ret, nil 106 | } 107 | for c.curTS == c.maxTS { 108 | time.Sleep(100 * time.Millisecond) 109 | } 110 | ret := c.curTS 111 | c.curTS += 1 112 | return ret, nil 113 | } 114 | -------------------------------------------------------------------------------- /veritas/benchmark/util.go: -------------------------------------------------------------------------------- 1 | package benchmark 2 | 3 | import ( 4 | "bufio" 5 | "io" 6 | "math/rand" 7 | "strings" 8 | ) 9 | 10 | const letterBytes = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ" 11 | const ( 12 | letterIdxBits = 6 // 6 bits to represent a letter index 13 | letterIdxMask = 1<= 10000 { 55 | break 56 | } 57 | */ 58 | } 59 | return nil 60 | } 61 | 62 | func GenRandString(n int) string { 63 | b := make([]byte, n) 64 | // A rand.Int63() generates 63 random bits, enough for letterIdxMax letters! 
65 | for i, cache, remain := n-1, rand.Int63(), letterIdxMax; i >= 0; { 66 | if remain == 0 { 67 | cache, remain = rand.Int63(), letterIdxMax 68 | } 69 | if idx := int(cache & letterIdxMask); idx < len(letterBytes) { 70 | b[i] = letterBytes[idx] 71 | i-- 72 | } 73 | cache >>= letterIdxBits 74 | remain-- 75 | } 76 | return string(b) 77 | } 78 | -------------------------------------------------------------------------------- /veritas/config.go: -------------------------------------------------------------------------------- 1 | package veritas 2 | 3 | type Config struct { 4 | Signature string 5 | Topic string 6 | Parties map[string]struct{} 7 | BlockSize int 8 | } 9 | -------------------------------------------------------------------------------- /veritas/db/db.go: -------------------------------------------------------------------------------- 1 | package db 2 | 3 | type DB interface { 4 | Get(key string) (value string, err error) 5 | Set(key, value string) error 6 | } 7 | -------------------------------------------------------------------------------- /veritas/db/transaction.go: -------------------------------------------------------------------------------- 1 | package db 2 | 3 | import ( 4 | "context" 5 | 6 | pbv "hybrid/proto/veritas" 7 | ) 8 | 9 | type TransactionDB struct { 10 | ts int64 11 | signature string 12 | cli pbv.NodeClient 13 | setBuffer map[string]string 14 | } 15 | 16 | func NewTransaction(ts int64, cli pbv.NodeClient, signature string) *TransactionDB { 17 | return &TransactionDB{ 18 | ts: ts, 19 | signature: signature, 20 | cli: cli, 21 | setBuffer: make(map[string]string), 22 | } 23 | } 24 | 25 | func (db *TransactionDB) Get(key string) (string, error) { 26 | res, err := db.cli.Get(context.Background(), &pbv.GetRequest{ 27 | Key: key, 28 | Signature: db.signature, 29 | }) 30 | if err != nil { 31 | return "", err 32 | } 33 | return res.GetValue(), nil 34 | } 35 | 36 | func (db *TransactionDB) Set(key, value string) error { 37 | db.setBuffer[key] 
= value 38 | return nil 39 | } 40 | 41 | func (db *TransactionDB) Commit() error { 42 | for k, v := range db.setBuffer { 43 | if _, err := db.cli.Set(context.Background(), &pbv.SetRequest{ 44 | Signature: db.signature, 45 | Key: k, 46 | Value: v, 47 | Version: db.ts, 48 | }); err != nil { 49 | return err 50 | } 51 | } 52 | return nil 53 | } 54 | -------------------------------------------------------------------------------- /veritas/driver/driver.go: -------------------------------------------------------------------------------- 1 | package driver 2 | 3 | import ( 4 | "context" 5 | 6 | "google.golang.org/grpc" 7 | 8 | pbv "hybrid/proto/veritas" 9 | "hybrid/tso" 10 | "hybrid/veritas/db" 11 | ) 12 | 13 | type Driver struct { 14 | signature string 15 | cc *grpc.ClientConn 16 | dbCli pbv.NodeClient 17 | tsCli *tso.Client 18 | } 19 | 20 | func Open(serverAddr, tsoAddr, signature string) (*Driver, error) { 21 | cc, err := grpc.Dial(serverAddr, grpc.WithInsecure()) 22 | if err != nil { 23 | return nil, err 24 | } 25 | dbCli := pbv.NewNodeClient(cc) 26 | 27 | tsCli, err := tso.NewClient(tsoAddr) 28 | if err != nil { 29 | return nil, err 30 | } 31 | 32 | return &Driver{ 33 | signature: signature, 34 | cc: cc, 35 | dbCli: dbCli, 36 | tsCli: tsCli, 37 | }, nil 38 | } 39 | 40 | func (d *Driver) Begin() (*db.TransactionDB, error) { 41 | ts, err := d.tsCli.TS() 42 | if err != nil { 43 | return nil, err 44 | } 45 | return db.NewTransaction(ts, d.dbCli, d.signature), nil 46 | } 47 | 48 | func (d *Driver) Get(ctx context.Context, key string) (string, error) { 49 | res, err := d.dbCli.Get(ctx, &pbv.GetRequest{ 50 | Signature: d.signature, 51 | Key: key, 52 | }) 53 | if err != nil { 54 | return "", err 55 | } 56 | return res.GetValue(), nil 57 | } 58 | 59 | func (d *Driver) Set(ctx context.Context, key, value string) error { 60 | ts, err := d.tsCli.TS() 61 | if err != nil { 62 | return err 63 | } 64 | if _, err := d.dbCli.Set(ctx, &pbv.SetRequest{ 65 | Signature: d.signature, 66 | 
Key: key, 67 | Value: value, 68 | Version: ts, 69 | }); err != nil { 70 | return err 71 | } 72 | return nil 73 | } 74 | 75 | func (d *Driver) Close() error { 76 | return d.cc.Close() 77 | } 78 | -------------------------------------------------------------------------------- /veritas/entry.go: -------------------------------------------------------------------------------- 1 | package veritas 2 | 3 | import ( 4 | jsoniter "github.com/json-iterator/go" 5 | ) 6 | 7 | var json = jsoniter.ConfigCompatibleWithStandardLibrary 8 | 9 | type Value struct { 10 | Val string 11 | Version int64 12 | } 13 | 14 | func Encode(val string, ts int64) (string, error) { 15 | v, err := json.Marshal(&Value{ 16 | Val: val, 17 | Version: ts, 18 | }) 19 | return string(v), err 20 | } 21 | 22 | func Decode(entry string) (*Value, error) { 23 | var v Value 24 | if err := json.Unmarshal([]byte(entry), &v); err != nil { 25 | return nil, err 26 | } 27 | return &v, nil 28 | } 29 | -------------------------------------------------------------------------------- /veritas/keylocker/mgr.go: -------------------------------------------------------------------------------- 1 | package keylocker 2 | 3 | import ( 4 | "fmt" 5 | "sync" 6 | ) 7 | 8 | type KLocker interface { 9 | Lock(key string) 10 | Unlock(key string) 11 | } 12 | 13 | type KMutex struct { 14 | mu sync.Mutex 15 | locks map[string]*sync.Mutex 16 | counts map[string]int 17 | } 18 | 19 | func (km *KMutex) Lock(key string) { 20 | km.mu.Lock() 21 | if km.locks == nil { 22 | km.locks = make(map[string]*sync.Mutex) 23 | km.counts = make(map[string]int) 24 | } 25 | lock, ok := km.locks[key] 26 | if !ok { 27 | lock = &sync.Mutex{} 28 | km.locks[key] = lock 29 | } 30 | km.counts[key]++ 31 | km.mu.Unlock() 32 | lock.Lock() 33 | } 34 | 35 | func (km *KMutex) Unlock(key string) { 36 | km.mu.Lock() 37 | defer km.mu.Unlock() 38 | lock, ok := km.locks[key] 39 | if !ok || km.counts[key] == 0 { 40 | fmt.Printf("klocker: unlock unlocked kmutex of %#v\n", key) 
41 | return 42 | } 43 | lock.Unlock() 44 | km.counts[key]-- 45 | if km.counts[key] == 0 { 46 | delete(km.locks, key) 47 | delete(km.counts, key) 48 | } 49 | } 50 | 51 | type locker struct { 52 | kl KLocker 53 | key string 54 | } 55 | 56 | func (l *locker) Lock() { l.kl.Lock(l.key) } 57 | func (l *locker) Unlock() { l.kl.Unlock(l.key) } 58 | 59 | func (km *KMutex) Locker(key string) sync.Locker { 60 | return &locker{kl: km, key: key} 61 | } 62 | -------------------------------------------------------------------------------- /veritas/ledger/merkletree/smt_test.go: -------------------------------------------------------------------------------- 1 | package merkletree 2 | 3 | import ( 4 | "crypto/sha256" 5 | "fmt" 6 | "io/ioutil" 7 | "os" 8 | "path/filepath" 9 | "testing" 10 | 11 | "github.com/stretchr/testify/require" 12 | ) 13 | 14 | func TestBasicSparseMerkleTree(t *testing.T) { 15 | tmpDir, err := ioutil.TempDir(os.TempDir(), "test-basic") 16 | require.NoError(t, err) 17 | defer os.RemoveAll(tmpDir) 18 | 19 | store, err := NewBadgerStore(filepath.Join(tmpDir, "bs")) 20 | require.NoError(t, err) 21 | tree := NewSparseMerkleTree(store, sha256.New()) 22 | 23 | _, err = tree.Update([]byte("foo"), []byte("bar")) 24 | require.NoError(t, err) 25 | 26 | proof, _ := tree.Prove([]byte("foo")) 27 | root := tree.Root() 28 | 29 | if VerifyProof(proof, root, []byte("foo"), []byte("bar"), sha256.New()) { 30 | fmt.Println("Proof verification succeeded.") 31 | } else { 32 | fmt.Println("Proof verification failed.") 33 | } 34 | } 35 | -------------------------------------------------------------------------------- /veritas/ledger/merkletree/store.go: -------------------------------------------------------------------------------- 1 | package merkletree 2 | 3 | import ( 4 | "errors" 5 | "fmt" 6 | "runtime" 7 | 8 | "github.com/dgraph-io/badger/v3" 9 | ) 10 | 11 | type KVStore interface { 12 | Get(key []byte) ([]byte, error) 13 | Set(key []byte, value []byte) error 14 | Close() error 
15 | } 16 | 17 | type InvalidKeyError struct { 18 | Key []byte 19 | } 20 | 21 | func (e *InvalidKeyError) Error() string { 22 | return fmt.Sprintf("invalid key: %s", e.Key) 23 | } 24 | 25 | type SimpleMap struct { 26 | m map[string][]byte 27 | } 28 | 29 | func NewSimpleMap() *SimpleMap { 30 | return &SimpleMap{ 31 | m: make(map[string][]byte), 32 | } 33 | } 34 | 35 | func (sm *SimpleMap) Get(key []byte) ([]byte, error) { 36 | if value, ok := sm.m[string(key)]; ok { 37 | return value, nil 38 | } 39 | return nil, &InvalidKeyError{Key: key} 40 | } 41 | 42 | func (sm *SimpleMap) Set(key []byte, value []byte) error { 43 | sm.m[string(key)] = value 44 | return nil 45 | } 46 | 47 | func (sm *SimpleMap) Close() error { 48 | sm.m = nil 49 | runtime.GC() 50 | return nil 51 | } 52 | 53 | type BadgerStore struct { 54 | db *badger.DB 55 | } 56 | 57 | // NewBadgerStore creates a new empty BadgerStore. 58 | func NewBadgerStore(path string) (*BadgerStore, error) { 59 | db, err := badger.Open(badger.DefaultOptions(path)) 60 | if err != nil { 61 | return nil, err 62 | } 63 | return &BadgerStore{ 64 | db: db, 65 | }, nil 66 | } 67 | 68 | // Get gets the value for a key. 69 | func (bs *BadgerStore) Get(key []byte) ([]byte, error) { 70 | var value []byte 71 | 72 | if err := bs.db.View(func(txn *badger.Txn) error { 73 | item, err := txn.Get(key) 74 | if errors.Is(err, badger.ErrKeyNotFound) { 75 | return &InvalidKeyError{Key: key} 76 | } else if err != nil { 77 | return err 78 | } 79 | if err := item.Value(func(val []byte) error { 80 | value = append([]byte{}, val...) 81 | return nil 82 | }); err != nil { 83 | return err 84 | } 85 | return nil 86 | }); err != nil { 87 | return nil, err 88 | } 89 | 90 | return value, nil 91 | } 92 | 93 | // Set updates the value for a key. 
94 | func (bs *BadgerStore) Set(key []byte, value []byte) error { 95 | return bs.db.Update(func(txn *badger.Txn) error { 96 | if err := txn.Set(key, value); err != nil { 97 | return err 98 | } 99 | return nil 100 | }) 101 | } 102 | 103 | // Close closes BadgerStore 104 | func (bs *BadgerStore) Close() error { 105 | return bs.db.Close() 106 | } 107 | -------------------------------------------------------------------------------- /veritas/ledger/merkletree/treehasher.go: -------------------------------------------------------------------------------- 1 | package merkletree 2 | 3 | import ( 4 | "bytes" 5 | "hash" 6 | ) 7 | 8 | var leafPrefix = []byte{0} 9 | var nodePrefix = []byte{1} 10 | 11 | type treeHasher struct { 12 | hasher hash.Hash 13 | } 14 | 15 | func newTreeHasher(hasher hash.Hash) *treeHasher { 16 | th := treeHasher{ 17 | hasher: hasher, 18 | } 19 | 20 | return &th 21 | } 22 | 23 | func (th *treeHasher) digest(data []byte) []byte { 24 | th.hasher.Write(data) 25 | sum := th.hasher.Sum(nil) 26 | th.hasher.Reset() 27 | return sum 28 | } 29 | 30 | func (th *treeHasher) path(key []byte) []byte { 31 | return th.digest(key) 32 | } 33 | 34 | func (th *treeHasher) digestLeaf(path []byte, leafData []byte) ([]byte, []byte) { 35 | value := make([]byte, len(leafPrefix)) 36 | copy(value, leafPrefix) 37 | 38 | value = append(value, path...) 39 | value = append(value, leafData...) 
40 | 41 | th.hasher.Write(value) 42 | sum := th.hasher.Sum(nil) 43 | th.hasher.Reset() 44 | 45 | return sum, value 46 | } 47 | 48 | func (th *treeHasher) parseLeaf(data []byte) ([]byte, []byte) { 49 | return data[len(leafPrefix) : th.pathSize()+len(leafPrefix)], data[len(leafPrefix)+th.pathSize():] 50 | } 51 | 52 | func (th *treeHasher) isLeaf(data []byte) bool { 53 | return bytes.Equal(data[:len(leafPrefix)], leafPrefix) 54 | } 55 | 56 | func (th *treeHasher) digestNode(leftData []byte, rightData []byte) ([]byte, []byte) { 57 | value := make([]byte, len(nodePrefix)) 58 | copy(value, nodePrefix) 59 | 60 | value = append(value, leftData...) 61 | value = append(value, rightData...) 62 | 63 | th.hasher.Write(value) 64 | sum := th.hasher.Sum(nil) 65 | th.hasher.Reset() 66 | 67 | return sum, value 68 | } 69 | 70 | func (th *treeHasher) parseNode(data []byte) ([]byte, []byte) { 71 | return data[len(nodePrefix) : th.pathSize()+len(nodePrefix)], data[len(nodePrefix)+th.pathSize():] 72 | } 73 | 74 | func (th *treeHasher) pathSize() int { 75 | return th.hasher.Size() 76 | } 77 | 78 | func (th *treeHasher) placeholder() []byte { 79 | return bytes.Repeat([]byte{0}, th.pathSize()) 80 | } 81 | -------------------------------------------------------------------------------- /veritas/ledger/merkletree/utils.go: -------------------------------------------------------------------------------- 1 | package merkletree 2 | 3 | func hasBit(data []byte, position int) int { 4 | if int(data[position/8])&(1<<(uint(position)%8)) > 0 { 5 | return 1 6 | } 7 | return 0 8 | } 9 | 10 | func setBit(data []byte, position int) { 11 | n := int(data[position/8]) 12 | n |= 1 << (uint(position) % 8) 13 | data[position/8] = byte(n) 14 | } 15 | 16 | func countSetBits(data []byte) int { 17 | count := 0 18 | for i := 0; i < len(data)*8; i++ { 19 | if hasBit(data, i) == 1 { 20 | count++ 21 | } 22 | } 23 | return count 24 | } 25 | 26 | func countCommonPrefix(data1 []byte, data2 []byte) int { 27 | count := 0 28 
| for i := 0; i < len(data1)*8; i++ { 29 | if hasBit(data1, i) == hasBit(data2, i) { 30 | count++ 31 | } else { 32 | break 33 | } 34 | } 35 | return count 36 | } 37 | 38 | func emptyBytes(length int) []byte { 39 | b := make([]byte, length) 40 | return b 41 | } 42 | -------------------------------------------------------------------------------- /veritas/ledger/storage_test.go: -------------------------------------------------------------------------------- 1 | package ledger 2 | 3 | import ( 4 | "io/ioutil" 5 | "log" 6 | "os" 7 | "testing" 8 | 9 | "github.com/stretchr/testify/assert" 10 | ) 11 | 12 | func TestStateDigestComputation(t *testing.T) { 13 | dir, err := ioutil.TempDir("", "prefix1") 14 | if err != nil { 15 | log.Fatal(err) 16 | } 17 | ledger1, err := NewLedger(dir, true) 18 | if err != nil { 19 | log.Fatal(err) 20 | } 21 | ledger1.Append([]byte("A"), []byte("A")) 22 | ledger1.Append([]byte("B"), []byte("B")) 23 | ledger1.Append([]byte("C"), []byte("C")) 24 | ledger1Digest := ledger1.GetRootDigest() 25 | ledger1.Close() 26 | 27 | // Create another instance to test whether previous appended states are loaded properly from the persistence and reach the identical root digest. 28 | ledger2, err := NewLedger(dir, true) 29 | if err != nil { 30 | log.Fatal(err) 31 | } 32 | ledger2Digest := ledger2.GetRootDigest() 33 | assert.Equal(t, ledger1Digest, ledger2Digest) 34 | 35 | ledger2.AppendBlk([]byte("block111")) 36 | ledger2.AppendBlk([]byte("block222")) 37 | ledger2.AppendBlk([]byte("block333")) 38 | ledger2AfterAppendingBlkDigest := ledger2.GetRootDigest() 39 | // Verify the block appending does not interfere the state digest computation. 40 | assert.Equal(t, ledger2AfterAppendingBlkDigest, ledger2Digest) 41 | ledger2.Close() 42 | 43 | // Verify the non-interference after reloading db. 
// --- veritas/queue.go (package veritas) ---

// PriorityQueue is a concurrency-safe max-priority queue built on
// container/heap: nodes with higher Priority are returned first.
type PriorityQueue struct {
	mu    *sync.RWMutex
	nodes []*PqNode
}

// PqNode is a queue entry; index is maintained by the heap operations.
type PqNode struct {
	Value    string
	Priority int
	index    int
}

// NewPriorityQueue returns an empty, ready-to-use queue.
func NewPriorityQueue() *PriorityQueue {
	pq := &PriorityQueue{mu: new(sync.RWMutex)}
	heap.Init(pq)
	return pq
}

// Put inserts a node, keeping the heap ordered by Priority.
func (pq *PriorityQueue) Put(v *PqNode) {
	pq.mu.Lock()
	defer pq.mu.Unlock()
	heap.Push(pq, v)
}

// Get removes and returns the highest-priority node (as interface{}),
// or (nil, false) when the queue is empty.
func (pq *PriorityQueue) Get() (interface{}, bool) {
	pq.mu.Lock()
	defer pq.mu.Unlock()
	if len(pq.nodes) == 0 {
		return nil, false
	}
	return heap.Pop(pq), true
}

// Size returns the number of queued nodes.
// FIX: pointer receiver for consistency with the other locked methods
// (the original mixed value and pointer receivers).
func (pq *PriorityQueue) Size() int {
	pq.mu.RLock()
	defer pq.mu.RUnlock()
	return len(pq.nodes)
}

// IsEmpty reports whether the queue holds no nodes.
func (pq *PriorityQueue) IsEmpty() bool {
	pq.mu.RLock()
	defer pq.mu.RUnlock()
	return len(pq.nodes) == 0
}

// Len, Less, Swap, Push and Pop implement heap.Interface. They are
// unlocked: Put/Get already hold the mutex when heap calls them.
func (pq *PriorityQueue) Len() int { return len(pq.nodes) }

// Less orders by descending Priority (max-heap).
func (pq *PriorityQueue) Less(i, j int) bool {
	return pq.nodes[i].Priority > pq.nodes[j].Priority
}

func (pq *PriorityQueue) Swap(i, j int) {
	pq.nodes[i], pq.nodes[j] = pq.nodes[j], pq.nodes[i]
	pq.nodes[i].index, pq.nodes[j].index = i, j
}

func (pq *PriorityQueue) Push(v interface{}) {
	item := v.(*PqNode)
	item.index = len(pq.nodes)
	pq.nodes = append(pq.nodes, item)
}

// Pop removes and returns the last element (heap internals have already
// moved the max there).
// FIX: nil out the vacated slot so the popped node does not stay
// reachable through the backing array (memory retention in the original).
func (pq *PriorityQueue) Pop() interface{} {
	n := len(pq.nodes)
	item := pq.nodes[n-1]
	item.index = -1
	pq.nodes[n-1] = nil
	pq.nodes = pq.nodes[:n-1]
	return item
}
*PriorityQueue) Pop() interface{} { 73 | old := *pq 74 | n := len(old.nodes) 75 | item := old.nodes[n-1] 76 | item.index = -1 77 | pq.nodes = old.nodes[0 : n-1] 78 | return item 79 | } 80 | -------------------------------------------------------------------------------- /veritastm/config.go: -------------------------------------------------------------------------------- 1 | package veritastm 2 | 3 | type Config struct { 4 | Signature string 5 | Parties map[string]struct{} 6 | BlockSize int 7 | LedgerPath string 8 | ABCIRPCAddr string 9 | } 10 | -------------------------------------------------------------------------------- /veritastm/driver.go: -------------------------------------------------------------------------------- 1 | package veritastm 2 | 3 | import ( 4 | "context" 5 | 6 | "google.golang.org/grpc" 7 | 8 | pbv "hybrid/proto/veritas" 9 | ) 10 | 11 | type Driver struct { 12 | signature string 13 | cc *grpc.ClientConn 14 | dbCli pbv.NodeClient 15 | } 16 | 17 | func Open(serverAddr, signature string) (*Driver, error) { 18 | cc, err := grpc.Dial(serverAddr, grpc.WithInsecure()) 19 | if err != nil { 20 | return nil, err 21 | } 22 | dbCli := pbv.NewNodeClient(cc) 23 | 24 | return &Driver{ 25 | signature: signature, 26 | cc: cc, 27 | dbCli: dbCli, 28 | }, nil 29 | } 30 | 31 | func (d *Driver) Get(ctx context.Context, key string) (string, error) { 32 | res, err := d.dbCli.Get(ctx, &pbv.GetRequest{ 33 | Signature: d.signature, 34 | Key: key, 35 | }) 36 | if err != nil { 37 | return "", err 38 | } 39 | return res.GetValue(), nil 40 | } 41 | 42 | func (d *Driver) Set(ctx context.Context, key, value string) (string, error) { 43 | res, err := d.dbCli.Set(ctx, &pbv.SetRequest{ 44 | Signature: d.signature, 45 | Key: key, 46 | Value: value, 47 | }) 48 | 49 | if err != nil { 50 | return "", err 51 | } 52 | 53 | return res.Txid, nil 54 | } 55 | 56 | func (d *Driver) Close() error { 57 | return d.cc.Close() 58 | } 59 | 
-------------------------------------------------------------------------------- /veritastm/test/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "hybrid/veritas/benchmark" 7 | "hybrid/veritastm" 8 | 9 | "gopkg.in/alecthomas/kingpin.v2" 10 | ) 11 | 12 | var ( 13 | veritasAddrs = kingpin.Flag("veritas-addrs", "Address of veritas node").Required().String() 14 | ) 15 | 16 | func main() { 17 | ctx := context.Background() 18 | 19 | kingpin.Parse() 20 | 21 | cli, err := veritastm.Open(*veritasAddrs, benchmark.GenRandString(16)) 22 | if err != nil { 23 | fmt.Printf("Error %v\n", err) 24 | } 25 | defer cli.Close() 26 | 27 | res, err := cli.Set(ctx, "abc", "xyz") 28 | fmt.Printf("Set error %v\n", err) 29 | fmt.Printf("Set result %v\n", res) 30 | 31 | res, err = cli.Get(ctx, "abc") 32 | fmt.Printf("Get key %v value %v\n", "abc", res) 33 | } 34 | -------------------------------------------------------------------------------- /veritastm/test/test: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nusdbsystem/Hybrid-Blockchain-Database-Systems/948f560f36f5af215c291e46f118340fc838eac1/veritastm/test/test -------------------------------------------------------------------------------- /veritastm/util.go: -------------------------------------------------------------------------------- 1 | package veritastm 2 | 3 | import ( 4 | "bufio" 5 | "io" 6 | "math/rand" 7 | "strings" 8 | ) 9 | 10 | const letterBytes = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ" 11 | const ( 12 | letterIdxBits = 6 // 6 bits to represent a letter index 13 | letterIdxMask = 1<= 0; { 59 | if remain == 0 { 60 | cache, remain = rand.Int63(), letterIdxMax 61 | } 62 | if idx := int(cache & letterIdxMask); idx < len(letterBytes) { 63 | b[i] = letterBytes[idx] 64 | i-- 65 | } 66 | cache >>= letterIdxBits 67 | remain-- 68 | } 69 | return 
string(b) 70 | } 71 | --------------------------------------------------------------------------------