├── code ├── cmd │ ├── __init__.py │ ├── rcmd.py │ ├── bitcoincmd.py │ ├── tccmd.py │ └── dockercmd.py ├── simulationfiles │ ├── __init__.py │ ├── zone.py │ ├── checkargs.py │ ├── ticks_config.py │ ├── nodes_config.py │ └── network_config.py ├── requirements.txt ├── tests │ ├── test_dockercmd.py │ ├── test_bitcoincmd.py │ ├── test_bash.py │ ├── test_zone.py │ ├── test_systemmonitor.py │ ├── test_write.py │ ├── test_tccmd.py │ ├── test_postprocessing.py │ ├── test_nodes_config.py │ ├── test_node.py │ ├── test_checkargs.py │ ├── test_utils.py │ ├── test_ticks_config.py │ ├── test_prepare.py │ ├── test_clistats.py │ ├── test_network_config.py │ ├── test_event.py │ └── test_parse.py ├── run_cmd.py ├── bash.py ├── docker │ └── Dockerfile ├── chunker.py ├── write.py ├── reporter │ ├── preprocess.R │ └── report.Rmd ├── context.py ├── systemmonitor.py ├── simulation_cmd.py ├── config.py ├── event.py ├── simcoin.py ├── runner.py ├── clistats.py ├── utils.py ├── multirun_cmd.py ├── postprocessing.py ├── prepare.py ├── node.py └── parse.py ├── graphics ├── sba_logo.jpg └── netidee_logo_scholarship.jpg ├── .gitlab-ci.yml ├── .vscode ├── tasks.json └── launch.json ├── LICENSE ├── important_commands.txt ├── .gitignore ├── Makefile ├── README.md └── CODE_OF_CONDUCT.md /code/cmd/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /code/simulationfiles/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /graphics/sba_logo.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sbaresearch/simcoin/HEAD/graphics/sba_logo.jpg -------------------------------------------------------------------------------- /code/requirements.txt: -------------------------------------------------------------------------------- 1 | numpy==1.13.3 2 | mock==2.0.0 3 | pandas==0.21.0 4 | python-bitcoinlib==0.8.0 5 | -------------------------------------------------------------------------------- /graphics/netidee_logo_scholarship.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sbaresearch/simcoin/HEAD/graphics/netidee_logo_scholarship.jpg -------------------------------------------------------------------------------- /.gitlab-ci.yml: -------------------------------------------------------------------------------- 1 | before_script: 2 | - python3 --version 3 | - pip3 install -r code/requirements.txt 4 | 5 | unittest: 6 | script: 7 | - cd code 8 | - python3 -m unittest discover -s tests 9 | -------------------------------------------------------------------------------- /.vscode/tasks.json: -------------------------------------------------------------------------------- 1 | { 2 | // See https://go.microsoft.com/fwlink/?LinkId=733558 3 | // for the documentation about the tasks.json format 4 | "version": "0.1.0", 5 | "command": "python", 6 | "isShellCommand": true, 7 | "args": ["${workspaceRoot}/setup.py"], 8 | "showOutput": "always" 9 | } -------------------------------------------------------------------------------- /code/tests/test_dockercmd.py: -------------------------------------------------------------------------------- 1 | from unittest import TestCase 2 | from cmd import dockercmd 3 | from node import Node 4 | import ipaddress 5 | import 
config 6 | 7 | 8 | class TestDockercmd(TestCase): 9 | 10 | def test_run_node(self): 11 | cmd = dockercmd.run_node('node-1', '1.1.1.1', 'image', 'cmd', '/path') 12 | 13 | self.assertTrue(' ' not in cmd) 14 | -------------------------------------------------------------------------------- /code/run_cmd.py: -------------------------------------------------------------------------------- 1 | from simulationfiles import nodes_config 2 | from simulationfiles import ticks_config 3 | from simulationfiles import network_config 4 | import simulation_cmd 5 | 6 | 7 | def run(): 8 | nodes_config.create(unknown_arguments=True) 9 | ticks_config.create(unknown_arguments=True) 10 | network_config.create(unknown_arguments=True) 11 | 12 | simulation_cmd.run(unknown_arguments=True) 13 | -------------------------------------------------------------------------------- /.vscode/launch.json: -------------------------------------------------------------------------------- 1 | { 2 | "version": "0.2.0", 3 | "configurations": [ 4 | { 5 | "name": "Python Console App", 6 | "type": "python", 7 | "request": "launch", 8 | "stopOnEntry": false, 9 | "program": "${workspaceRoot}/setup.py", 10 | "externalConsole": true, 11 | "debugOptions": [ 12 | "WaitOnAbnormalExit", 13 | "WaitOnNormalExit", 14 | "RedirectOutput" 15 | ] 16 | } 17 | ] 18 | } -------------------------------------------------------------------------------- /code/tests/test_bitcoincmd.py: -------------------------------------------------------------------------------- 1 | from unittest import TestCase 2 | from cmd import bitcoincmd 3 | from mock import MagicMock 4 | import config 5 | 6 | 7 | class TestBitcoincmd(TestCase): 8 | 9 | def test_start(self): 10 | cmd = bitcoincmd.start('node-1', '1.1.1.1', 'image', '/path', ['ip1', 'ip2']) 11 | 12 | self.assertTrue(' ' not in cmd) 13 | 14 | def test_rm_peers(self): 15 | cmd = bitcoincmd.rm_peers('node') 16 | 17 | self.assertTrue(' ' not in cmd) 18 | self.assertEqual(cmd, 'docker exec simcoin-node rm -f {}/regtest/peers.dat'.format(config.bitcoin_data_dir)) 19 | -------------------------------------------------------------------------------- /code/simulationfiles/zone.py: -------------------------------------------------------------------------------- 1 | import ipaddress 2 | import config 3 | from collections import namedtuple 4 | 5 | 6 | class Zone: 7 | def __init__(self): 8 | self.zones = {} 9 | self.counter = 0 10 | 11 | def get_ip(self, latency): 12 | if latency not in self.zones: 13 | self.counter += 1 14 | 15 | network = ipaddress.ip_network(config.ip_zones.format(self.counter)) 16 | self.zones[latency] = ZoneConfig(network, network.hosts(), latency) 17 | return next(self.zones[latency].hosts) 18 | 19 | 20 | ZoneConfig = namedtuple('ZoneConfig', 'network hosts latency') 21 | -------------------------------------------------------------------------------- /code/bash.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import os 3 | import subprocess 4 | 5 | 6 | def check_output(cmd, lvl=logging.INFO): 7 | output = check_output_without_log(cmd) 8 | for line in output.splitlines(): 9 | logging.log(lvl, line.strip()) 10 | return output 11 | 12 | 13 | def check_output_without_log(cmd): 14 | logging.info(cmd) 15 | output = subprocess.check_output(cmd, shell=True, executable='/bin/bash') 16 | encoded_output = output.decode('utf-8').rstrip() 17 | return encoded_output 18 | 19 | 20 | def call_silent(cmd): 21 | logging.info(cmd) 22 | with open(os.devnull, 'w') as 
devnull: 23 | return subprocess.call(cmd, shell=True, executable='/bin/bash', stderr=devnull, stdout=devnull) 24 | -------------------------------------------------------------------------------- /code/cmd/rcmd.py: -------------------------------------------------------------------------------- 1 | import config 2 | 3 | 4 | def preprocess(path): 5 | cp_preprocess_r_cmd = 'cp reporter/{} {}'.format(config.preprocess_r_file_name, path) 6 | change_dir = 'cd {}'.format(path) 7 | preprocess_cmd = 'Rscript {}'.format(config.preprocess_r_file_name) 8 | return ';'.join([cp_preprocess_r_cmd, change_dir, preprocess_cmd]) 9 | 10 | 11 | def create_report(path): 12 | cp_report_rmd_cmd = 'cp reporter/{} {}'.format(config.report_rmd_file_name, path) 13 | change_dir = 'cd {}'.format(path) 14 | create_report_cmd = r'R -e library\(rmarkdown\)\;rmarkdown::render\(\"{}\",\"pdf_document\"\)\;q\(\)'\ 15 | .format(config.report_rmd_file_name) 16 | return ';'.join([cp_report_rmd_cmd, change_dir, create_report_cmd]) 17 | -------------------------------------------------------------------------------- /code/tests/test_bash.py: -------------------------------------------------------------------------------- 1 | from unittest import TestCase 2 | import bash 3 | from mock import patch 4 | from mock import mock_open 5 | 6 | 7 | class TestBash(TestCase): 8 | 9 | def __init__(self, *args, **kwargs): 10 | super(TestBash, self).__init__(*args, **kwargs) 11 | 12 | @patch('subprocess.check_output') 13 | def test_check_output(self, mock): 14 | mock.return_value = b'test\ntest\ttest\t\n\n' 15 | 16 | output = bash.check_output('cmd') 17 | 18 | self.assertEqual(output, 'test\ntest\ttest') 19 | 20 | @patch("builtins.open", mock_open()) 21 | @patch('subprocess.call') 22 | def test_call_silent(self, mock): 23 | mock.return_value = b'test' 24 | output = bash.call_silent('cmd') 25 | 26 | self.assertEqual(output, b'test') 27 | -------------------------------------------------------------------------------- /code/simulationfiles/checkargs.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | 3 | 4 | def check_positive(value): 5 | if value < 0: 6 | raise argparse.ArgumentTypeError("%s is an invalid positive value" % value) 7 | return value 8 | 9 | 10 | def check_percentage(value): 11 | float_value = float(value) 12 | if float_value < 0 or float_value > 1: 13 | raise argparse.ArgumentTypeError("%s is an invalid percentage value [0,1]" % value) 14 | return float_value 15 | 16 | 17 | def check_positive_float(value): 18 | float_value = float(value) 19 | check_positive(float_value) 20 | return float_value 21 | 22 | 23 | def check_positive_int(value): 24 | int_value = int(value) 25 | if str(int_value) != value: 26 | raise argparse.ArgumentTypeError("%s is an invalid integer" % value) 27 | check_positive(int_value) 28 | return int_value 29 | -------------------------------------------------------------------------------- /code/docker/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM library/ubuntu:xenial-20170119 2 | RUN \ 3 | apt-get update &&\ 4 | apt-get -y install build-essential libtool autotools-dev automake pkg-config libssl-dev libevent-dev bsdmainutils &&\ 5 | apt-get -y install libboost-system-dev libboost-filesystem-dev libboost-chrono-dev libboost-program-options-dev libboost-test-dev libboost-thread-dev &&\ 6 | apt-get -y install software-properties-common &&\ 7 | add-apt-repository ppa:bitcoin/bitcoin &&\ 8 | apt-get -y
update &&\ 9 | apt-get -y install libdb4.8-dev libdb4.8++-dev &&\ 10 | apt-get -y install git 11 | 12 | 13 | RUN git clone https://github.com/simonmulser/bitcoin.git 14 | WORKDIR "/bitcoin" 15 | RUN git checkout simcoin 16 | 17 | RUN ./autogen.sh 18 | RUN ./configure 19 | 20 | RUN make 21 | # multi-threaded 22 | #RUN make -j4 23 | 24 | ENV PATH /bitcoin/src:$PATH 25 | RUN mkdir /data 26 | 27 | EXPOSE 18332 28 | -------------------------------------------------------------------------------- /code/tests/test_zone.py: -------------------------------------------------------------------------------- 1 | from unittest import TestCase 2 | import ipaddress 3 | from simulationfiles.zone import Zone 4 | 5 | 6 | class TestZone(TestCase): 7 | 8 | def setUp(self): 9 | self.zone = Zone() 10 | 11 | def test_get_ip(self): 12 | 13 | ip = self.zone.get_ip(100) 14 | 15 | self.assertEqual(ip, ipaddress.IPv4Address('240.1.0.1')) 16 | self.assertEqual(self.zone.zones[100].latency, 100) 17 | self.assertEqual(self.zone.zones[100].network, ipaddress.ip_network('240.1.0.0/16')) 18 | 19 | def test_get_ip_second_time_same_latency(self): 20 | 21 | self.zone.get_ip(100) 22 | ip = self.zone.get_ip(100) 23 | 24 | self.assertEqual(ip, ipaddress.IPv4Address('240.1.0.2')) 25 | 26 | def test_get_ip_second_time_different_latency(self): 27 | 28 | self.zone.get_ip(100) 29 | ip = self.zone.get_ip(0) 30 | 31 | self.assertEqual(ip, ipaddress.IPv4Address('240.2.0.1')) 32 | -------------------------------------------------------------------------------- /code/tests/test_systemmonitor.py: -------------------------------------------------------------------------------- 1 | from unittest import TestCase 2 | from systemmonitor import CpuTimeSnapshot 3 | from systemmonitor import MemorySnapshot 4 | 5 | 6 | class TestSystemMonitor(TestCase): 7 | 8 | def test_cpu_time_snapshot_from_bash(self): 9 | cpu_time = 'cpu 6993159 247853 1473357 6905504 50921 0 102406 0 0 0' 10 | 11 | snapshot = CpuTimeSnapshot.from_bash(cpu_time) 12 | 13 | self.assertEqual(snapshot._user, '6993159') 14 | self.assertEqual(snapshot._nice, '247853') 15 | self.assertEqual(snapshot._system, '1473357') 16 | self.assertEqual(snapshot._idle, '6905504') 17 | 18 | def test_memory_snapshot_from_bash(self): 19 | memory = 'MemTotal: 7577060 kB\nMemFree: 8316 kB\nMemAvailable: 1568016 kB' 20 | 21 | snapshot = MemorySnapshot.from_bash(memory) 22 | 23 | self.assertEqual(snapshot._total, '7577060') 24 | self.assertEqual(snapshot._available, '1568016') 25 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2017 Simon Mulser 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /code/tests/test_write.py: -------------------------------------------------------------------------------- 1 | from unittest import TestCase 2 | from mock import patch 3 | from mock import mock_open 4 | from mock import Mock 5 | from write import Writer 6 | import config 7 | 8 | 9 | class TestWriter(TestCase): 10 | 11 | def setUp(self): 12 | self.writer = Writer('test_tag') 13 | 14 | @patch('fcntl.flock', lambda file, lock: None) 15 | @patch('builtins.open', new_callable=mock_open) 16 | def test_write_csv(self, m_open): 17 | elements = Mock() 18 | elements.vars_to_array.return_value = ['content_1', 'content_2'] 19 | self.writer.write_csv('file.name', ['header_1', 'header_2'], [elements]) 20 | 21 | self.assertEqual(m_open.call_count, 2) 22 | self.assertEqual(m_open.call_args_list[0][0], (config.postprocessing_dir + 'file.name', 'w')) 23 | self.assertEqual(m_open.call_args_list[1][0], (config.postprocessing_dir + 'file.name', 'a')) 24 | 25 | handle = m_open() 26 | self.assertEqual(handle.write.call_args_list[0][0][0], 'header_1,header_2,tag\r\n') 27 | self.assertEqual(handle.write.call_args_list[1][0][0], 'content_1,content_2,test_tag\r\n') 28 | -------------------------------------------------------------------------------- /code/tests/test_tccmd.py: -------------------------------------------------------------------------------- 1 | from unittest import TestCase 2 | import ipaddress 3 | from cmd import tccmd 4 | from simulationfiles.zone import ZoneConfig 5 | 6 | 7 | class TestTccmd(TestCase): 8 | 9 | def test_create(self): 10 | zones = { 11 | 0: ZoneConfig(ipaddress.ip_network('240.1.0.0/16'), ipaddress.ip_network('240.1.0.0/16').hosts(), 0), 12 | 100: ZoneConfig(ipaddress.ip_network('240.2.0.0/16'), ipaddress.ip_network('240.2.0.0/16').hosts(), 100), 13 | 200: ZoneConfig(ipaddress.ip_network('240.3.0.0/16'), ipaddress.ip_network('240.3.0.0/16').hosts(), 200), 14 | } 15 | 16 | cmds = tccmd.create('node-0', zones, 100)[0] 17 | 18 | self.assertTrue('add dev eth0' in cmds) 19 | self.assertTrue('u32 match ip dst 240.1.0.0/16 flowid 1:2' in cmds) 20 | self.assertTrue('u32 match ip dst 240.2.0.0/16 flowid 1:3' in cmds) 21 | self.assertTrue('u32 match ip dst 240.3.0.0/16 flowid 1:4' in cmds) 22 | self.assertTrue('1:1 handle 10: netem delay 0ms' in cmds) 23 | self.assertTrue('1:2 handle 20: netem delay 100ms' in cmds) 24 | self.assertTrue('1:3 handle 30: netem delay 100ms' in cmds) 25 | self.assertTrue('1:4 handle 40: netem delay 300ms' in cmds) 26 | -------------------------------------------------------------------------------- /code/chunker.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | 4 | # http://www.blopig.com/blog/2016/08/processing-large-files-using-python-part-duex/ 5 | class Chunker(object): 6 | 7 | # Iterator that yields the start offset and size of file chunks (default size 1MB).
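# Chunk boundaries are aligned to whole lines: after seeking `size` bytes ahead,
# _EOC advances the file pointer to the next newline, so no line is ever split
# across two chunks; reads of the final chunk are simply clipped at end-of-file.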
8 | @classmethod 9 | def chunkify(cls, file_name, size=1024*1024): 10 | file_end = os.path.getsize(file_name) 11 | with open(file_name, 'rb') as file: 12 | chunk_end = file.tell() 13 | while True: 14 | chunk_start = chunk_end 15 | file.seek(size, 1) 16 | cls._EOC(file) 17 | chunk_end = file.tell() 18 | yield chunk_start, chunk_end - chunk_start 19 | if chunk_end >= file_end: 20 | break 21 | 22 | # Move file pointer to end of chunk 23 | @staticmethod 24 | def _EOC(file): 25 | file.readline() 26 | 27 | # read chunk 28 | @staticmethod 29 | def read(file_name, chunk): 30 | with open(file_name, 'r') as file: 31 | file.seek(chunk[0]) 32 | return file.read(chunk[1]) 33 | 34 | # iterator that splits a chunk into units 35 | @staticmethod 36 | def parse(chunk): 37 | for line in chunk.splitlines(): 38 | yield line 39 | -------------------------------------------------------------------------------- /code/write.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import csv 3 | import fcntl 4 | import config 5 | 6 | 7 | class Writer: 8 | def __init__(self, tag): 9 | self._tag = tag 10 | 11 | def write_csv(self, file_name, header, elements): 12 | write_header_csv(file_name, header) 13 | self.append_csv(file_name, elements) 14 | 15 | def append_csv(self, file_name, elements): 16 | with open(config.postprocessing_dir + file_name, 'a') as file: 17 | logging.debug('Waiting for lock to write to file={}'.format(file_name)) 18 | fcntl.flock(file, fcntl.LOCK_EX) 19 | logging.debug('Received lock for writing to file={}'.format(file_name)) 20 | 21 | w = csv.writer(file) 22 | for element in elements: 23 | row = element.vars_to_array() 24 | row.append(self._tag) 25 | w.writerow(row) 26 | 27 | 28 | def write_header_csv(file_name, header): 29 | with open(config.postprocessing_dir + file_name, 'w') as file: 30 | logging.debug('Waiting for lock to write to file={}'.format(file_name)) 31 | fcntl.flock(file, fcntl.LOCK_EX) 32 | logging.debug('Received lock for writing to file={}'.format(file_name)) 33 | 34 | w = csv.writer(file) 35 | w.writerow(header + ['tag']) 36 | -------------------------------------------------------------------------------- /code/tests/test_postprocessing.py: -------------------------------------------------------------------------------- 1 | from unittest import TestCase 2 | from mock import patch 3 | from mock import mock_open 4 | from postprocessing import PostProcessing 5 | from mock import MagicMock 6 | import postprocessing 7 | from textwrap import dedent 8 | 9 | 10 | class TestPostProcessing(TestCase): 11 | def setUp(self): 12 | self.context = MagicMock() 13 | self.writer = MagicMock() 14 | self.postprocessing = PostProcessing(self.context, self.writer) 15 | 16 | def test_cut_log(self): 17 | data = dedent(""" 18 | line1 19 | line2 start 20 | line3 21 | line4 end 22 | line5 23 | """).strip() 24 | 25 | m = mock_open(read_data=''.join(data)) 26 | m.return_value.__iter__ = lambda self: self 27 | m.return_value.__next__ = lambda self: next(iter(self.readline, '')) 28 | with patch('builtins.open', m) as m_open: 29 | postprocessing._extract_from_file('source_file', 'destination_file', 'start', 'end') 30 | 31 | self.assertEqual(m_open.call_count, 2) 32 | self.assertEqual(m_open.call_args_list[0][0][0], 'source_file') 33 | self.assertEqual(m_open.call_args_list[1][0][0], 'destination_file') 34 | 35 | handle = m_open() 36 | self.assertEqual(handle.write.call_args_list[0][0][0], 'line2 start\n') 37 | 
self.assertEqual(handle.write.call_args_list[1][0][0], 'line3\n') 38 | self.assertEqual(handle.write.call_args_list[2][0][0], 'line4 end\n') 39 | -------------------------------------------------------------------------------- /code/tests/test_nodes_config.py: -------------------------------------------------------------------------------- 1 | from unittest import TestCase 2 | from simulationfiles import nodes_config 3 | from simulationfiles.nodes_config import NodeConfig 4 | from mock import patch 5 | 6 | 7 | class TestNodesConfig(TestCase): 8 | 9 | def test_check_if_share_sum_is_1_false(self): 10 | nodes = [NodeConfig('group', 'node-0', 0.4, 0, None), NodeConfig('group', 'node-1', 0.4, 0, None)] 11 | 12 | with self.assertRaises(SystemExit) as cm: 13 | nodes_config._check_if_share_sum_is_1(nodes) 14 | 15 | self.assertEqual(cm.exception.code, -1) 16 | 17 | def test_check_if_share_sum_is_1_true(self): 18 | nodes = [NodeConfig('group', 'node-0', 0.4, 0, None), NodeConfig('group', 'node-1', 0.6, 0, None)] 19 | 20 | nodes_config._check_if_share_sum_is_1(nodes) 21 | 22 | @patch('bash.call_silent') 23 | def test_check_if_image_exists(self, m_call_silent): 24 | node_args = ['a', 'b', 'c', 'd', 'image'] 25 | m_call_silent.return_value = 0 26 | 27 | nodes_config._check_if_image_exists(node_args) 28 | 29 | self.assertTrue(m_call_silent.called) 30 | self.assertEqual(m_call_silent.call_args[0][0], 'docker inspect image') 31 | 32 | @patch('bash.call_silent') 33 | def test_check_if_image_exists_image_does_not_exist(self, m_call_silent): 34 | node_args = ['a', 'b', 'c', 'd', 'image'] 35 | m_call_silent.return_value = -1 36 | 37 | with self.assertRaises(SystemExit) as context: 38 | nodes_config._check_if_image_exists(node_args) 39 | 40 | self.assertEqual(context.exception.code, -1) 41 | -------------------------------------------------------------------------------- /important_commands.txt: -------------------------------------------------------------------------------- 1 | // docker command to test delay between two containers 2 | docker run -it --cap-add=NET_ADMIN ubuntu /bin/bash 3 | apt-get update; apt-get -y install iputils-ping; apt-get -y install net-tools; apt-get -y install iproute 4 | 5 | // tc command to simulate network delay 6 | tc qdisc del dev eth0 root 7 | tc qdisc add dev eth0 root handle 1: prio bands 2 priomap 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 8 | tc filter add dev eth0 protocol ip parent 1: prio 1 u32 match ip dst 240.0.0.2 flowid 1:2 9 | tc qdisc add dev eth0 parent 1:1 handle 10: netem delay 0ms 10 | tc qdisc add dev eth0 parent 1:2 handle 20: netem delay 1000ms 11 | 12 | // bash command to run the simulation multiple times 13 | array=(0.5 1); for i in "${array[@]}"; do python3 main.py --tick-duration $i; done 14 | 15 | // rsync command to sync files to the server 16 | rsync --dry-run --delete -r --verbose -e="ssh -p 2222" --filter=':- .gitignore' --exclude=data --exclude='.git*' ./ simon@128.131.169.35:simcoin 17 | 18 | // ssh command with port forwarding for Cockpit 19 | ssh -p 2222 -L 9099:localhost:9090 simon@128.131.169.35 20 | 21 | // google performance tool 22 | https://wiki.geany.org/howtos/profiling/gperftools 23 | google-pprof ../patched_bitcoin_client_cpp/src/bitcoind ~/Desktop/bitcoin-1.prof 24 | 25 | // check kernel messages 26 | dmesg -w 27 | 28 | // increase arp cache 29 | sudo sysctl -w net.ipv4.neigh.default.gc_thresh1=16384 30 | sudo sysctl -w net.ipv4.neigh.default.gc_thresh2=32768 31 | sudo sysctl -w net.ipv4.neigh.default.gc_thresh3=65536 32 | 33 | // check arp cache length
34 | watch -n3 "arp -n | wc -l" 35 | 36 | // check sockets 37 | watch -n5 ss -s -a -t 38 | -------------------------------------------------------------------------------- /code/reporter/preprocess.R: -------------------------------------------------------------------------------- 1 | library(dplyr) 2 | 3 | args <- read.csv('../args.csv', stringsAsFactors=FALSE) 4 | 5 | tick_infos <- read.csv("tick_infos.csv") 6 | tick_infos <- tick_infos %>% arrange(actual_start) %>% mutate(end = actual_start + duration) 7 | write.csv(tick_infos, 'tick_infos.csv', row.names=FALSE, quote=FALSE) 8 | 9 | ticks <- readLines(file('../ticks.csv')) 10 | if (args$skip_ticks == 0){ 11 | analysed_tick_infos <- tick_infos 12 | analysed_ticks <- ticks 13 | } else { 14 | analysed_tick_infos <- head(tail(tick_infos, -args$skip_ticks), -args$skip_ticks) 15 | analysed_ticks <- head(tail(ticks, -args$skip_ticks), -args$skip_ticks) 16 | } 17 | write.csv(analysed_tick_infos, 'analysed_tick_infos.csv', row.names=FALSE, quote=FALSE) 18 | write(analysed_ticks, 'analysed_ticks.csv') 19 | 20 | files = c("blocks_create", "blocks_stats", "txs") 21 | for (i in 1:length(files)) { 22 | data <- read.csv(paste(files[i], '_raw.csv', sep = '')) 23 | data <- data %>% 24 | arrange(timestamp) %>% 25 | filter(timestamp > head(analysed_tick_infos$actual_start, 1) & timestamp < tail(analysed_tick_infos$end, 1)) 26 | write.csv(data, paste(files[i], 'csv', sep = '.'), row.names=FALSE, quote=FALSE) 27 | } 28 | 29 | files = c("blocks_reconstructed", "txs_received", "blocks_received", "peer_logic_validation", "update_tip") 30 | for (i in 1:length(files)) { 31 | data <- read.csv(paste(files[i], '_raw.csv', sep = '')) 32 | data <- data %>% 33 | arrange(timestamp) %>% 34 | filter(timestamp > head(analysed_tick_infos$actual_start, 1)) 35 | write.csv(data, paste(files[i], 'csv', sep = '.'), row.names=FALSE, quote=FALSE) 36 | } 37 | -------------------------------------------------------------------------------- /code/cmd/bitcoincmd.py: -------------------------------------------------------------------------------- 1 | from cmd import dockercmd 2 | import config 3 | 4 | daemon = 'bitcoind ' 5 | args = { 6 | 'regtest': '-regtest', 7 | 'datadir': '-datadir=' + config.bitcoin_data_dir, 8 | 9 | # log all events relevant for parsing 10 | 'debug': '-debug=cmpctblock -debug=net -debug=mempool', 11 | 'logips': '-logips', 12 | 'logtimemicros': '-logtimemicros', 13 | 14 | 15 | # activate listen even though explicit -connect will be set 16 | 'listen': '-listen=1', 17 | 'listenonion': '-listenonion=0', 18 | 'onlynet': '-onlynet=ipv4', 19 | 'dnsseed': '-dnsseed=0', 20 | 21 | 'reindex': '-reindex', 22 | 'checkmempool': '-checkmempool=0', 23 | 'keypool': '-keypool=1', 24 | 25 | # RPC configuration 26 | 'rpcuser': '-rpcuser=admin', 27 | 'rpcpassword': '-rpcpassword=admin', 28 | 'rpcallowip': '-rpcallowip=1.1.1.1/0.0.0.0', 29 | 'rpcservertimeout': '-rpcservertimeout=' + str(config.rpc_timeout), 30 | } 31 | 32 | 33 | def start(name, ip, docker_image, path, connect_to_ips): 34 | return_args = args.copy() 35 | cmd = transform_to_cmd(return_args) 36 | for _ip in connect_to_ips: 37 | cmd += ' -connect=' + str(_ip) 38 | return dockercmd.run_node(name, ip, docker_image, cmd, path) 39 | 40 | 41 | def transform_to_cmd(args_to_transform): 42 | return daemon + ' '.join(args_to_transform.values()) 43 | 44 | 45 | def rm_peers(node): 46 | return dockercmd.exec_cmd(node, 'rm -f {}/regtest/peers.dat'.format(config.bitcoin_data_dir)) 47 | 
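For illustration, a minimal runnable sketch of how bitcoincmd composes the daemon invocation (the flag dict below is a hypothetical two-entry subset of the `args` dict above; the full command is additionally wrapped into a container start by `dockercmd.run_node`):

daemon = 'bitcoind '
flags = {'regtest': '-regtest', 'datadir': '-datadir=/data'}  # subset of the args dict above

def transform_to_cmd(args_to_transform):
    # join the daemon name with all configured flag values
    return daemon + ' '.join(args_to_transform.values())

cmd = transform_to_cmd(flags)
for ip in ['240.1.0.1', '240.2.0.1']:  # illustrative peer IPs from the 240.0.0.0/4 range
    cmd += ' -connect=' + str(ip)
print(cmd)  # bitcoind -regtest -datadir=/data -connect=240.1.0.1 -connect=240.2.0.1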
-------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | /data 2 | .vscode 3 | .idea 4 | 5 | # Byte-compiled / optimized / DLL files 6 | __pycache__/ 7 | *.py[cod] 8 | *$py.class 9 | 10 | # C extensions 11 | *.so 12 | 13 | # Distribution / packaging 14 | .Python 15 | env/ 16 | build/ 17 | develop-eggs/ 18 | dist/ 19 | downloads/ 20 | eggs/ 21 | .eggs/ 22 | lib/ 23 | lib64/ 24 | parts/ 25 | sdist/ 26 | var/ 27 | wheels/ 28 | *.egg-info/ 29 | .installed.cfg 30 | *.egg 31 | 32 | # PyInstaller 33 | # Usually these files are written by a python script from a template 34 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 35 | *.manifest 36 | *.spec 37 | 38 | # Installer logs 39 | pip-log.txt 40 | pip-delete-this-directory.txt 41 | 42 | # Unit test / coverage reports 43 | htmlcov/ 44 | .tox/ 45 | .coverage 46 | .coverage.* 47 | .cache 48 | nosetests.xml 49 | coverage.xml 50 | *,cover 51 | .hypothesis/ 52 | 53 | # Translations 54 | *.mo 55 | *.pot 56 | 57 | # Django stuff: 58 | *.log 59 | local_settings.py 60 | 61 | # Flask stuff: 62 | instance/ 63 | .webassets-cache 64 | 65 | # Scrapy stuff: 66 | .scrapy 67 | 68 | # Sphinx documentation 69 | docs/_build/ 70 | 71 | # PyBuilder 72 | target/ 73 | 74 | # Jupyter Notebook 75 | .ipynb_checkpoints 76 | 77 | # pyenv 78 | .python-version 79 | 80 | # celery beat schedule file 81 | celerybeat-schedule 82 | 83 | # dotenv 84 | .env 85 | 86 | # virtualenv 87 | .venv 88 | venv/ 89 | ENV/ 90 | 91 | # Spyder project settings 92 | .spyderproject 93 | 94 | # Rope project settings 95 | .ropeproject 96 | 97 | log 98 | logs 99 | forks 100 | datadirs 101 | 102 | # Intellij 103 | *.iml 104 | -------------------------------------------------------------------------------- /code/cmd/tccmd.py: -------------------------------------------------------------------------------- 1 | import operator 2 | from cmd import dockercmd 3 | 4 | # 5 | # for the 'tc' command 'iproute2' needs to be installed inside the container 6 | # furthermore the container needs to be started with '--cap-add=NET_ADMIN' 7 | # 8 | 9 | 10 | def create(node, zones, latency): 11 | sorted_zones = sorted(zones.items(), key=operator.itemgetter(0)) 12 | 13 | cmds = [ 14 | 'tc qdisc add dev eth0' 15 | ' root handle 1: prio bands {}' 16 | ' priomap 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0' 17 | .format(len(zones) + 1) 18 | ] 19 | 20 | for index, zone_tuple in enumerate(sorted_zones): 21 | zone = zone_tuple[1] 22 | 23 | cmds.append( 24 | 'tc filter add dev eth0' 25 | ' parent 1: protocol ip prio {} u32' 26 | ' match ip dst {} flowid 1:{}' 27 | .format(index + 1, zone.network, index + 2) 28 | ) 29 | 30 | cmds.append('tc qdisc add dev eth0 parent 1:1 handle 10: netem delay 0ms') 31 | 32 | for index, zone_tuple in enumerate(sorted_zones): 33 | zone = zone_tuple[1] 34 | 35 | if zone.latency == latency: 36 | aggregated_latency = latency 37 | else: 38 | aggregated_latency = latency + zone.latency 39 | 40 | cmds.append( 41 | 'tc qdisc add dev eth0' 42 | ' parent 1:{} handle {}: netem delay {}ms' 43 | .format(index + 2, (index + 2) * 10, aggregated_latency) 44 | ) 45 | 46 | 47 | # docker_cmds = [dockercmd.exec_cmd(node, cmd) for cmd in cmds] 48 | batch_cmd = " sh -c '" + " ; ".join(cmds) + "'" 49 | docker_cmds = [dockercmd.exec_cmd(node, batch_cmd)] 50 | 51 | return docker_cmds 52 | 53 | -------------------------------------------------------------------------------- 
/code/cmd/dockercmd.py: -------------------------------------------------------------------------------- 1 | import config 2 | 3 | 4 | def run_node(name, ip, docker_image, cmd, path): 5 | return ('docker run' 6 | ' --cap-add=NET_ADMIN' # for `tc` 7 | ' --detach=true' 8 | ' --net=' + config.network_name + 9 | ' --ip=' + ip + 10 | ' --name=' + config.prefix + name + # container name 11 | ' --hostname=' + config.prefix + name + 12 | ' --volume $PWD/' + path + ':' + config.client_dir + 13 | ' ' + docker_image + 14 | ' bash -c "' + cmd + '"') 15 | 16 | 17 | def exec_cmd(node, cmd): 18 | return 'docker exec {}{} {}'.format(config.prefix, node, cmd) 19 | 20 | 21 | def create_network(): 22 | return ('docker network create' 23 | ' --subnet={} --driver bridge {}'.format(config.ip_range, config.network_name)) 24 | 25 | 26 | def rm_network(): 27 | return 'docker network rm {}'.format(config.network_name) 28 | 29 | 30 | def fix_data_dirs_permissions(path): 31 | return ('docker run ' 32 | ' --rm --volume $PWD/{}:/mnt ubuntu' 33 | ' chmod a+rwx --recursive /mnt'.format(path)) 34 | 35 | 36 | def rm_container(name): 37 | return 'docker rm --force {}{}'.format(config.prefix, name) 38 | 39 | 40 | def ps_containers(): 41 | return 'docker ps -a -q -f "name={}*"'.format(config.prefix) 42 | 43 | 44 | def remove_all_containers(): 45 | return 'docker rm -f $({})'.format(ps_containers()) 46 | 47 | 48 | def inspect_network(): 49 | return 'docker network inspect {}'.format(config.network_name) 50 | 51 | 52 | def inspect(image): 53 | return 'docker inspect {}'.format(image) 54 | 55 | 56 | def check_if_running(name): 57 | return 'docker inspect -f {{{{.State.Running}}}} {0}{1}'.format(config.prefix, name) 58 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | all: 2 | echo "Run the following targets manually install, build-image, test, run" 3 | 4 | demo: 5 | cd code; \ 6 | python3 simcoin.py \ 7 | run \ 8 | --group-a 2 .6 0 simcoin/bitcoin:v15.0.1 \ 9 | --group-b 1 .4 0 simcoin/bitcoin:v15.0.1 \ 10 | --amount-of-ticks 10 \ 11 | --txs-per-tick 2 \ 12 | --blocks-per-tick 0.7 \ 13 | --system-snapshots-frequency 2 14 | 15 | multidemo: 16 | cd code; \ 17 | python3 simcoin.py \ 18 | multi-run \ 19 | --repeat 2 \ 20 | --group-a 2 .6 10 simcoin/bitcoin:v15.0.1 \ 21 | --group-b 1 .4 10 simcoin/bitcoin:v15.0.1 \ 22 | --blocks-per-tick 0.9 \ 23 | --amount-of-ticks 7 \ 24 | --txs-per-tick 10 \ 25 | --tick-duration 1 \ 26 | --system-snapshots-frequency 1 27 | 28 | install: 29 | # for kableExtra 30 | sudo apt install libmagick++-dev 31 | sudo apt install pandoc 32 | cd code; pip3 install -r requirements.txt 33 | R -e "install.packages(c('rmarkdown','devtools','jsonlite','dplyr','anytime', 'kableExtra', 'lattice', 'reshape2'), repos='https://cran.wu.ac.at')" 34 | # https://stackoverflow.com/questions/20923209/problems-installing-the-devtools-package 35 | 36 | build-image: 37 | cd ./code/docker; \ 38 | docker build --no-cache --tag simcoin/bitcoin:v15.0.1 . 39 | 40 | rm-image: 41 | docker rmi simcoin/bitcoin:v15.0.1 42 | 43 | cp-run: 44 | rm -r /tmp/run; \ 45 | mkdir /tmp/run; \ 46 | cp -r data/last_run/* /tmp/run/. 47 | 48 | cp-multi: 49 | rm -r /tmp/run; \ 50 | mkdir /tmp/run; \ 51 | mkdir /tmp/run/postprocessing; \ 52 | cp -r data/last_multi_run/* /tmp/run/postprocessing/. 
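# Note: the cp-run and cp-multi targets above stage the latest results from
# data/last_run and data/last_multi_run into /tmp/run, so they can be inspected
# without touching the data/ directory.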
53 | 54 | .PHONY : test 55 | test: 56 | cd code; \ 57 | python3 \ 58 | -m unittest discover \ 59 | -s tests 60 | 61 | .PHONY : clean 62 | clean: 63 | rm -rf data/* 64 | docker stop `docker ps --quiet --filter name=simcoin` 65 | -------------------------------------------------------------------------------- /code/context.py: -------------------------------------------------------------------------------- 1 | import config 2 | from node import PublicBitcoinNode 3 | import utils 4 | from simulationfiles import network_config 5 | from simulationfiles.zone import Zone 6 | from collections import OrderedDict 7 | import time 8 | 9 | 10 | class Context: 11 | def __init__(self): 12 | self._run_name = 'run-' + str(time.time()) 13 | self._run_dir = config.data_dir + self._run_name + '/' 14 | self._args = utils.read_args() 15 | self._zone = Zone() 16 | 17 | self._first_block_height = None 18 | self._step_times = [] 19 | 20 | node_configs = utils.read_csv(config.nodes_csv) 21 | self._nodes = OrderedDict([]) 22 | 23 | for node_config in node_configs: 24 | self.nodes.update({node_config.name: PublicBitcoinNode( 25 | node_config.name, node_config.group, 26 | self.zone.get_ip(node_config.latency), 27 | node_config.latency, node_config.docker_image, 28 | self.run_dir + node_config.name)}) 29 | 30 | connections = network_config.read_connections() 31 | for node in self.nodes.values(): 32 | node.set_outgoing_ips( 33 | [self.nodes[connection].ip for connection in connections[node.name]] 34 | ) 35 | 36 | 37 | @property 38 | def run_name(self): 39 | return self._run_name 40 | 41 | @property 42 | def run_dir(self): 43 | return self._run_dir 44 | 45 | @property 46 | def args(self): 47 | return self._args 48 | 49 | @property 50 | def zone(self): 51 | return self._zone 52 | 53 | @property 54 | def nodes(self): 55 | return self._nodes 56 | 57 | @property 58 | def first_block_height(self): 59 | return self._first_block_height 60 | 61 | @first_block_height.setter 62 | def first_block_height(self, height): 63 | self._first_block_height = height 64 | 65 | @property 66 | def step_times(self): 67 | return self._step_times 68 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | ## Simcoin - A Blockchain Simulation Framework 🏗️ 2 | Simcoin facilitates realistic simulations of blockchain networks (currently Bitcoin only). The network is virtualised by the simulation software on a single Unix host machine. To be able to spawn multiple peers, the CPU-heavy proof-of-work is deactivated. Blocks and transactions are created by sending the respective commands over RPC to the nodes according to a pre-configured simulation scenario. 3 | 4 | ## Getting started 🏁 5 | Prerequisites: `python3`, `pip3`, `docker`, `make` and `R`. Check that you have them installed.
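The quickest way to a first simulation is the step list below. Under the hood, a full run is just four Python calls chained together, as in `code/run_cmd.py` (a sketch, assuming it is executed from within `code/` with the usual simulation arguments on the command line):

```python
from simulationfiles import nodes_config, ticks_config, network_config
import simulation_cmd

# generate nodes.csv, ticks.csv and network.csv, then execute the simulation
nodes_config.create(unknown_arguments=True)
ticks_config.create(unknown_arguments=True)
network_config.create(unknown_arguments=True)
simulation_cmd.run(unknown_arguments=True)
```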
6 | 7 | * `git clone https://github.com/simonmulser/simcoin.git` 8 | * `cd simcoin` 9 | * `make install` (if it fails, check the output; you may need to install additional dependencies depending on your OS) 10 | * `make build-image` (consider using multiple threads to build the image; see the Dockerfile under `code/docker`) 11 | * `make demo` 12 | * check out the results under `data/last_run` and the generated report at `data/last_run/postprocessing/report.pdf` 13 | 14 | ## Stack 📚 15 | * [Python 3](https://www.python.org/) 16 | * [Docker](https://www.docker.com/) 17 | * [R Markdown](http://rmarkdown.rstudio.com/) 18 | 19 | ## Performance 🚀 20 | When running a simulation, monitor the host machine closely. Check the utilisation of RAM, CPU, disk and network. Further, check the kernel log of the host system (`dmesg`) as well as the logs produced by the simulation framework (`data/debug.log`) and the nodes (`data/last_run/node-X/debug.log`). To improve performance, consider the following: 21 | * Increase the ARP-cache if you encounter a neighbour table (ARP-cache) overflow in the kernel messages (`dmesg`). 22 | * Run the whole simulation in RAM by using tmpfs. 23 | * Use a better host machine! 😉 24 | 25 | ## Outlook/Possible improvements 🔮 26 | * Improving performance and stability by [using Unix domain sockets](https://github.com/bitcoin/bitcoin/pull/9979) for host-to-peer communication. 27 | * Supporting other blockchain projects such as Ethereum or Litecoin. 28 | * Using the [ELK-Stack](https://www.elastic.co/products) to parse, store and analyse the log files. 29 | * Using [Kubernetes](https://kubernetes.io/) to orchestrate containers. 30 | -------------------------------------------------------------------------------- /code/tests/test_node.py: -------------------------------------------------------------------------------- 1 | from unittest import TestCase 2 | from node import BitcoinNode 3 | from mock import patch 4 | from mock import MagicMock 5 | from bitcoin.wallet import CBitcoinSecret 6 | import bitcoin 7 | import node as node_utils 8 | 9 | bitcoin.SelectParams('regtest') 10 | 11 | 12 | class TestNode(TestCase): 13 | 14 | def setUp(self): 15 | self.node = BitcoinNode('node-1', 'group', 'ip', 'image', '/path') 16 | 17 | @patch('node.BitcoinNode.execute_rpc') 18 | def test_get_coinbase_variables(self, m_execute_rpc): 19 | m_execute_rpc.side_effect = [ 20 | [ 21 | {"txid": 'tx_hash_1', 'address': 'address_hash_1', 'amount': 50}, 22 | {"txid": 'tx_hash_2', 'address': 'address_hash_2', 'amount': 25} 23 | ], 24 | 'cTCrrgVLfBqEZ1dxmCnEwmiEWzeZHU8uw3CNvLVvbT4CrBeDdTqc', 25 | 'cTCrrgVLfBqEZ1dxmCnEwmiEWzeZHU8uw3CNvLVvbT4CrBeDdTqc' 26 | ] 27 | 28 | self.node.create_tx_chains() 29 | 30 | self.assertEqual(m_execute_rpc.call_count, 3) 31 | self.assertEqual(len(self.node._tx_chains), 2) 32 | 33 | chain_1 = self.node._tx_chains[0] 34 | self.assertEqual(chain_1.current_unspent_tx, 'tx_hash_1') 35 | self.assertEqual(chain_1.address, 'address_hash_1') 36 | self.assertEqual(chain_1.seckey, CBitcoinSecret('cTCrrgVLfBqEZ1dxmCnEwmiEWzeZHU8uw3CNvLVvbT4CrBeDdTqc')) 37 | self.assertEqual(chain_1.amount, 5000000000) 38 | 39 | chain_2 = self.node._tx_chains[1] 40 | self.assertEqual(chain_2.current_unspent_tx, 'tx_hash_2') 41 | self.assertEqual(chain_2.address, 'address_hash_2') 42 | self.assertEqual(chain_2.amount, 2500000000) 43 | 44 | @patch('utils.sleep') 45 | def test_wait_until_height_reached(self, m_sleep): 46 | node = MagicMock() 47 | node.execute_rpc.side_effect = ['0', '9', '10']
node_utils.wait_until_height_reached(node, 10) 48 | 49 | self.assertEqual(m_sleep.call_count, 2) 50 | 51 | @patch('utils.sleep') 52 | def test_wait_until_height_reached_already_reached(self, m_sleep): 53 | node = MagicMock() 54 | node.execute_rpc.return_value = '10' 55 | node_utils.wait_until_height_reached(node, 10) 56 | 57 | self.assertFalse(m_sleep.called) 58 | -------------------------------------------------------------------------------- /code/tests/test_checkargs.py: -------------------------------------------------------------------------------- 1 | from unittest import TestCase 2 | from simulationfiles import checkargs 3 | import argparse 4 | 5 | 6 | class TestCheckargs(TestCase): 7 | 8 | def __init__(self, *args, **kwargs): 9 | super(TestCheckargs, self).__init__(*args, **kwargs) 10 | 11 | def test_check_positive(self): 12 | value = checkargs.check_positive(0) 13 | 14 | self.assertEqual(value, 0) 15 | 16 | def test_check_positive_exception(self): 17 | with self.assertRaises(argparse.ArgumentTypeError) as context: 18 | checkargs.check_positive(-1) 19 | 20 | self.assertTrue('-1 is an invalid positive value' in str(context.exception)) 21 | 22 | def test_check_percentage_zero(self): 23 | value = checkargs.check_percentage(0) 24 | self.assertEqual(value, 0) 25 | 26 | def test_check_percentage_one(self): 27 | value = checkargs.check_percentage(1) 28 | self.assertEqual(value, 1) 29 | 30 | def test_check_percentage_negative(self): 31 | with self.assertRaises(argparse.ArgumentTypeError) as context: 32 | checkargs.check_percentage(-0.1) 33 | 34 | self.assertTrue('-0.1 is an invalid percentage value [0,1]' in str(context.exception)) 35 | 36 | def test_check_percentage_over_one(self): 37 | with self.assertRaises(argparse.ArgumentTypeError) as context: 38 | checkargs.check_percentage(1.1) 39 | 40 | self.assertTrue('1.1 is an invalid percentage value [0,1]' in str(context.exception)) 41 | 42 | def test_check_percentage_with_string(self): 43 | with self.assertRaises(ValueError): 44 | checkargs.check_percentage('test') 45 | 46 | def test_check_positive_float(self): 47 | value = checkargs.check_positive_float(1.1) 48 | 49 | self.assertEqual(value, 1.1) 50 | 51 | def test_check_positive_float_with_string(self): 52 | with self.assertRaises(ValueError): 53 | checkargs.check_positive_float('test') 54 | 55 | def test_check_positive_int_with_float(self): 56 | with self.assertRaises(argparse.ArgumentTypeError) as context: 57 | checkargs.check_positive_int(1.1) 58 | self.assertTrue('1.1 is an invalid integer' in str(context.exception)) 59 | 60 | def test_check_positive_int(self): 61 | self.assertEqual(checkargs.check_positive_int('10'), 10) 62 | -------------------------------------------------------------------------------- /code/systemmonitor.py: -------------------------------------------------------------------------------- 1 | import sched 2 | import time 3 | import logging 4 | import bash 5 | import re 6 | 7 | PRIORITY = 1 8 | 9 | 10 | def run(stop_event, frequency, q_cpu_time, q_memory): 11 | logging.info('Starting system monitor with frequency={}s'.format(str(frequency))) 12 | scheduler = sched.scheduler(time.time, time.sleep) 13 | next_execution = time.time() 14 | 15 | while not stop_event.wait(0): 16 | scheduler.enterabs(next_execution, PRIORITY, _collect, (q_cpu_time, q_memory,)) 17 | scheduler.run() 18 | next_execution += frequency 19 | 20 | 21 | def _collect(q_cpu_time, q_memory): 22 | cpu_time = bash.check_output('cat /proc/stat | head -1') 23 | memory = bash.check_output('cat /proc/meminfo | head -3') 24 |
q_cpu_time.put(CpuTimeSnapshot.from_bash(cpu_time)) 25 | q_memory.put(MemorySnapshot.from_bash(memory)) 26 | 27 | logging.info('Collected cpu_time and memory usage') 28 | 29 | 30 | class CpuTimeSnapshot: 31 | __slots__ = ['_timestamp', '_user', '_nice', '_system', '_idle'] 32 | file_name = 'cpu_time.csv' 33 | csv_header = ['timestamp', 'user', 'nice', 'system', 'idle'] 34 | 35 | def __init__(self, timestamp, user, nice, system, idle): 36 | self._timestamp = timestamp 37 | self._user = user 38 | self._nice = nice 39 | self._system = system 40 | self._idle = idle 41 | 42 | @classmethod 43 | def from_bash(cls, cpu_time): 44 | cpu_matched = re.match('cpu\s+([0-9]+)\s+([0-9]+)\s+([0-9]+)\s+([0-9]+)', cpu_time) 45 | snapshot = cls(time.time(), cpu_matched.group(1), cpu_matched.group(2), cpu_matched.group(3), cpu_matched.group(4)) 46 | return snapshot 47 | 48 | def vars_to_array(self): 49 | return [self._timestamp, self._user, self._nice, self._system, self._idle] 50 | 51 | 52 | class MemorySnapshot: 53 | __slots__ = ['_timestamp', '_total', '_available'] 54 | 55 | file_name = 'memory.csv' 56 | csv_header = ['timestamp', 'total', 'available'] 57 | 58 | def __init__(self, timestamp, total, available): 59 | self._timestamp = timestamp 60 | self._total = total 61 | self._available = available 62 | 63 | @classmethod 64 | def from_bash(cls, memory): 65 | memory_matched = re.match('MemTotal:\s+([0-9]+)\s+kB\n.*\nMemAvailable:\s+([0-9]+)\s+kB', memory) 66 | snapshot = cls(time.time(), memory_matched.group(1), memory_matched.group(2)) 67 | return snapshot 68 | 69 | def vars_to_array(self): 70 | return [self._timestamp, self._total, self._available] 71 | -------------------------------------------------------------------------------- /code/simulation_cmd.py: -------------------------------------------------------------------------------- 1 | from runner import Runner 2 | import logging 3 | import time 4 | from postprocessing import PostProcessing 5 | from event import Event 6 | import config 7 | from context import Context 8 | from prepare import Prepare 9 | from write import Writer 10 | import utils 11 | import sys 12 | import argparse 13 | from simulationfiles import checkargs 14 | 15 | 16 | def _create_parser(): 17 | parser = argparse.ArgumentParser() 18 | 19 | parser.add_argument('--skip-ticks' 20 | , type=checkargs.check_positive_int 21 | , default=0 22 | , help='Amount of ticks skipped for analysis at the beginning and at the end of the simulation' 23 | ) 24 | 25 | parser.add_argument('--tick-duration' 26 | , default=1 27 | , type=checkargs.check_positive_float 28 | , help='Duration of ticks.') 29 | return parser 30 | 31 | 32 | def run(unknown_arguments=False): 33 | for file in [config.ticks_csv, config.network_csv, config.nodes_csv]: 34 | utils.check_for_file(file) 35 | 36 | parser = _create_parser() 37 | if unknown_arguments: 38 | args = parser.parse_known_args(sys.argv[2:])[0] 39 | else: 40 | args = parser.parse_args(sys.argv[2:]) 41 | logging.info("Parsed arguments in {}: {}".format(__name__, args)) 42 | utils.update_args(args) 43 | 44 | _check_skip_ticks(args.skip_ticks) 45 | 46 | context = Context() 47 | 48 | logging.info(config.log_line_run_start + context.run_name) 49 | 50 | tag = context.args.tag 51 | if hasattr(context.args, 'tag_appendix'): 52 | tag += context.args.tag_appendix 53 | writer = Writer(tag) 54 | runner = Runner(context, writer) 55 | 56 | prepare = Prepare(context) 57 | runner._prepare = prepare 58 | 59 | postprocessing = PostProcessing(context, writer) 60 | 
runner._postprocessing = postprocessing 61 | 62 | event = Event(context) 63 | runner._event = event 64 | 65 | start = time.time() 66 | 67 | runner.run() 68 | 69 | logging.info("The duration of the run was {} seconds".format(str(time.time() - start))) 70 | 71 | 72 | def _check_skip_ticks(skip_ticks): 73 | amount_of_ticks = 0 74 | with open(config.ticks_csv, 'r') as file: 75 | for _ in file: 76 | amount_of_ticks += 1 77 | 78 | if amount_of_ticks <= 2 * skip_ticks: 79 | logging.error('You want to skip two times skip_ticks={} but you only have {} ticks in your {}.' 80 | .format(skip_ticks, amount_of_ticks, config.ticks_csv_file_name)) 81 | exit(-1) 82 | -------------------------------------------------------------------------------- /code/config.py: -------------------------------------------------------------------------------- 1 | import time 2 | import multiprocessing 3 | 4 | pool_processors = multiprocessing.cpu_count() 5 | file_chunk_size = 10 * 1024 * 1024 # 10MB 6 | 7 | # IP range from RFC6890 - IP range for future use 8 | # it does not conflict with https://github.com/bitcoin/bitcoin/blob/master/src/netbase.h 9 | ip_range = "240.0.0.0/4" 10 | ip_zones = '240.{}.0.0/16' 11 | 12 | standard_image = 'simcoin/bitcoin:v15.0.1' 13 | number_of_node_group_arguments = 4 14 | 15 | network_name = 'simcoin-network' 16 | prefix = 'simcoin-' 17 | node_prefix = 'node-' 18 | node_name = node_prefix + '{}.{}' 19 | 20 | max_wait_time_bitcoin_runs_out = 30 21 | 22 | rpc_user = 'admin' 23 | rpc_password = 'admin' 24 | rpc_port = 18332 25 | rpc_timeout = 3600 26 | 27 | reference_node = 'node-0' 28 | 29 | blocks_needed_to_make_coinbase_spendable = 100 30 | max_in_mempool_ancestors = 25 31 | 32 | log_prefix_timestamp = r'^([0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}:[0-9]{2}.[0-9]{6}) ' 33 | log_error_grep = 'grep -E -i "WARN|FATAL|ERROR|CRITICAL|EXCEPTION" {} || true' 34 | log_time_format = '%Y-%m-%d %H:%M:%S.%f' 35 | log_line_run_start = 'RUN START ' 36 | log_line_run_end = 'RUN END ' 37 | 38 | smallest_amount = 1 39 | smallest_amount_btc = 0.00000001 40 | transaction_fee = 1000 41 | 42 | amount_of_system_snapshots = 500 43 | 44 | bitcoin_log_file_name = '/debug.log' 45 | 46 | data_dir = '../data/' 47 | 48 | network_csv_file_name = 'network.csv' 49 | ticks_csv_file_name = 'ticks.csv' 50 | nodes_csv_file_name = 'nodes.csv' 51 | args_csv_file_name = 'args.csv' 52 | 53 | network_csv = data_dir + network_csv_file_name 54 | ticks_csv = data_dir + ticks_csv_file_name 55 | nodes_csv = data_dir + nodes_csv_file_name 56 | args_csv = data_dir + args_csv_file_name 57 | 58 | bitcoin_data_dir = '/data' 59 | client_dir = bitcoin_data_dir + '/regtest' 60 | 61 | preprocess_r_file_name = 'preprocess.R' 62 | report_rmd_file_name = 'report.Rmd' 63 | 64 | multi_run_dir_name = 'multi-run-{}'.format(time.time()) 65 | multi_run_dir = '{}{}'.format(data_dir, multi_run_dir_name) 66 | last_multi_run = 'last_multi_run' 67 | soft_link_to_multi_run_dir = '{}{}'.format(data_dir, last_multi_run) 68 | 69 | log_file = data_dir + 'debug.log' 70 | 71 | last_run = 'last_run' 72 | soft_link_to_run_dir = '{}{}'.format(data_dir, last_run) 73 | run_log = soft_link_to_run_dir + '/run.log' 74 | 75 | analysed_tick_infos_file_name = 'analysed_tick_infos.csv' 76 | step_times_csv_file_name = 'step_times.csv' 77 | consensus_chain_csv_file_name = 'consensus_chain.csv' 78 | 79 | postprocessing_dir = soft_link_to_run_dir + '/postprocessing/' 80 | node_config = soft_link_to_run_dir + '/node_config/' 81 | btc_conf_file = node_config + '{}.conf' 82 | 
consensus_chain_csv = postprocessing_dir + consensus_chain_csv_file_name 83 | general_infos_csv = postprocessing_dir + 'general_infos.csv' 84 | analysed_ticks_csv = postprocessing_dir + 'analysed_ticks.csv' 85 | -------------------------------------------------------------------------------- /code/event.py: -------------------------------------------------------------------------------- 1 | import config 2 | import logging 3 | import time 4 | import utils 5 | from bitcoin.rpc import JSONRPCError 6 | import math 7 | 8 | 9 | class Event: 10 | 11 | def __init__(self, context): 12 | self._context = context 13 | self._txs_count = self._blocks_count = 0 14 | 15 | def execute(self): 16 | try: 17 | utils.check_for_file(config.ticks_csv) 18 | with open(config.ticks_csv, 'r') as file: 19 | 20 | start_time = time.time() 21 | for i, line in enumerate(file): 22 | actual_start = time.time() 23 | planned_start = start_time + i * self._context.args.tick_duration 24 | 25 | self._txs_count = self._blocks_count = 0 26 | 27 | line = line.rstrip() 28 | cmds = line.split(',') 29 | for cmd in cmds: 30 | self._execute_cmd(cmd) 31 | 32 | planned_start_next_tick = start_time + (i + 1) * self._context.args.tick_duration 33 | current_time = time.time() 34 | duration = current_time - actual_start 35 | logging.info('Tick={} with planned_start={}, actual_start={} and duration={:F},' 36 | ' created txs={} and blocks={}' 37 | .format(i, planned_start, actual_start, duration, 38 | self._txs_count, self._blocks_count)) 39 | 40 | if current_time < planned_start_next_tick: 41 | difference = planned_start_next_tick - current_time 42 | logging.info('Sleep {} seconds for next tick={}'.format(difference, i)) 43 | utils.sleep(difference) 44 | except Exception: 45 | logging.exception('Simulation could not execute all events because of an exception') 46 | 47 | def _execute_cmd(self, cmd): 48 | cmd_parts = cmd.split(' ') 49 | 50 | if cmd_parts[0] == 'tx': 51 | node = self._context.nodes[cmd_parts[1]] 52 | try: 53 | node.generate_tx() 54 | except JSONRPCError: 55 | logging.exception('Could not generate tx for node={}'.format(node.name)) 56 | self._txs_count += 1 57 | elif cmd_parts[0] == 'block': 58 | node = self._context.nodes[cmd_parts[1]] 59 | try: 60 | node.generate_blocks() 61 | except JSONRPCError: 62 | logging.exception('Could not generate block for node={}'.format(node.name)) 63 | self._blocks_count += 1 64 | elif len(cmd) == 0: 65 | pass 66 | else: 67 | raise SimulationException('Unknown cmd={} in {}-file'.format(cmd_parts[0], config.ticks_csv)) 68 | 69 | 70 | def _calc_analyze_skip_ticks(blocks_per_tick, tx_per_tick): 71 | return max([1, math.ceil(1/blocks_per_tick), math.ceil(1/tx_per_tick)]) 72 | 73 | 74 | class SimulationException(Exception): 75 | pass 76 | -------------------------------------------------------------------------------- /code/simcoin.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # -*- coding: utf-8 -*- 3 | from simulationfiles import nodes_config 4 | from simulationfiles import ticks_config 5 | from simulationfiles import network_config 6 | import sys 7 | import argparse 8 | import simulation_cmd 9 | import config 10 | import os 11 | import bitcoin 12 | import utils 13 | import multirun_cmd 14 | import run_cmd 15 | import logging 16 | 17 | commands = { 18 | 'nodes': nodes_config.create, 19 | 'network': network_config.create, 20 | 'ticks': ticks_config.create, 21 | 'simulate': simulation_cmd.run, 22 | 'run': run_cmd.run, 23 | 
'multi-run': multirun_cmd.run, 24 | } 25 | 26 | 27 | def _parse_args(): 28 | parser = argparse.ArgumentParser() 29 | 30 | parser.add_argument('--verbose' 31 | , action="store_true" 32 | , help='Verbose log.' 33 | ) 34 | 35 | parser.add_argument('--tag' 36 | , default='run' 37 | , help='Tag that will be added to every csv file.' 38 | ) 39 | 40 | args = parser.parse_known_args(sys.argv[2:])[0] 41 | utils.update_args(args) 42 | 43 | return args 44 | 45 | 46 | def main(): 47 | cmd_parser = argparse.ArgumentParser( 48 | description='Simcoin a cryptocurrency simulator.', 49 | usage=''' [] 50 | 51 | The commands are: 52 | nodes creates the {} for a simulation 53 | network creates the {} for a simulation 54 | ticks creates the {} for a simulation 55 | simulate executes a simulation based on the {}, {} and {} 56 | run runs all above commands 57 | multi-run run the simulation multiple times 58 | '''.format( 59 | config.nodes_csv_file_name, 60 | config.network_csv_file_name, 61 | config.ticks_csv_file_name, 62 | config.nodes_csv_file_name, 63 | config.network_csv_file_name, 64 | config.ticks_csv_file_name, 65 | )) 66 | 67 | cmd_parser.add_argument('command', help='Subcommand to run') 68 | 69 | # parse_args defaults to [1:] for args, but you need to 70 | # exclude the rest of the args too, or validation will fail 71 | args = cmd_parser.parse_args(sys.argv[1:2]) 72 | command = args.command 73 | if command not in commands: 74 | print('Unrecognized command') 75 | cmd_parser.print_help() 76 | exit(1) 77 | # use dispatch pattern to invoke method with same name 78 | 79 | if not os.path.exists(config.data_dir): 80 | os.makedirs(config.data_dir) 81 | 82 | bitcoin.SelectParams('regtest') 83 | 84 | args = _parse_args() 85 | utils.config_logger(args.verbose) 86 | logging.info("Arguments called with: {}".format(sys.argv)) 87 | logging.info("Parsed arguments in simcoin.py: {}".format(args)) 88 | 89 | logging.info('Executing command={}'.format(command)) 90 | commands[command]() 91 | 92 | 93 | if __name__ == '__main__': 94 | main() 95 | -------------------------------------------------------------------------------- /code/tests/test_utils.py: -------------------------------------------------------------------------------- 1 | from unittest import TestCase 2 | import utils 3 | from mock import patch 4 | from mock import mock_open 5 | from textwrap import dedent 6 | from collections import namedtuple 7 | from argparse import Namespace 8 | 9 | 10 | class TestUtils(TestCase): 11 | 12 | @patch('builtins.exit') 13 | @patch('os.path.isfile') 14 | def test_check_for_files_file_not_existing(self, m_isfile, m_exit): 15 | m_isfile.return_value = False 16 | 17 | utils.check_for_file('file.txt') 18 | 19 | self.assertTrue(m_exit.called) 20 | 21 | @patch('builtins.exit') 22 | @patch('os.path.isfile') 23 | def test_check_for_files_file_exists(self, m_isfile, m_exit): 24 | m_isfile.return_value = True 25 | 26 | utils.check_for_file('file.txt') 27 | 28 | self.assertFalse(m_exit.called) 29 | 30 | @patch('os.path.isfile', lambda path: True) 31 | def test_read(self): 32 | data = dedent(""" 33 | int,float,string 34 | 1,45.5,node-1 35 | """).strip() 36 | 37 | m = mock_open(read_data=data) 38 | m.return_value.__iter__ = lambda self: self 39 | m.return_value.__next__ = lambda self: next(iter(self.readline, '')) 40 | with patch('builtins.open', m): 41 | data = utils.read_csv('/some.csv')[0] 42 | self.assertEqual(data.int, 1) 43 | self.assertEqual(data.float, 45.5) 44 | self.assertEqual(data.string, 'node-1') 45 | 46 | def 
test_read_empty_file(self): 47 | m = mock_open(read_data='') 48 | m.return_value.__iter__ = lambda self: self 49 | m.return_value.__next__ = lambda self: next(iter(self.readline, '')) 50 | with patch('builtins.open', m): 51 | data = utils.read_csv('/some.csv') 52 | self.assertEqual(data, []) 53 | 54 | @patch('utils.read_csv', lambda file: []) 55 | @patch('builtins.open', new_callable=mock_open) 56 | def test_update_args_1(self, m_open): 57 | utils.update_args(Namespace(int=1, float=1.1, string='test')) 58 | 59 | handle = m_open() 60 | self.assertEqual(handle.write.call_count, 2) 61 | self.assertIn('string', handle.write.call_args_list[0][0][0]) 62 | self.assertIn('float', handle.write.call_args_list[0][0][0]) 63 | self.assertIn('int', handle.write.call_args_list[0][0][0]) 64 | 65 | self.assertIn('1', handle.write.call_args_list[1][0][0]) 66 | self.assertIn('1.1', handle.write.call_args_list[1][0][0]) 67 | self.assertIn('test', handle.write.call_args_list[1][0][0]) 68 | 69 | @patch('utils.read_csv') 70 | @patch('builtins.open', new_callable=mock_open) 71 | def test_update_args_2(self, m_open, m_read): 72 | Args = namedtuple('Args', 'int float') 73 | m_read.return_value = [Args(2, 2.2)] 74 | 75 | utils.update_args(Namespace(int=1, string='test')) 76 | 77 | handle = m_open() 78 | self.assertEqual(handle.write.call_count, 2) 79 | 80 | self.assertIn('1', handle.write.call_args_list[1][0][0]) 81 | self.assertIn('2.2', handle.write.call_args_list[1][0][0]) 82 | self.assertIn('test', handle.write.call_args_list[1][0][0]) 83 | -------------------------------------------------------------------------------- /code/tests/test_ticks_config.py: -------------------------------------------------------------------------------- 1 | from unittest import TestCase 2 | from mock import patch 3 | from simulationfiles import ticks_config 4 | from simulationfiles.nodes_config import NodeConfig 5 | import sys 6 | import os 7 | 8 | 9 | class TestTicksConfig(TestCase): 10 | 11 | @classmethod 12 | def setUpClass(cls): 13 | sys.stdout = open(os.devnull, 'w') 14 | 15 | def test_calc_expected_events_two_events_per_tick(self): 16 | expected_events = ticks_config._calc_expected_events(10, 0.5) 17 | 18 | self.assertEqual(expected_events, 25) 19 | 20 | def test_calc_expected_events_one_event_every_two_tick(self): 21 | expected_events = ticks_config._calc_expected_events(10, 2) 22 | 23 | self.assertEqual(expected_events, 70) 24 | 25 | def test_calc_expected_events_one_event_per_tick(self): 26 | expected_events = ticks_config._calc_expected_events(10, 1) 27 | 28 | self.assertEqual(expected_events, 40) 29 | 30 | def test_create_ticks(self): 31 | end = 4 32 | txs_per_tick = 2 33 | node_0 = NodeConfig('group', 'node-0', 0, 0, None) 34 | node_1 = NodeConfig('group', 'node-1', 0, 0, None) 35 | nodes = [node_0, node_1] 36 | block_events = {'node-0': [0.5, 2.1, end], 'node-1': [0.5, 2.1, end]} 37 | 38 | event_ticks = ticks_config._create_ticks(nodes, block_events, txs_per_tick, end) 39 | 40 | self.assertEqual(len(event_ticks), 4) 41 | self.assertEqual(len(event_ticks[0]), 4) 42 | self.assertEqual(len(event_ticks[1]), 2) 43 | self.assertEqual(len(event_ticks[2]), 4) 44 | self.assertEqual(len(event_ticks[3]), 2) 45 | self.assertTrue('tx ' in event_ticks[0][0]) 46 | self.assertTrue('tx ' in event_ticks[1][0]) 47 | self.assertTrue('tx ' in event_ticks[2][0]) 48 | self.assertTrue('tx ' in event_ticks[3][0]) 49 | self.assertTrue('block ' in event_ticks[0][2]) 50 | self.assertTrue('block ' in event_ticks[0][3]) 51 | self.assertTrue('block ' 
in event_ticks[2][2]) 52 | self.assertTrue('block ' in event_ticks[2][3]) 53 | 54 | def test_create_ticks_with_multiple_blocks_in_one_tick(self): 55 | end = 4 56 | node_0 = NodeConfig('group', 'node-0', 0, 0, None) 57 | block_events = {'node-0': [0.5, 0.6, end]} 58 | 59 | ticks_config._create_ticks([node_0], block_events, 0, end) 60 | 61 | def test_create_block_series(self): 62 | block_events = ticks_config._create_block_series(0.5, 5, 10) 63 | 64 | self.assertEqual(len(block_events), 10) 65 | 66 | @patch('simulationfiles.ticks_config._calc_expected_events', lambda a, b: 5) 67 | @patch('simulationfiles.ticks_config._create_block_series', lambda a, b, c: [10, 11, 9]) 68 | def test_create_block_events(self): 69 | nodes = [NodeConfig('group', 'node-0', 0.5, 0, None), NodeConfig('group', 'node-1', 0.5, 0, None)] 70 | amount_of_ticks = 10 71 | blocks_per_tick = .5 72 | 73 | block_events = ticks_config._create_block_events(nodes, amount_of_ticks, blocks_per_tick) 74 | self.assertEqual(len(block_events), 2) 75 | for block_events in block_events.values(): 76 | self.assertEqual(block_events, [10, 11, 9]) 77 | -------------------------------------------------------------------------------- /code/tests/test_prepare.py: -------------------------------------------------------------------------------- 1 | from unittest import TestCase 2 | from mock import MagicMock 3 | from mock import patch 4 | from mock import mock_open 5 | import prepare 6 | from prepare import Prepare 7 | import config 8 | import bitcoin 9 | from mock import Mock 10 | 11 | 12 | class TestPrepare(TestCase): 13 | 14 | def setUp(self): 15 | self.context = Mock() 16 | self.prepare = Prepare(self.context) 17 | 18 | bitcoin.SelectParams('regtest') 19 | 20 | @patch('node.wait_until_height_reached', lambda node, height: None) 21 | @patch('utils.sleep', lambda time: None) 22 | @patch('prepare._calc_number_of_tx_chains', lambda txs_per_tick, block_per_tick, amount_of_nodes: 5) 23 | def test_warmup_block_generation(self): 24 | node_0 = MagicMock() 25 | node_1 = MagicMock() 26 | nodes = [node_0, node_1] 27 | self.context.nodes.values.return_value = nodes 28 | 29 | self.prepare._pool = MagicMock() 30 | self.prepare._give_nodes_spendable_coins() 31 | 32 | self.assertEqual(node_0.execute_rpc.call_count, 2) 33 | self.assertEqual(node_1.execute_rpc.call_count, 2) 34 | 35 | @patch('os.path.exists') 36 | @patch('os.path.islink') 37 | @patch('os.makedirs') 38 | @patch('bash.check_output') 39 | @patch('builtins.open', new_callable=mock_open) 40 | def test_prepare_simulation_dir(self, m_open, m_check_output, m_makedirs, m_islink, m_exists): 41 | m_exists.return_value = False 42 | self.prepare._pool = MagicMock() 43 | 44 | self.prepare._prepare_simulation_dir() 45 | 46 | self.assertEqual(m_makedirs.call_count, 3) 47 | self.assertEqual(m_check_output.call_count, 10) 48 | 49 | @patch('bash.check_output') 50 | def test_remove_old_containers_if_exists(self, m_check_output): 51 | m_check_output.return_value = ['container1', 'container2'] 52 | 53 | prepare._remove_old_containers_if_exists() 54 | 55 | self.assertEqual(m_check_output.call_count, 2) 56 | 57 | @patch('bash.check_output') 58 | def test_remove_old_containers_if_exists_no_old_containers(self, m_check_output): 59 | m_check_output.return_value = [] 60 | 61 | prepare._remove_old_containers_if_exists() 62 | 63 | self.assertEqual(m_check_output.call_count, 1) 64 | 65 | @patch('utils.sleep', lambda t: None) 66 | @patch('bash.call_silent') 67 | @patch('bash.check_output') 68 | def test_recreate_network(self, 
m_check_output, m_call_silent): 69 | m_call_silent.return_value = 0 70 | 71 | prepare._recreate_network() 72 | 73 | self.assertEqual(m_check_output.call_count, 2) 74 | self.assertEqual(m_call_silent.call_count, 1) 75 | 76 | @patch('utils.sleep', lambda t: None) 77 | @patch('bash.call_silent') 78 | @patch('bash.check_output') 79 | def test_recreate_network_no_network(self, m_check_output, m_call_silent): 80 | m_call_silent.return_value = -1 81 | 82 | prepare._recreate_network() 83 | 84 | self.assertEqual(m_check_output.call_count, 1) 85 | 86 | def test_calc_number_of_tx_chains(self): 87 | config.max_in_mempool_ancestors = 25 88 | amount = prepare._calc_number_of_tx_chains(2, 1 / 600, 10) 89 | 90 | self.assertEqual(amount, 51) 91 | -------------------------------------------------------------------------------- /CODE_OF_CONDUCT.md: -------------------------------------------------------------------------------- 1 | # Contributor Covenant Code of Conduct 2 | 3 | ## Our Pledge 4 | 5 | In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to making participation in our project and our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, gender identity and expression, level of experience, nationality, personal appearance, race, religion, or sexual identity and orientation. 6 | 7 | ## Our Standards 8 | 9 | Examples of behavior that contributes to creating a positive environment include: 10 | 11 | * Using welcoming and inclusive language 12 | * Being respectful of differing viewpoints and experiences 13 | * Gracefully accepting constructive criticism 14 | * Focusing on what is best for the community 15 | * Showing empathy towards other community members 16 | 17 | Examples of unacceptable behavior by participants include: 18 | 19 | * The use of sexualized language or imagery and unwelcome sexual attention or advances 20 | * Trolling, insulting/derogatory comments, and personal or political attacks 21 | * Public or private harassment 22 | * Publishing others' private information, such as a physical or electronic address, without explicit permission 23 | * Other conduct which could reasonably be considered inappropriate in a professional setting 24 | 25 | ## Our Responsibilities 26 | 27 | Project maintainers are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior. 28 | 29 | Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful. 30 | 31 | ## Scope 32 | 33 | This Code of Conduct applies both within project spaces and in public spaces when an individual is representing the project or its community. Examples of representing a project or community include using an official project e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. Representation of a project may be further defined and clarified by project maintainers. 34 | 35 | ## Enforcement 36 | 37 | Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team at simonmulser@gmail.com. 
The project team will review and investigate all complaints, and will respond in a way that it deems appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately. 38 | 39 | Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project's leadership. 40 | 41 | ## Attribution 42 | 43 | This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, available at [http://contributor-covenant.org/version/1/4][version] 44 | 45 | [homepage]: http://contributor-covenant.org 46 | [version]: http://contributor-covenant.org/version/1/4/ 47 | -------------------------------------------------------------------------------- /code/runner.py: -------------------------------------------------------------------------------- 1 | import config 2 | import logging 3 | import time 4 | import systemmonitor 5 | import threading 6 | import queue 7 | import math 8 | import utils 9 | from systemmonitor import CpuTimeSnapshot 10 | from systemmonitor import MemorySnapshot 11 | 12 | 13 | class Runner: 14 | def __init__(self, context, writer): 15 | self._context = context 16 | self._writer = writer 17 | self._prepare = None 18 | self._event = None 19 | self._postprocessing = None 20 | self._pill2kill = threading.Event() 21 | self._q_cpu_time = queue.Queue() 22 | self._q_memory = queue.Queue() 23 | self._system_monitor = threading.Thread( 24 | target=systemmonitor.run, args=(self._pill2kill, 25 | _calculate_frequency( 26 | self._context.args.tick_duration, 27 | self._context.args.amount_of_ticks), 28 | self._q_cpu_time, self._q_memory)) 29 | 30 | def run(self): 31 | try: 32 | self._context.step_times.append(StepTimes(time.time(), 'preparation_start')) 33 | self._prepare.execute() 34 | logging.info('End of Preparation') 35 | 36 | self._system_monitor.start() 37 | self._context.step_times.append(StepTimes(time.time(), 'simulation_start')) 38 | logging.info('Start of simulation') 39 | self._event.execute() 40 | logging.info('End of simulation') 41 | 42 | self._persist_system_snapshots() 43 | 44 | self._context.step_times.append(StepTimes(time.time(), 'postprocessing_start')) 45 | self._postprocessing.execute() 46 | except Exception as exce: 47 | self._postprocessing.clean_up_docker() 48 | raise exce 49 | 50 | def _persist_system_snapshots(self): 51 | self._pill2kill.set() 52 | self._system_monitor.join() 53 | cpu_times = list(self._q_cpu_time.queue) 54 | memory = list(self._q_memory.queue) 55 | 56 | self._writer.write_csv( 57 | CpuTimeSnapshot.file_name, 58 | CpuTimeSnapshot.csv_header, 59 | cpu_times, 60 | ) 61 | self._writer.write_csv( 62 | MemorySnapshot.file_name, 63 | MemorySnapshot.csv_header, 64 | memory, 65 | ) 66 | logging.info('Persisted {} CPU time and {} memory snapshots'.format(len(cpu_times), len(memory))) 67 | 68 | 69 | class StepTimes: 70 | __slots__ = ['_timestamp', '_type'] 71 | 72 | csv_header = ['timestamp', 'type'] 73 | 74 | def __init__(self, timestamp, _type): 75 | self._timestamp = timestamp 76 | self._type = _type 77 | 78 | def vars_to_array(self): 79 | return [self._timestamp, self._type] 80 | 81 | 82 | def _calculate_frequency(tick_duration, amount_of_ticks): 83 | frequency = math.ceil(tick_duration * amount_of_ticks / config.amount_of_system_snapshots) 84 | logging.info('With tick_duration={}, amount_of_ticks={} and 
amount_of_system_snapshots={}' 85 | ' the system monitor needs to take a snapshot every {}s' 86 | .format(tick_duration, amount_of_ticks, config.amount_of_system_snapshots, frequency)) 87 | return frequency 88 | -------------------------------------------------------------------------------- /code/clistats.py: -------------------------------------------------------------------------------- 1 | import logging 2 | from bitcoin.rpc import JSONRPCError 3 | import utils 4 | import config 5 | 6 | 7 | class CliStats: 8 | def __init__(self, context, writer): 9 | self._context = context 10 | self._writer = writer 11 | 12 | def execute(self): 13 | _persist_consensus_chain(self._calc_consensus_chain()) 14 | self._persist_node_stats() 15 | 16 | logging.info('Executed cli stats') 17 | 18 | def _calc_consensus_chain(self): 19 | height = self._context.first_block_height 20 | nodes = self._context.nodes.values() 21 | consensus_chain = [] 22 | logging.info('Calculating consensus chain starting with height={}'.format(height)) 23 | while True: 24 | block_hashes = {} 25 | failing_nodes = [] 26 | block_hash = None 27 | for node in nodes: 28 | try: 29 | block_hash = node.execute_rpc('getblockhash', height) 30 | if block_hash in block_hashes: 31 | block_hashes[block_hash].append(node.name) 32 | else: 33 | block_hashes[block_hash] = [node.name] 34 | except JSONRPCError: 35 | failing_nodes.append(node.name) 36 | if len(failing_nodes) > 0: 37 | logging.info('Stopped calculating consensus chain on height={} because nodes={}' 38 | ' have no block at this height'.format(height, failing_nodes)) 39 | break 40 | elif len(block_hashes) > 1: 41 | logging.info('Stopped calculating consensus chain on height={} because' 42 | ' nodes have different blocks ({})'.format(height, block_hashes)) 43 | break 44 | else: 45 | consensus_chain.append(block_hash) 46 | height += 1 47 | 48 | logging.info('Added block with hash={} to consensus chain'.format(block_hash)) 49 | 50 | logging.info('Calculated {} block long consensus chain from {} nodes up to height={}' 51 | .format(len(consensus_chain), len(nodes), height - 1)) 52 | return consensus_chain 53 | 54 | def _persist_node_stats(self): 55 | tips = [] 56 | for node in self._context.nodes.values(): 57 | tips.extend([Tip.from_dict(node.name, chain_tip) for chain_tip in node.execute_rpc('getchaintips')]) 58 | 59 | self._writer.write_csv(Tip.file_name, Tip.csv_header, tips) 60 | logging.info('Collected and persisted {} tips'.format(len(tips))) 61 | 62 | 63 | def _persist_consensus_chain(chain): 64 | with open(config.consensus_chain_csv, 'w') as file: 65 | file.write('hash\n') 66 | file.writelines('\n'.join(chain)) 67 | file.write('\n') 68 | 69 | 70 | class Tip: 71 | __slots__ = ['_node', '_status', '_branchlen'] 72 | 73 | csv_header = ['node', 'status', 'branchlen'] 74 | file_name = 'tips.csv' 75 | 76 | def __init__(self, node, status, branchlen): 77 | self._node = node 78 | self._status = status 79 | self._branchlen = branchlen 80 | 81 | @classmethod 82 | def from_dict(cls, node, chain_tip): 83 | return cls(node, chain_tip['status'], chain_tip['branchlen']) 84 | 85 | def vars_to_array(self): 86 | return [self._node, self._status, self._branchlen] 87 | -------------------------------------------------------------------------------- /code/utils.py: -------------------------------------------------------------------------------- 1 | import time 2 | import logging 3 | import config 4 | import sys 5 | import os 6 | import re 7 | from collections import namedtuple 8 | import csv
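# literal_eval (imported next) lets read_csv below turn persisted CSV strings back into ints and floats where possible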
9 | from ast import literal_eval 10 | 11 | 12 | def sleep(seconds): 13 | logging.debug("Sleep for {} seconds".format(seconds)) 14 | time.sleep(seconds) 15 | 16 | 17 | def config_logger(verbose): 18 | log_formatter = logging.Formatter("%(asctime)s.%(msecs)03d000 [%(processName)s-%(threadName)-12.12s] " 19 | "[%(levelname)-5.5s] %(message)s", "%Y-%m-%d %H:%M:%S") 20 | logging.Formatter.converter = time.gmtime 21 | root_logger = logging.getLogger() 22 | 23 | file_handler = logging.FileHandler(config.log_file, mode='w') 24 | file_handler.setFormatter(log_formatter) 25 | root_logger.addHandler(file_handler) 26 | 27 | console_handler = logging.StreamHandler(sys.stdout) 28 | console_handler.setFormatter(log_formatter) 29 | root_logger.addHandler(console_handler) 30 | 31 | if verbose: 32 | root_logger.setLevel(logging.DEBUG) 33 | else: 34 | root_logger.setLevel(logging.INFO) 35 | 36 | 37 | def check_for_file(file): 38 | if not os.path.isfile(file): 39 | command = re.split(r'\.|/', file)[-2] 40 | print("File={} not found. Please generate this with the command `python3 simcoin.py {} [args].`" 41 | .format(file, command)) 42 | exit(-1) 43 | 44 | 45 | def read_csv(file_name): 46 | if os.path.isfile(file_name): 47 | with open(file_name, 'r') as file: 48 | try: 49 | reader = csv.reader(file) 50 | Object = namedtuple("Object", next(reader)) 51 | objects = [] 52 | for line in reader: 53 | for i, var in enumerate(line): 54 | try: 55 | line[i] = literal_eval(var) 56 | except ValueError: 57 | pass 58 | except SyntaxError: 59 | pass 60 | objects.append(Object._make(line)) 61 | return objects 62 | except StopIteration: 63 | logging.debug('File={} does not have enough lines'.format(file_name)) 64 | return [] 65 | else: 66 | return [] 67 | 68 | 69 | def read_args(): 70 | objects = read_csv(config.args_csv) 71 | if len(objects) == 0: 72 | print("File={} is empty. Generate the file first with commands provided by" 73 | " `python3 simcoin.py`".format(config.args_csv)) 74 | exit(-1) 75 | elif len(objects) == 1: 76 | return objects[0] 77 | else: 78 | print("File={} has too many entries. Delete the file and regenerate it with commands provided by" 79 | " `python3 simcoin.py`".format(config.args_csv)) 80 | exit(-1) 81 | 82 | 83 | def update_args(args): 84 | persisted_args = {} 85 | persisted_tuples = read_csv(config.args_csv) 86 | if len(persisted_tuples) == 1: 87 | persisted_args = dict(persisted_tuples[0]._asdict()) 88 | elif len(persisted_tuples) > 1:
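# args.csv should hold exactly one header row plus one value row, e.g. (illustrative values): # verbose,tag # False,run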
89 | print("File={} has too many entries. Delete the file and regenerate it with commands provided by" 90 | " `python3 simcoin.py`".format(config.args_csv)) 91 | exit(-1) 92 | 93 | data = {**persisted_args, **vars(args)} 94 | cleaned_data = {k: v for k, v in data.items() if v is not None} 95 | with open(config.args_csv, 'w') as file: 96 | writer = csv.writer(file) 97 | writer.writerow(cleaned_data.keys()) 98 | writer.writerow(cleaned_data.values()) 99 | 100 | 101 | def json_object_hook(d): 102 | return namedtuple('X', d.keys())(*d.values()) 103 | -------------------------------------------------------------------------------- /code/simulationfiles/ticks_config.py: -------------------------------------------------------------------------------- 1 | import csv 2 | import random 3 | import pandas 4 | import numpy as np 5 | import config 6 | import argparse 7 | from simulationfiles import checkargs 8 | import sys 9 | import utils 10 | import logging 11 | 12 | np.set_printoptions(precision=2, suppress=True) 13 | 14 | 15 | def _create_parser(): 16 | parser = argparse.ArgumentParser() 17 | 18 | parser.add_argument('--amount-of-ticks' 19 | , default=60 20 | , type=checkargs.check_positive_int 21 | , help='Amount of ticks.') 22 | 23 | parser.add_argument('--blocks-per-tick' 24 | , default=0.1 25 | , type=checkargs.check_positive_float 26 | , help='Blocks per tick.' 27 | ) 28 | 29 | parser.add_argument('--txs-per-tick' 30 | , default=1 31 | , type=checkargs.check_positive_int 32 | , help='Txs per tick.' 33 | ) 34 | 35 | parser.add_argument('--seed' 36 | , default=0 37 | , type=checkargs.check_positive_int 38 | , help='Set the seed.' 39 | ) 40 | return parser 41 | 42 | 43 | def create(unknown_arguments=False): 44 | logging.info('Called ticks config') 45 | 46 | utils.check_for_file(config.nodes_csv) 47 | nodes = utils.read_csv(config.nodes_csv) 48 | 49 | parser = _create_parser() 50 | if unknown_arguments: 51 | args = parser.parse_known_args(sys.argv[2:])[0] 52 | else: 53 | args = parser.parse_args(sys.argv[2:]) 54 | logging.info("Parsed arguments in {}: {}".format(__name__, args)) 55 | utils.update_args(args) 56 | 57 | random.seed(args.seed) 58 | np.random.seed(args.seed) 59 | 60 | block_events = _create_block_events(nodes, args.amount_of_ticks, args.blocks_per_tick) 61 | 62 | ticks = _create_ticks(nodes, block_events, args.txs_per_tick, args.amount_of_ticks) 63 | 64 | logging.info('Created {}:'.format(config.ticks_csv)) 65 | print(pandas.DataFrame(ticks)) 66 | 67 | with open(config.ticks_csv, "w") as file: 68 | writer = csv.writer(file) 69 | writer.writerows(ticks) 70 | logging.info('End ticks config') 71 | 72 | 73 | def _calc_expected_events(number_of_ticks, events_per_tick): 74 | # 3 times + 10 to have some buffer 75 | return int(number_of_ticks * events_per_tick * 3) + 10 76 | 77 | 78 | def _create_block_events(nodes, amount_of_ticks, blocks_per_tick): 79 | expected_blocks = _calc_expected_events(amount_of_ticks, blocks_per_tick) 80 | block_events = {} 81 | for node in nodes: 82 | block_events[node.name] = _create_block_series(node.share, blocks_per_tick, expected_blocks) 83 | return block_events 84 | 85 | 86 | def _create_block_series(share, blocks_per_tick, expected_blocks): 87 | random_event_ticks = np.random.exponential((1 / blocks_per_tick) * (1 / share), expected_blocks) 88 | block_events = np.cumsum(random_event_ticks) 89 | return block_events.tolist() 90 | 91 | 92 | def _create_ticks(nodes, block_events, txs_per_tick, amount_of_ticks): 93 | index_tx = 0 94 | ticks = [[] for _ in range(amount_of_ticks)] 95 | for index, tick in
enumerate(ticks): 96 | for i in range(txs_per_tick): 97 | tick.append('tx ' + random.choice(nodes).name) 98 | index_tx += 1 99 | 100 | for node in block_events.keys(): 101 | pop_count = 0 102 | while block_events[node][0] < index + 1: 103 | tick.append('block ' + node) 104 | block_events[node].pop(0) 105 | pop_count += 1 106 | if pop_count > 1: 107 | exit('A tick contains multiple block events of one node. Change your input arguments.') 108 | return ticks 109 | -------------------------------------------------------------------------------- /code/simulationfiles/nodes_config.py: -------------------------------------------------------------------------------- 1 | import json 2 | import config 3 | import argparse 4 | import sys 5 | import utils 6 | import bash 7 | from cmd import dockercmd 8 | import logging 9 | import csv 10 | from collections import namedtuple 11 | 12 | node_groups = [ 13 | {'argparse': '--group-a', 'variable': 'group_a', 'default': 14 | [10, 1, 200, config.standard_image]}, 15 | {'argparse': '--group-b', 'variable': 'group_b', 'default': None}, 16 | {'argparse': '--group-c', 'variable': 'group_c', 'default': None}, 17 | {'argparse': '--group-d', 'variable': 'group_d', 'default': None}, 18 | {'argparse': '--group-e', 'variable': 'group_e', 'default': None}, 19 | ] 20 | 21 | 22 | def _create_parser(): 23 | parser = argparse.ArgumentParser() 24 | 25 | for node_group in node_groups: 26 | parser.add_argument(node_group['argparse'] 27 | , default=node_group['default'] 28 | , nargs='+' 29 | , help='{}. Pass [amount] [share] [latency] [docker-image]' 30 | .format(node_group['variable']) 31 | ) 32 | return parser 33 | 34 | 35 | def create(unknown_arguments=False): 36 | logging.info('Called nodes config') 37 | 38 | parser = _create_parser() 39 | if unknown_arguments: 40 | args = parser.parse_known_args(sys.argv[2:])[0] 41 | else: 42 | args = parser.parse_args(sys.argv[2:]) 43 | logging.info("Parsed arguments in {}: {}".format(__name__, args)) 44 | utils.update_args(args) 45 | 46 | nodes = [] 47 | for index, node_group in enumerate(node_groups): 48 | node_args = getattr(args, node_group['variable']) 49 | if node_args: 50 | if len(node_args) != config.number_of_node_group_arguments: 51 | parser.exit(-1, 'Pass all {} arguments [amount] [share] [latency] [docker-image] for {}\n' 52 | .format(config.number_of_node_group_arguments, node_group['variable'])) 53 | _check_if_image_exists(node_args) 54 | 55 | nodes.extend(_create_node_group(node_args, node_group['variable'], index + 1)) 56 | 57 | _check_if_share_sum_is_1(nodes) 58 | 59 | logging.info('Created {}:'.format(config.nodes_csv)) 60 | print(json.dumps([node for node in nodes], indent=4)) 61 | 62 | with open(config.nodes_csv, 'w') as file: 63 | writer = csv.writer(file) 64 | writer.writerow(['group', 'name', 'share', 'latency', 'docker_image']) 65 | writer.writerows( 66 | [[node.group, node.name, node.share, node.latency, node.docker_image] for node in nodes]) 67 | logging.info('End nodes config') 68 | 69 | 70 | def _check_if_image_exists(node_args): 71 | docker_image = str(node_args[3]) 72 | 73 | return_value = bash.call_silent(dockercmd.inspect(docker_image)) 74 | if return_value != 0: 75 | logging.error("Image {} doesn't exist. 
Check `docker images` for available images and" 76 | " consult the Makefile for how to create the image.".format(docker_image)) 77 | exit(-1) 78 | 79 | 80 | def _check_if_share_sum_is_1(nodes): 81 | sum_of_shares = 0 82 | for node in nodes: 83 | sum_of_shares += node.share 84 | sum_of_shares = round(sum_of_shares, 2) 85 | if sum_of_shares != 1: 86 | logging.error('Sum of shares should be 1. It was {} instead.'.format(sum_of_shares)) 87 | exit(-1) 88 | 89 | 90 | def _create_node_group(node_args, group, index): 91 | amount = int(node_args[0]) 92 | share = float(node_args[1]) 93 | latency = int(node_args[2]) 94 | docker_image = str(node_args[3]) 95 | 96 | nodes = [] 97 | for i in range(amount): 98 | nodes.append(NodeConfig(group, config.node_name.format(index, i + 1), share/amount, latency, docker_image)) 99 | return nodes 100 | 101 | 102 | NodeConfig = namedtuple('NodeConfig', 'group name share latency docker_image') 103 | -------------------------------------------------------------------------------- /code/simulationfiles/network_config.py: -------------------------------------------------------------------------------- 1 | import csv 2 | import random 3 | import pandas 4 | import config 5 | import argparse 6 | from simulationfiles import checkargs 7 | import sys 8 | import utils 9 | import logging 10 | 11 | 12 | def _create_parser(): 13 | parser = argparse.ArgumentParser() 14 | 15 | parser.add_argument('--seed' 16 | , default=0 17 | , type=checkargs.check_positive_int 18 | , help='Set the seed' 19 | ) 20 | 21 | parser.add_argument('--connectivity' 22 | , default=1 23 | , type=checkargs.check_percentage 24 | , help='Connectivity between nodes.' 25 | ) 26 | 27 | return parser 28 | 29 | 30 | def create(unknown_arguments=False): 31 | logging.info('Called network config') 32 | 33 | utils.check_for_file(config.nodes_csv) 34 | nodes = utils.read_csv(config.nodes_csv) 35 | 36 | parser = _create_parser() 37 | if unknown_arguments: 38 | args = parser.parse_known_args(sys.argv[2:])[0] 39 | else: 40 | args = parser.parse_args(sys.argv[2:]) 41 | logging.info("Parsed arguments in {}: {}".format(__name__, args)) 42 | utils.update_args(args) 43 | 44 | random.seed(args.seed) 45 | 46 | header = _create_header(nodes) 47 | 48 | matrix = _create_matrix(header, args.connectivity) 49 |
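# the generated topology must form a single connected component; an isolated node could never receive blocks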
Consider to raise the connectivity.") 52 | 53 | logging.info('Created {}:'.format(config.network_csv)) 54 | print(pandas.DataFrame(matrix)) 55 | 56 | with open(config.network_csv, "w") as file: 57 | writer = csv.writer(file) 58 | writer.writerows(matrix) 59 | logging.info('End network config') 60 | 61 | 62 | def _create_header(nodes): 63 | header = [''] 64 | for node in nodes: 65 | name = node.name 66 | header.append(name) 67 | 68 | return header 69 | 70 | 71 | def _create_matrix(header, connectivity): 72 | length = len(header) 73 | matrix = [[] for _ in range(length)] 74 | 75 | for i in range(1, length): 76 | matrix[i] = [-1 for _ in range(length)] 77 | matrix[i][0] = header[i] 78 | matrix[0] = header 79 | 80 | for i in range(1, length): 81 | for j in range(1, i + 1): 82 | if i is j: 83 | matrix[i][i] = 0 84 | elif random.random() < connectivity: 85 | if i % 2 == j % 2: 86 | matrix[i][j] = 1 87 | matrix[j][i] = 0 88 | else: 89 | matrix[i][j] = 0 90 | matrix[j][i] = 1 91 | else: 92 | matrix[i][j] = matrix[j][i] = 0 93 | return matrix 94 | 95 | 96 | def _check_if_fully_connected(matrix): 97 | connected = _recursive_check(matrix) 98 | 99 | return len(connected) == len(matrix) - 1 100 | 101 | 102 | def _recursive_check(matrix, visited=None, start=1): 103 | if visited is None: 104 | visited = {key: False for key in range(1, len(matrix))} 105 | 106 | if visited[start]: 107 | return [] 108 | visited[start] = True 109 | output = [start] 110 | for neighbour in range(1, len(matrix)): 111 | if matrix[start][neighbour] > 0: 112 | output.extend(_recursive_check(matrix, visited, neighbour)) 113 | return output 114 | 115 | 116 | def read_connections(): 117 | utils.check_for_file(config.network_csv) 118 | connections = {} 119 | network_config = pandas.read_csv(open(config.network_csv), index_col=0) 120 | 121 | for node_row, row in network_config.iterrows(): 122 | connections[node_row] = [] 123 | for node_column, value in row.iteritems(): 124 | if node_column == node_row: 125 | pass 126 | elif value == 1: 127 | connections[node_row].append(node_column) 128 | 129 | return connections 130 | -------------------------------------------------------------------------------- /code/multirun_cmd.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | from simulationfiles import checkargs 3 | import sys 4 | import utils 5 | import logging 6 | from simulationfiles import nodes_config 7 | from simulationfiles import ticks_config 8 | from simulationfiles import network_config 9 | import simulation_cmd 10 | import os 11 | import config 12 | import bash 13 | from cmd import rcmd 14 | import parse 15 | import systemmonitor 16 | import clistats 17 | from argparse import Namespace 18 | 19 | files_to_concat = [ 20 | config.analysed_tick_infos_file_name, 21 | parse.BlockCreateEvent.file_name_after_R_preprocessing, 22 | parse.BlockStatsEvent.file_name_after_R_preprocessing, 23 | parse.BlockReceivedEvent.file_name_after_R_preprocessing, 24 | parse.BlockReconstructEvent.file_name_after_R_preprocessing, 25 | parse.BlockExceptionEvent.file_name, 26 | parse.UpdateTipEvent.file_name_after_R_preprocessing, 27 | parse.PeerLogicValidationEvent.file_name_after_R_preprocessing, 28 | config.consensus_chain_csv_file_name, 29 | 30 | parse.TxEvent.file_name_after_R_preprocessing, 31 | parse.TxReceivedEvent.file_name_after_R_preprocessing, 32 | parse.TxExceptionEvent.file_name, 33 | 34 | parse.RPCExceptionEvent.file_name, 35 | clistats.Tip.file_name, 36 | 37 | 
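# per-run copies of the CSVs in this list are merged by _concat_files() below: the header is taken once from run-1, then the data rows of every run are appended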
37 | config.step_times_csv_file_name, 38 | parse.TickEvent.file_name, 39 | 40 | systemmonitor.CpuTimeSnapshot.file_name, 41 | systemmonitor.MemorySnapshot.file_name, 42 | ] 43 | 44 | 45 | def _parse_args(): 46 | parser = argparse.ArgumentParser() 47 | 48 | parser.add_argument('--repeat' 49 | , default=2 50 | , type=checkargs.check_positive_int 51 | , help='Number of repetitions of the simulation.' 52 | ) 53 | 54 | args = parser.parse_known_args(sys.argv[2:])[0] 55 | utils.update_args(args) 56 | return args 57 | 58 | 59 | def run(): 60 | args = _parse_args() 61 | logging.info("Parsed arguments in {}: {}".format(__name__, args)) 62 | 63 | _prepare() 64 | 65 | nodes_config.create(unknown_arguments=True) 66 | ticks_config.create(unknown_arguments=True) 67 | network_config.create(unknown_arguments=True) 68 | 69 | for i in range(args.repeat): 70 | run_number = str(i + 1) 71 | logging.info('Starting {}/{} simulation'.format(run_number, args.repeat)) 72 | 73 | utils.update_args(Namespace(tag_appendix='_' + run_number)) 74 | simulation_cmd.run(unknown_arguments=True) 75 | 76 | bash.check_output('cp -r {}/postprocessing {}/run-{}' 77 | .format(config.soft_link_to_run_dir, config.soft_link_to_multi_run_dir, run_number)) 78 | bash.check_output('cp {} {}/run-{}'.format(config.run_log, config.soft_link_to_multi_run_dir, run_number)) 79 | logging.info('Finished {}/{} simulation'.format(run_number, args.repeat)) 80 | 81 | for file in [config.args_csv, config.ticks_csv, config.analysed_ticks_csv, 82 | config.general_infos_csv, config.nodes_csv, config.network_csv]: 83 | bash.check_output('cp {} {}/.'.format(file, config.soft_link_to_multi_run_dir)) 84 | _concat_files() 85 | 86 | bash.check_output(rcmd.create_report(config.soft_link_to_multi_run_dir)) 87 | logging.info('Created report in folder={}'.format(config.soft_link_to_multi_run_dir)) 88 | 89 | 90 | def _prepare(): 91 | os.makedirs(config.multi_run_dir) 92 | 93 | if os.path.islink(config.soft_link_to_multi_run_dir): 94 | bash.check_output('unlink {}'.format(config.soft_link_to_multi_run_dir)) 95 | bash.check_output('cd {}; ln -s {} {}'.format(config.data_dir, config.multi_run_dir_name, config.last_multi_run)) 96 | 97 | 98 | def _concat_files(): 99 | for file in files_to_concat: 100 | bash.check_output('head -n 1 {}/run-1/{} > {}/{}' 101 | .format(config.multi_run_dir, file, config.multi_run_dir, file)) 102 | bash.check_output('sed -s 1d {}/*/{} >> {}/{}' 103 | .format(config.multi_run_dir, file, config.multi_run_dir, file)) 104 | -------------------------------------------------------------------------------- /code/postprocessing.py: -------------------------------------------------------------------------------- 1 | from clistats import CliStats 2 | from parse import Parser 3 | import config 4 | import bash 5 | import logging 6 | from cmd import rcmd 7 | from cmd import dockercmd 8 | import utils 9 | from multiprocessing import Pool 10 | from multiprocessing.dummy import Pool as ThreadPool 11 | import subprocess 12 | from runner import StepTimes 13 | import time 14 | import csv 15 | import node as node_utils 16 | 17 | 18 | class PostProcessing: 19 | def __init__(self, context, writer): 20 | self._context = context 21 | self._writer = writer 22 | self._pool = None 23 | self._thread_pool = None 24 | 25 | def execute(self): 26 | self._pool = Pool(config.pool_processors) 27 | self._thread_pool = ThreadPool(5) 28 | 29 | cli_stats = CliStats(self._context, self._writer) 30 | cli_stats.execute() 31 | 32 | self.clean_up_docker() 33 |
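# all containers are gone at this point; the remaining steps work purely on the logs and CSV files the run left behind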
34 | logging.info(config.log_line_run_end + self._context.run_name) 35 | _flush_log_handlers() 36 | _extract_from_file(config.log_file, config.run_log, 37 | config.log_line_run_start + self._context.run_name, 38 | config.log_line_run_end + self._context.run_name) 39 | 40 | parser = Parser(self._context, self._writer) 41 | parser.execute() 42 | 43 | _collect_general_information() 44 | 45 | self._context.step_times.append(StepTimes(time.time(), 'postprocessing_end')) 46 | self._writer.write_csv(config.step_times_csv_file_name, StepTimes.csv_header, self._context.step_times) 47 | 48 | _create_report() 49 | 50 | self._pool.close() 51 | self._thread_pool.close() 52 | logging.info('Executed post processing') 53 | 54 | def clean_up_docker(self): 55 | node_utils.graceful_rm(self._thread_pool, self._context.nodes.values()) 56 | logging.info('Removed all nodes') 57 | 58 | utils.sleep(1) 59 | 60 | bash.check_output(dockercmd.rm_network()) 61 | logging.info('Deleted docker network') 62 | 63 | bash.check_output(dockercmd.fix_data_dirs_permissions(self._context.run_dir)) 64 | logging.info('Fixed permissions of dirs used by docker') 65 | 66 | 67 | def _flush_log_handlers(): 68 | for handler in logging.getLogger().handlers: 69 | handler.flush() 70 | logging.debug('Flushed all logging handlers') 71 | 72 | 73 | def _extract_from_file(source, destination, start, end): 74 | with open(source, 'r') as source_file: 75 | with open(destination, 'w') as destination_file: 76 | write = False 77 | for line in source_file: 78 | if write: 79 | if end in line: 80 | destination_file.write(line) 81 | break 82 | else: 83 | destination_file.write(line) 84 | if start in line: 85 | destination_file.write(line) 86 | write = True 87 | logging.debug('Extracted from file={} lines between start={} and end={} into file {}' 88 | .format(source, start, end, destination)) 89 | 90 | 91 | def _collect_general_information(): 92 | general_infos = { 93 | 'total_memory': _try_cmd('cat /proc/meminfo | sed -n 1p | grep -ohE [0-9]+'), 94 | 'cpu_model': _try_cmd("lscpu | grep -oP 'Model name:\s+\K(.*)'"), 95 | 'cpus': _try_cmd("lscpu | grep -oP 'CPU\(s\):\s+\K([0-9]+)$'"), 96 | } 97 | 98 | with open(config.general_infos_csv, 'w') as file: 99 | writer = csv.writer(file) 100 | writer.writerow(general_infos.keys()) 101 | writer.writerow(general_infos.values()) 102 | 103 | 104 | def _try_cmd(cmd): 105 | try: 106 | return bash.check_output(cmd) 107 | except subprocess.CalledProcessError: 108 | return 'cmd={} failed'.format(cmd) 109 | 110 | 111 | def _create_report(): 112 | bash.check_output(rcmd.preprocess(config.postprocessing_dir)) 113 | bash.check_output(rcmd.create_report(config.postprocessing_dir)) 114 | logging.info('Created report in folder={}'.format(config.postprocessing_dir)) 115 | -------------------------------------------------------------------------------- /code/tests/test_clistats.py: -------------------------------------------------------------------------------- 1 | from unittest import TestCase 2 | from clistats import CliStats 3 | from mock import MagicMock 4 | from bitcoin.rpc import JSONRPCError 5 | 6 | 7 | class TestCliStats(TestCase): 8 | 9 | def __init__(self, *args, **kwargs): 10 | super(TestCliStats, self).__init__(*args, **kwargs) 11 | 12 | def setUp(self): 13 | self.context = MagicMock() 14 | self.writer = MagicMock() 15 | self.cli_stats = CliStats(self.context, self.writer) 16 | 17 | def test_calc_consensus_chain_first_node_no_block(self): 18 | node_0 = MagicMock() 19 | node_0.execute_rpc.side_effect = JSONRPCError({'code':
-1, 'message': 'error'}) 20 | self.context.first_block_height = 10 21 | self.context.nodes = {'0': node_0} 22 | 23 | chain = self.cli_stats._calc_consensus_chain() 24 | 25 | self.assertEqual(len(chain), 0) 26 | 27 | def test_calc_consensus_chain_one_node(self): 28 | node_0 = MagicMock() 29 | node_0.execute_rpc.side_effect = ['hash', JSONRPCError({'code': -1, 'message': 'error'})] 30 | 31 | self.context.first_block_height = 10 32 | self.context.nodes = {'0': node_0} 33 | 34 | chain = self.cli_stats._calc_consensus_chain() 35 | 36 | self.assertEqual(len(chain), 1) 37 | self.assertEqual(chain[0], 'hash') 38 | 39 | def test_calc_consensus_chain_multiple_nodes(self): 40 | node_0 = MagicMock() 41 | node_0.execute_rpc.side_effect = ['hash1', 'hash2', JSONRPCError({'code': -1, 'message': 'error'})] 42 | node_1 = MagicMock() 43 | node_1.execute_rpc.side_effect = ['hash1', 'hash2', JSONRPCError({'code': -1, 'message': 'error'})] 44 | 45 | self.context.first_block_height = 10 46 | self.context.nodes = {'0': node_0, '1': node_1} 47 | 48 | chain = self.cli_stats._calc_consensus_chain() 49 | 50 | self.assertEqual(len(chain), 2) 51 | self.assertEqual(chain[0], 'hash1') 52 | self.assertEqual(chain[1], 'hash2') 53 | 54 | def test_calc_consensus_chain_one_node_trailing_back(self): 55 | node_0 = MagicMock() 56 | node_0.execute_rpc.side_effect = ['hash1', 'hash2'] 57 | node_1 = MagicMock() 58 | node_1.execute_rpc.side_effect = ['hash1', JSONRPCError({'code': -1, 'message': 'error'})] 59 | 60 | self.context.first_block_height = 10 61 | self.context.nodes = {'0': node_0, '1': node_1} 62 | 63 | chain = self.cli_stats._calc_consensus_chain() 64 | 65 | self.assertEqual(len(chain), 1) 66 | self.assertEqual(chain[0], 'hash1') 67 | 68 | def test_calc_consensus_chain_different_chains(self): 69 | node_0 = MagicMock() 70 | node_0.execute_rpc.side_effect = ['hash1', 'hash2', 'hash4'] 71 | node_1 = MagicMock() 72 | node_1.execute_rpc.side_effect = ['hash1', 'hash3', 'hash4'] 73 | 74 | self.context.first_block_height = 10 75 | self.context.nodes = {'0': node_0, '1': node_1} 76 | 77 | chain = self.cli_stats._calc_consensus_chain() 78 | 79 | self.assertEqual(len(chain), 1) 80 | self.assertEqual(chain[0], 'hash1') 81 | 82 | def test_calc_consensus_chain_three_nodes(self): 83 | node_0 = MagicMock() 84 | node_0.execute_rpc.side_effect = ['hash1', 'hash2', 'hash5'] 85 | node_1 = MagicMock() 86 | node_1.execute_rpc.side_effect = ['hash1', 'hash3', 'hash4'] 87 | node_2 = MagicMock() 88 | node_2.execute_rpc.side_effect = ['hash1', 'hash3', 'hash4'] 89 | 90 | self.context.first_block_height = 10 91 | self.context.nodes = {'0': node_0, '1': node_1, '2': node_2} 92 | 93 | chain = self.cli_stats._calc_consensus_chain() 94 | 95 | self.assertEqual(len(chain), 1) 96 | self.assertEqual(chain[0], 'hash1') 97 | 98 | def test_node_stats(self): 99 | node_0 = MagicMock() 100 | node_0.name = 'name' 101 | node_0.execute_rpc.return_value = [{'status': 'active', 'branchlen': 2}] 102 | self.context.nodes = {'0': node_0} 103 | 104 | self.cli_stats._persist_node_stats() 105 | 106 | self.assertEqual(self.writer.write_csv.call_args[0][1], ['node', 'status', 'branchlen']) 107 | self.assertEqual(len(self.writer.write_csv.call_args[0][2]), 1) 108 | -------------------------------------------------------------------------------- /code/tests/test_network_config.py: -------------------------------------------------------------------------------- 1 | from unittest import TestCase 2 | from simulationfiles import network_config 3 | from mock import patch 4 | 
from mock import mock_open 5 | from textwrap import dedent 6 | from simulationfiles.nodes_config import NodeConfig 7 | 8 | 9 | class TestNetworkConfig(TestCase): 10 | 11 | def test_create_header(self): 12 | header = network_config._create_header([ 13 | NodeConfig('group', 'node-1', 0, 0, None), 14 | NodeConfig('group', 'node-2', 0, 0, None) 15 | ]) 16 | 17 | self.assertEqual(len(header), 3) 18 | self.assertEqual(header, ['', 'node-1', 'node-2']) 19 | 20 | def test_create_matrix_full_connection(self): 21 | header = ['', 'node-0', 'node-1', 'node-2'] 22 | connectivity = 1 23 | 24 | matrix = network_config._create_matrix(header, connectivity) 25 | for i in range(1, len(header)): 26 | for j in range(1, i): 27 | if i != j: 28 | connection_between_nodes = matrix[i][j] + matrix[j][i] 29 | self.assertEqual(connection_between_nodes, 1) 30 | 31 | def test_create_matrix_no_connection(self): 32 | header = ['', 'node-0', 'node-1', 'node-2'] 33 | connectivity = 0 34 | 35 | matrix = network_config._create_matrix(header, connectivity) 36 | for i in range(1, len(matrix)): 37 | for j in range(1, len(matrix)): 38 | if i != j: 39 | self.assertEqual(matrix[i][j], 0) 40 | 41 | DATA_1 = dedent(""" 42 | ,node-0,node-1,node-2 43 | node-0,1,1,0 44 | node-1,1,2,1 45 | node-2,0,1,3 46 | """).strip() 47 | 48 | @patch("builtins.open", mock_open(read_data=DATA_1)) 49 | @patch('utils.check_for_file', lambda file: None) 50 | def test_read_connections(self): 51 | connections = network_config.read_connections() 52 | 53 | self.assertEqual(len(connections.keys()), 3) 54 | 55 | self.assertEqual(connections['node-0'], ['node-1']) 56 | self.assertEqual(connections['node-1'], ['node-0', 'node-2']) 57 | self.assertEqual(connections['node-2'], ['node-1']) 58 | 59 | def test_check_if_fully_connected_1(self): 60 | matrix = [ 61 | ['', 'node-0', 'node-1'], 62 | ['node-0', 1, 0], 63 | ['node-1', 0, 1] 64 | ] 65 | fully_connected = network_config._check_if_fully_connected(matrix) 66 | 67 | self.assertFalse(fully_connected) 68 | 69 | def test_check_if_fully_connected_2(self): 70 | matrix = [ 71 | ['', 'node-0', 'node-1'], 72 | ['node-0', 1, 1], 73 | ['node-1', 1, 1] 74 | ] 75 | fully_connected = network_config._check_if_fully_connected(matrix) 76 | 77 | self.assertTrue(fully_connected) 78 | 79 | def test_check_if_fully_connected_3(self): 80 | matrix = [ 81 | ['', 'node-0', 'node-1', 'node-2'], 82 | ['node-0', 1, 0, 1], 83 | ['node-1', 0, 1, 1], 84 | ['node-2', 1, 1, 1] 85 | ] 86 | fully_connected = network_config._check_if_fully_connected(matrix) 87 | 88 | self.assertTrue(fully_connected) 89 | 90 | def test_check_if_fully_connected_4(self): 91 | matrix = [ 92 | ['', 'node-0', 'node-1', 'node-2'], 93 | ['node-0', 1, 0, 1], 94 | ['node-1', 0, 1, 0], 95 | ['node-2', 1, 0, 1] 96 | ] 97 | fully_connected = network_config._check_if_fully_connected(matrix) 98 | 99 | self.assertFalse(fully_connected) 100 | 101 | def test_check_if_fully_connected_5(self): 102 | matrix = [ 103 | ['', 'node-0', 'node-1', 'node-2', 'node-3'], 104 | ['node-0', 1, 0, 0, 1], 105 | ['node-1', 0, 1, 1, 0], 106 | ['node-2', 0, 1, 1, 0], 107 | ['node-3', 1, 0, 0, 1], 108 | 109 | ] 110 | fully_connected = network_config._check_if_fully_connected(matrix) 111 | 112 | self.assertFalse(fully_connected) 113 | 114 | def test_check_if_fully_connected_6(self): 115 | matrix = [ 116 | ['', 'node-0', 'node-1', 'node-2', 'node-3'], 117 | ['node-0', 1, 1, 0, 0], 118 | ['node-1', 1, 1, 1, 0], 119 | ['node-2', 0, 1, 1, 0], 120 | ['node-3', 0, 0, 0, 1], 121 | 122 | ]
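# node-3 connects only to itself here, so the check has to report the matrix as not fully connected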
123 | fully_connected = network_config._check_if_fully_connected(matrix) 124 | 125 | self.assertFalse(fully_connected) 126 | -------------------------------------------------------------------------------- /code/tests/test_event.py: -------------------------------------------------------------------------------- 1 | from unittest import TestCase 2 | from event import Event 3 | from unittest.mock import patch 4 | from unittest.mock import MagicMock 5 | from unittest.mock import mock_open 6 | from bitcoin.rpc import JSONRPCError 7 | import event 8 | import logging 9 | 10 | 11 | class TestEvent(TestCase): 12 | 13 | @classmethod 14 | def setUpClass(cls): 15 | logging.disable(logging.CRITICAL) 16 | 17 | @patch('time.time') 18 | @patch('utils.sleep') 19 | @patch('utils.check_for_file', lambda file: None) 20 | def test_execute_multiple_cmds(self, m_sleep, m_time): 21 | m_file = mock_open(read_data=''.join( 22 | 'cmd1,cmd2,cmd3' 23 | )) 24 | m_file.return_value.__iter__ = lambda self: self 25 | m_file.return_value.__next__ = lambda self: next(iter(self.readline, '')) 26 | 27 | with patch('builtins.open', m_file): 28 | mock = MagicMock() 29 | mock.args.tick_duration = 1 30 | e = Event(mock) 31 | e._execute_cmd = MagicMock() 32 | 33 | m_time.return_value = 0 34 | 35 | e.execute() 36 | 37 | self.assertEqual(e._execute_cmd.call_count, 3) 38 | self.assertTrue(m_sleep.called) 39 | 40 | @patch('time.time') 41 | @patch('utils.sleep') 42 | @patch('utils.check_for_file', lambda file: None) 43 | def test_execute_multiple_lines(self, m_sleep, m_time): 44 | m_file = mock_open(read_data=''.join( 45 | 'cmd1\n' 46 | 'cmd2' 47 | )) 48 | m_file.return_value.__iter__ = lambda self: self 49 | m_file.return_value.__next__ = lambda self: next(iter(self.readline, '')) 50 | 51 | with patch('builtins.open', m_file): 52 | mock = MagicMock() 53 | mock.args.tick_duration = 1 54 | e = Event(mock) 55 | e._execute_cmd = MagicMock() 56 | 57 | m_time.return_value = 0 58 | 59 | e.execute() 60 | 61 | self.assertEqual(e._execute_cmd.call_count, 2) 62 | self.assertEqual(m_sleep.call_count, 2) 63 | 64 | @patch('utils.check_for_file', lambda file: None) 65 | @patch('logging.error') 66 | def test_execute_with_exce_execute_cmd(self, m_error): 67 | m_file = mock_open(read_data=''.join( 68 | 'cmd1' 69 | )) 70 | m_file.return_value.__iter__ = lambda self: self 71 | m_file.return_value.__next__ = lambda self: next(iter(self.readline, '')) 72 | 73 | with patch('builtins.open', m_file): 74 | mock = MagicMock() 75 | mock.args.tick_duration = 0 76 | e = Event(mock) 77 | e._execute_cmd = MagicMock() 78 | e._execute_cmd.side_effect = Exception('mock') 79 | 80 | e.execute() 81 | self.assertRegex(m_error.call_args[0][0], 'Simulation could not .*') 82 | 83 | def test_execute_cmd_with_block_cmd(self): 84 | node_1 = MagicMock() 85 | cmd = 'block node-1' 86 | e = Event(MagicMock()) 87 | e._context.nodes = {'node-1': node_1} 88 | e._execute_cmd(cmd) 89 | 90 | self.assertTrue(node_1.generate_blocks.called) 91 | 92 | def test_execute_cmd_with_block_cmd_with_empty_cmd(self): 93 | node_1 = MagicMock() 94 | 95 | e = Event(MagicMock()) 96 | e.generate_tx = MagicMock() 97 | e._execute_cmd('') 98 | 99 | self.assertFalse(node_1.execute_rpc.called) 100 | self.assertFalse(e.generate_tx.called) 101 | 102 | def test_execute_cmd_with_tx_cmd(self): 103 | node = MagicMock() 104 | cmd = 'tx node-1' 105 | 106 | e = Event(MagicMock()) 107 | e.generate_tx = MagicMock() 108 | e._context.nodes = {'node-1': node} 109 | e._execute_cmd(cmd) 110 | 111 |
self.assertTrue(node.generate_tx.called) 112 | 113 | def test_execute_cmd_with_unknown_cmd(self): 114 | cmd = 'unknown node-1' 115 | e = Event(MagicMock()) 116 | e._context.nodes = {'node-1': {}} 117 | 118 | with self.assertRaises(Exception) as context: 119 | e._execute_cmd(cmd) 120 | 121 | self.assertTrue('Unknown cmd' in str(context.exception)) 122 | 123 | def test_execute_cmd_with_exception(self): 124 | context = MagicMock() 125 | node = MagicMock() 126 | node.generate_tx.side_effect = JSONRPCError({'code': -1, 'message': 'test_message'}) 127 | context.nodes = {'node-1': node} 128 | 129 | e = Event(context) 130 | e._execute_cmd('tx node-1') 131 | 132 | def test_calc_analyze_skip_ticks_1(self): 133 | tick_count = event._calc_analyze_skip_ticks(.1, 50) 134 | self.assertEqual(tick_count, 10) 135 | 136 | def test_calc_analyze_skip_ticks_2(self): 137 | tick_count = event._calc_analyze_skip_ticks(.1, .05) 138 | self.assertEqual(tick_count, 20) 139 | 140 | def test_calc_analyze_skip_ticks_3(self): 141 | tick_count = event._calc_analyze_skip_ticks(100, 50) 142 | self.assertEqual(tick_count, 1) 143 | -------------------------------------------------------------------------------- /code/prepare.py: -------------------------------------------------------------------------------- 1 | import config 2 | import logging 3 | import bash 4 | from cmd import dockercmd 5 | import os 6 | import utils 7 | import math 8 | from multiprocessing.dummy import Pool as ThreadPool 9 | import itertools 10 | from bitcoin.rpc import DEFAULT_HTTP_TIMEOUT 11 | import node as node_utils 12 | 13 | 14 | class Prepare: 15 | def __init__(self, context): 16 | self._context = context 17 | self._pool = None 18 | 19 | def execute(self): 20 | self._pool = ThreadPool(5) 21 | 22 | logging.info('Begin of prepare step') 23 | 24 | self._prepare_simulation_dir() 25 | 26 | _remove_old_containers_if_exists() 27 | _recreate_network() 28 | 29 | self._give_nodes_spendable_coins() 30 | 31 | self._start_nodes() 32 | 33 | self._pool.close() 34 | 35 | logging.info('End of prepare step') 36 | 37 | def _prepare_simulation_dir(self): 38 | if not os.path.exists(self._context.run_dir): 39 | os.makedirs(self._context.run_dir) 40 | 41 | if os.path.islink(config.soft_link_to_run_dir): 42 | bash.check_output('unlink {}'.format(config.soft_link_to_run_dir)) 43 | bash.check_output('cd {}; ln -s {} {}'.format(config.data_dir, self._context.run_name, config.last_run)) 44 | os.makedirs(config.postprocessing_dir) 45 | 46 | for file in [config.network_csv_file_name, config.ticks_csv_file_name, 47 | config.nodes_csv_file_name, config.args_csv_file_name]: 48 | bash.check_output('cp {}{} {}'.format(config.data_dir, file, self._context.run_dir)) 49 | bash.check_output('cd {}; ln -s ../{} {}'.format(config.postprocessing_dir, file, file)) 50 | 51 | os.makedirs(config.node_config) 52 | self._pool.map(node_utils.create_conf_file, self._context.nodes.values()) 53 | 54 | logging.info('Simulation directory created') 55 | 56 | def _give_nodes_spendable_coins(self): 57 | nodes = list(self._context.nodes.values()) 58 | cbs = [] 59 | for i, node in enumerate(nodes): 60 | cbs.append( 61 | self._pool.apply_async( 62 | node_utils.start_node, 63 | args=(node, (str(node.ip) for node in nodes[max(0, i - 5):i])) 64 | ) 65 | ) 66 | for cb in cbs: 67 | cb.get() 68 | 69 | self._pool.map(node_utils.check_startup_node, nodes) 70 | 71 | amount_of_tx_chains = _calc_number_of_tx_chains( 72 | self._context.args.txs_per_tick, 73 | self._context.args.blocks_per_tick, 74 | len(nodes) 75 | ) 76 
| logging.info('Each node receives {} tx-chains'.format(amount_of_tx_chains)) 77 | 78 | for i, node in enumerate(nodes): 79 | node_utils.wait_until_height_reached(node, i * amount_of_tx_chains) 80 | node.execute_rpc('generate', amount_of_tx_chains) 81 | logging.info('Generated {} blocks for node={} for their tx-chains'.format(amount_of_tx_chains, node.name)) 82 | 83 | node_utils.wait_until_height_reached(nodes[0], amount_of_tx_chains * len(nodes)) 84 | nodes[0].generate_blocks(config.blocks_needed_to_make_coinbase_spendable) 85 | current_height = config.blocks_needed_to_make_coinbase_spendable + amount_of_tx_chains * len(nodes) 86 | 87 | self._pool.starmap(node_utils.wait_until_height_reached, zip(nodes, itertools.repeat(current_height))) 88 | 89 | self._pool.map(node_utils.transfer_coinbase_tx_to_normal_tx, nodes) 90 | 91 | for i, node in enumerate(nodes): 92 | node_utils.wait_until_height_reached(node, current_height + i) 93 | node.execute_rpc('generate', 1) 94 | 95 | current_height += len(nodes) 96 | self._context.first_block_height = current_height 97 | 98 | self._pool.starmap(node_utils.wait_until_height_reached, zip( 99 | nodes, 100 | itertools.repeat(current_height) 101 | )) 102 | 103 | self._pool.map(node_utils.rm_peers_file, nodes) 104 | node_utils.graceful_rm(self._pool, nodes) 105 | 106 | def _start_nodes(self): 107 | nodes = self._context.nodes.values() 108 | 109 | self._pool.map(node_utils.start_node, nodes) 110 | self._pool.starmap(node_utils.check_startup_node, zip( 111 | nodes, 112 | itertools.repeat(self._context.first_block_height) 113 | )) 114 | 115 | self._pool.starmap(node_utils.add_latency, zip( 116 | self._context.nodes.values(), 117 | itertools.repeat(self._context.zone.zones) 118 | )) 119 | 120 | logging.info('All nodes for the simulation are started') 121 | utils.sleep(1) 122 | 123 | 124 | def _remove_old_containers_if_exists(): 125 | containers = bash.check_output(dockercmd.ps_containers()) 126 | if len(containers) > 0: 127 | bash.check_output(dockercmd.remove_all_containers(), lvl=logging.DEBUG) 128 | logging.info('Old containers removed') 129 | 130 | 131 | def _calc_number_of_tx_chains(txs_per_tick, blocks_per_tick, number_of_nodes): 132 | txs_per_block = txs_per_tick / blocks_per_tick 133 | txs_per_block_per_node = txs_per_block / number_of_nodes 134 | 135 | # 10 times + 3 chains in reserve 136 | needed_tx_chains = (txs_per_block_per_node / config.max_in_mempool_ancestors) * 10 + 3 137 | 138 | return math.ceil(needed_tx_chains) 139 | 140 | 141 | def _recreate_network(): 142 | exit_code = bash.call_silent(dockercmd.inspect_network()) 143 | if exit_code == 0: 144 | bash.check_output(dockercmd.rm_network()) 145 | bash.check_output(dockercmd.create_network()) 146 | logging.info('Docker network {} created'.format(config.network_name)) 147 | utils.sleep(1) 148 | -------------------------------------------------------------------------------- /code/tests/test_parse.py: -------------------------------------------------------------------------------- 1 | from unittest import TestCase 2 | import parse 3 | from parse import Parser 4 | from mock import MagicMock 5 | from datetime import datetime 6 | import pytz 7 | 8 | 9 | class TestParse(TestCase): 10 | 11 | def setUp(self): 12 | node_0 = MagicMock() 13 | node_0.name = 'node-0' 14 | node_1 = MagicMock() 15 | node_1.name = 'node-1' 16 | node_2 = MagicMock() 17 | node_2.name = 'node-2' 18 | 19 | self.context = MagicMock() 20 | self.writer = MagicMock() 21 | self.parser = Parser(self.context, self.writer) 22 | 23 | def 
test_parse_create_block(self):
 24 |         event = parse.BlockCreateEvent.from_log_line(
 25 |             '2017-07-27 11:01:22.173139 Simcoin CreateNewBlock():'
 26 |             ' hash:45205cac616c0344721d2552482024528883e9fdf7439bfbfc02567060c56d71', 'node-1'
 27 |         )
 28 | 
 29 |         self.assertEqual(event._timestamp, datetime(2017, 7, 27, 11, 1, 22, 173139, pytz.UTC).timestamp())
 30 |         self.assertEqual(event._node, 'node-1')
 31 |         self.assertEqual(event._hash, '45205cac616c0344721d2552482024528883e9fdf7439bfbfc02567060c56d71')
 32 | 
 33 |     def test_parse_stats_block(self):
 34 |         event = parse.BlockStatsEvent.from_log_line(
 35 |             '2017-07-27 11:01:22.173139 CreateNewBlock(): total size: 226 block weight:'
 36 |             ' 904 txs: 1 fees: 0 sigops 400',
 37 |             'node-1'
 38 |         )
 39 | 
 40 |         self.assertEqual(event._timestamp, datetime(2017, 7, 27, 11, 1, 22, 173139, pytz.UTC).timestamp())
 41 |         self.assertEqual(event._node, 'node-1')
 42 |         self.assertEqual(event._total_size, 226)
 43 |         self.assertEqual(event._txs, 1)
 44 | 
 45 |     def test_parse_update_tip(self):
 46 |         event = parse.UpdateTipEvent.from_log_line(
 47 |             '2017-07-27 11:01:27.183575 UpdateTip: '
 48 |             'new best=1d205cac616c0344721d2552482024528883e9fdf7439bfbfc02567060c56d71 height=106 version=0x20000000'
 49 |             ' log2_work=7.741467 tx=113 date=\'2017-07-27 11:01:29\' progress=1.000000 cache=0.0MiB(112tx)',
 50 |             'node-1'
 51 |         )
 52 | 
 53 |         self.assertEqual(event._timestamp, datetime(2017, 7, 27, 11, 1, 27, 183575, pytz.UTC).timestamp())
 54 |         self.assertEqual(event._node, 'node-1')
 55 |         self.assertEqual(event._hash, '1d205cac616c0344721d2552482024528883e9fdf7439bfbfc02567060c56d71')
 56 |         self.assertEqual(event._height, 106)
 57 |         self.assertEqual(event._tx, 113)
 58 | 
 59 |     def test_parse_received_block(self):
 60 |         event = parse.BlockReceivedEvent.from_log_line(
 61 |             '2017-07-27 15:34:58.122336 received block'
 62 |             ' 4ec9b518b23d460c01abaf1c6e32ec46dbbfc8c81c599dd71c0c175e2367f278'
 63 |             ' peer=0',
 64 |             'node-1'
 65 |         )
 66 | 
 67 |         self.assertEqual(event._timestamp, datetime(2017, 7, 27, 15, 34, 58, 122336, pytz.UTC).timestamp())
 68 |         self.assertEqual(event._node, 'node-1')
 69 |         self.assertEqual(event._hash, '4ec9b518b23d460c01abaf1c6e32ec46dbbfc8c81c599dd71c0c175e2367f278')
 70 | 
 71 |     def test_successfully_reconstructed_block(self):
 72 |         event = parse.BlockReconstructEvent.from_log_line(
 73 |             '2017-07-28 08:41:43.637277 Successfully reconstructed'
 74 |             ' block 27ebf5f20b3860fb3a8ed82f0721300bf96c1836252fddd67b60f48d227d3a3c with 1 txn prefilled,'
 75 |             ' 0 txn from mempool (incl at least 0 from extra pool) and 0 txn requested',
 76 |             'node-3'
 77 |         )
 78 | 
 79 |         self.assertEqual(event._timestamp, datetime(2017, 7, 28, 8, 41, 43, 637277, pytz.UTC).timestamp())
 80 |         self.assertEqual(event._node, 'node-3')
 81 |         self.assertEqual(event._hash, '27ebf5f20b3860fb3a8ed82f0721300bf96c1836252fddd67b60f48d227d3a3c')
 82 | 
 83 |     def test_parse_add_to_wallet(self):
 84 |         event = parse.TxEvent.from_log_line(
 85 |             '2017-07-30 07:48:48.337577 AddToWallet'
 86 |             ' 2e1b05f9248ae5f29b2234ac0eb86e0fccbacc084ed91937eee7eea248fc9a6a new',
 87 |             'node-1'
 88 |         )
 89 | 
 90 |         self.assertEqual(event._timestamp, datetime(2017, 7, 30, 7, 48, 48, 337577, pytz.UTC).timestamp())
 91 |         self.assertEqual(event._node, 'node-1')
 92 |         self.assertEqual(event._hash, '2e1b05f9248ae5f29b2234ac0eb86e0fccbacc084ed91937eee7eea248fc9a6a')
 93 | 
 94 |     def test_parse_accept_to_memory_pool(self):
 95 |         event = parse.TxReceivedEvent.from_log_line(
 96 |             '2017-07-30 07:48:42.907223 AcceptToMemoryPool: peer=1:'
 97 |             ' accepted 
701cd618d630780ac19a78325f24cdd13cbf87279103c7e9cec9fb6382e90ce7' 98 | ' (poolsz 11 txn, 13 kB)', 99 | 'node-2' 100 | ) 101 | 102 | self.assertEqual(event._timestamp, datetime(2017, 7, 30, 7, 48, 42, 907223, pytz.UTC).timestamp()) 103 | self.assertEqual(event._node, 'node-2') 104 | self.assertEqual(event._hash, '701cd618d630780ac19a78325f24cdd13cbf87279103c7e9cec9fb6382e90ce7') 105 | 106 | def test_parse_peer_logic_validation(self): 107 | event = parse.PeerLogicValidationEvent.from_log_line( 108 | '2017-07-31 16:09:28.663985 PeerLogicValidation::NewPoWValidBlock' 109 | ' sending header-and-ids 107692460326feaa6f0c6c35bb218bdb3ff2adbc0d10a3a36b8252acf54e0c03' 110 | ' to peer=0', 111 | 'node-0' 112 | ) 113 | 114 | self.assertEqual(event._timestamp, datetime(2017, 7, 31, 16, 9, 28, 663985, pytz.UTC).timestamp()) 115 | self.assertEqual(event._node, 'node-0') 116 | self.assertEqual(event._hash, '107692460326feaa6f0c6c35bb218bdb3ff2adbc0d10a3a36b8252acf54e0c03') 117 | 118 | def test_parse_tick(self): 119 | event = parse.TickEvent.from_log_line('2017-08-19 16:05:14.609000 [MainThread ] [INFO ] Tick=11 with' 120 | ' planned_start=45.12, actual_start=110.01 and duration=0.9823310375213623,' 121 | ' created txs=101 and blocks=45', 'simcoin' 122 | ) 123 | 124 | self.assertEqual(event._timestamp, datetime(2017, 8, 19, 16, 5, 14, 609000, pytz.UTC).timestamp()) 125 | self.assertEqual(event._source, 'simcoin') 126 | self.assertEqual(event._number, 11) 127 | self.assertEqual(event._planned_start, 45.12) 128 | self.assertEqual(event._actual_start, 110.01) 129 | self.assertEqual(event._duration, 0.9823310375213623) 130 | self.assertEqual(event._txs, 101) 131 | self.assertEqual(event._blocks, 45) 132 | 133 | def test_parse_tx_creation_exception(self): 134 | event = parse.TxExceptionEvent.from_log_line( 135 | '2017-08-19 16:05:14.609000 [MainThread ] [INFO ] Could not generate tx for node=s-node-1.1.' 136 | ' Exception="41: too-long-mempool-chain"', 137 | 'simcoin' 138 | ) 139 | 140 | self.assertEqual(event._timestamp, datetime(2017, 8, 19, 16, 5, 14, 609000, pytz.UTC).timestamp()) 141 | self.assertEqual(event._node, 's-node-1.1') 142 | self.assertEqual(event._source, 'simcoin') 143 | self.assertEqual(event._exception, '41: too-long-mempool-chain') 144 | 145 | def test_parse_block_creation_exception(self): 146 | event = parse.BlockExceptionEvent.from_log_line( 147 | '2017-08-19 16:05:14.609000 [MainThread ] [INFO ] Could not generate block for node=s-node-1.2.' 148 | ' Exception="41: no tx"', 149 | 'simcoin' 150 | ) 151 | 152 | self.assertEqual(event._timestamp, datetime(2017, 8, 19, 16, 5, 14, 609000, pytz.UTC).timestamp()) 153 | self.assertEqual(event._node, 's-node-1.2') 154 | self.assertEqual(event._source, 'simcoin') 155 | self.assertEqual(event._exception, '41: no tx') 156 | 157 | def test_parse_rpc_exception(self): 158 | event = parse.RPCExceptionEvent.from_log_line( 159 | '2017-08-19 16:05:14.609000 [MainThread ] [INFO ] Could not execute RPC-call=getnewaddress' 160 | ' on node=s-node-1.1 because of error="Connection timeout".' 
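                # (note: this fixture mirrors the 'Reconnecting and retrying'
                # warning that execute_rpc() in node.py logs before retrying
                # a failed RPC call, so the two must stay in sync)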
161 | ' Reconnecting and retrying, 5 retries left', 162 | 'simcoin' 163 | ) 164 | 165 | self.assertEqual(event._timestamp, datetime(2017, 8, 19, 16, 5, 14, 609000, pytz.UTC).timestamp()) 166 | self.assertEqual(event._node, 's-node-1.1') 167 | self.assertEqual(event._source, 'simcoin') 168 | self.assertEqual(event._method, 'getnewaddress') 169 | self.assertEqual(event._exception, 'Connection timeout') 170 | self.assertEqual(event._retries_left, 5) 171 | -------------------------------------------------------------------------------- /code/node.py: -------------------------------------------------------------------------------- 1 | from cmd import dockercmd 2 | from cmd import bitcoincmd 3 | import config 4 | import bash 5 | import logging 6 | from cmd import tccmd 7 | import utils 8 | from collections import OrderedDict 9 | from collections import namedtuple 10 | from bitcoin.wallet import CBitcoinSecret 11 | from bitcoin.core import lx, b2x, COutPoint, CMutableTxOut, CMutableTxIn, \ 12 | CMutableTransaction, Hash160 13 | from bitcoin.core.script import CScript, OP_DUP, OP_HASH160, OP_EQUALVERIFY,\ 14 | OP_CHECKSIG, SignatureHash, SIGHASH_ALL 15 | from bitcoin.wallet import CBitcoinAddress 16 | from http.client import CannotSendRequest 17 | from bitcoin.rpc import Proxy 18 | from bitcoin.rpc import JSONRPCError 19 | from bitcoin.rpc import DEFAULT_HTTP_TIMEOUT 20 | 21 | 22 | class Node: 23 | __slots__ = ['_name', '_ip', '_docker_image', '_group'] 24 | 25 | def __init__(self, name, group, ip, docker_image): 26 | self._name = name 27 | self._ip = ip 28 | self._docker_image = docker_image 29 | self._group = group 30 | 31 | def rm(self): 32 | return bash.check_output(dockercmd.rm_container(self._name)) 33 | 34 | @property 35 | def name(self): 36 | return self._name 37 | 38 | @property 39 | def ip(self): 40 | return self._ip 41 | 42 | 43 | class BitcoinNode(Node): 44 | __slots__ = ['_path', '_spent_to', '_rpc_connection', '_current_tx_chain_index', '_tx_chains'] 45 | 46 | def __init__(self, name, group, ip, docker_image, path): 47 | super().__init__(name, group, ip, docker_image) 48 | self._path = path 49 | self._spent_to = None 50 | self._rpc_connection = None 51 | self._current_tx_chain_index = 0 52 | self._tx_chains = [] 53 | 54 | def create_conf_file(self): 55 | # file is needed for RPC connection 56 | with open(config.btc_conf_file.format(self.name), 'w') as file: 57 | file.write('rpcconnect={}\n'.format(self._ip)) 58 | file.write('rpcport={}\n'.format(config.rpc_port)) 59 | file.write('rpcuser={}\n'.format(config.rpc_user)) 60 | file.write('rpcpassword={}\n'.format(config.rpc_password)) 61 | 62 | def run(self, connect_to_ips): 63 | bash.check_output(bitcoincmd.start(self._name, str(self._ip), self._docker_image, self._path, connect_to_ips)) 64 | 65 | def is_running(self): 66 | return bash.check_output( 67 | dockercmd.check_if_running( 68 | self._name 69 | ) 70 | ) == 'true' 71 | 72 | def close_rpc_connection(self): 73 | if self._rpc_connection is not None: 74 | self._rpc_connection.__dict__['_BaseProxy__conn'].close() 75 | logging.debug('Closed rpc connection to node={}'.format(self._name)) 76 | 77 | def stop(self): 78 | self.execute_rpc('stop') 79 | logging.info('Send stop to node={}'.format(self.name)) 80 | 81 | def get_log_file(self): 82 | return self._path + config.bitcoin_log_file_name 83 | 84 | def wait_until_rpc_ready(self): 85 | while True: 86 | try: 87 | bash.check_output( 88 | "nc -z -w1 {} {}" 89 | .format(self._ip, config.rpc_port) 90 | ) 91 | break 92 | except Exception: 93 | 
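                # note: bash.check_output() raises when nc exits non-zero, so this
                # loop simply retries until the RPC port accepts a TCP connection;
                # the '-w1' timeout paces each probe at roughly one second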
logging.debug("Waiting with netcat until port is open") 94 | 95 | while True: 96 | try: 97 | self.execute_rpc('getnetworkinfo') 98 | break 99 | except JSONRPCError: 100 | logging.debug('Waiting until RPC of node={} is ready.'.format(self._name)) 101 | utils.sleep(1) 102 | 103 | def connect_to_rpc(self): 104 | self._rpc_connection = Proxy( 105 | btc_conf_file=config.btc_conf_file.format(self.name), 106 | timeout=config.rpc_timeout 107 | ) 108 | 109 | def rm_peers_file(self): 110 | return bash.check_output(bitcoincmd.rm_peers(self._name)) 111 | 112 | def execute_rpc(self, *args): 113 | retry = 30 114 | while retry > 0: 115 | try: 116 | return self._rpc_connection.call(args[0], *args[1:]) 117 | except (IOError, CannotSendRequest) as error: 118 | logging.exception('Could not execute RPC-call={} on node={} because of error={}.' 119 | ' Reconnecting and retrying, {} retries left' 120 | .format(args[0], self._name, error, retry)) 121 | retry -= 1 122 | self.connect_to_rpc() 123 | raise Exception('Could not execute RPC-call={} on node {}'.format(args[0], self._name)) 124 | 125 | def transfer_coinbases_to_normal_tx(self): 126 | for tx_chain in self._tx_chains: 127 | tx_chain.amount /= 2 128 | tx_chain.amount -= int(config.transaction_fee / 2) 129 | raw_transaction = self.execute_rpc( 130 | 'createrawtransaction', 131 | [{ 132 | 'txid': tx_chain.current_unspent_tx, 133 | 'vout': 0, 134 | }], 135 | OrderedDict([ 136 | (tx_chain.address, str(tx_chain.amount / 100000000)), 137 | (self._spent_to.address, str(tx_chain.amount / 100000000)) 138 | ]) 139 | ) 140 | signed_raw_transaction = self.execute_rpc( 141 | 'signrawtransaction', raw_transaction 142 | )['hex'] 143 | tx_chain.current_unspent_tx = self.execute_rpc( 144 | 'sendrawtransaction', 145 | signed_raw_transaction 146 | ) 147 | 148 | def generate_blocks(self, amount=1): 149 | logging.debug('{} trying to generate block'.format(self._name)) 150 | block_hash = self.execute_rpc('generate', amount) 151 | logging.info('{} generated block with hash={}'.format(self._name, block_hash)) 152 | 153 | def generate_tx(self): 154 | tx_chain = self.get_next_tx_chain() 155 | txid = lx(tx_chain.current_unspent_tx) 156 | txins = [ 157 | CMutableTxIn(COutPoint(txid, 0)), 158 | CMutableTxIn(COutPoint(txid, 1)) 159 | ] 160 | txin_seckeys = [tx_chain.seckey, self._spent_to.seckey] 161 | 162 | amount_in = tx_chain.amount 163 | tx_chain.amount -= int(config.transaction_fee / 2) 164 | 165 | txout1 = CMutableTxOut( 166 | tx_chain.amount, 167 | CBitcoinAddress(tx_chain.address).to_scriptPubKey() 168 | ) 169 | txout2 = CMutableTxOut( 170 | tx_chain.amount, 171 | CBitcoinAddress(self._spent_to.address).to_scriptPubKey() 172 | ) 173 | 174 | tx = CMutableTransaction(txins, [txout1, txout2], nVersion=2) 175 | 176 | for i, txin in enumerate(txins): 177 | txin_scriptPubKey = CScript([ 178 | OP_DUP, 179 | OP_HASH160, 180 | Hash160(txin_seckeys[i].pub), 181 | OP_EQUALVERIFY, 182 | OP_CHECKSIG 183 | ]) 184 | sighash = SignatureHash(txin_scriptPubKey, tx, i, SIGHASH_ALL) 185 | sig = txin_seckeys[i].sign(sighash) + bytes([SIGHASH_ALL]) 186 | txin.scriptSig = CScript([sig, txin_seckeys[i].pub]) 187 | 188 | tx_serialized = tx.serialize() 189 | logging.debug( 190 | '{} trying to sendrawtransaction' 191 | ' (in=2x{} out=2x{} fee={} bytes={})' 192 | ' using tx_chain number={}' 193 | .format(self._name, 194 | amount_in, 195 | txout1.nValue, 196 | (amount_in * 2) - (txout1.nValue * 2), 197 | len(tx_serialized), 198 | self._current_tx_chain_index) 199 | ) 200 | tx_hash = 
self.execute_rpc('sendrawtransaction', b2x(tx_serialized)) 201 | tx_chain.current_unspent_tx = tx_hash 202 | logging.info( 203 | '{} sendrawtransaction was successful; tx got hash={}' 204 | .format(self._name, tx_hash) 205 | ) 206 | 207 | def generate_spent_to_address(self): 208 | address = self.execute_rpc('getnewaddress') 209 | seckey = CBitcoinSecret(self.execute_rpc('dumpprivkey', address)) 210 | self._spent_to = SpentToAddress(address, seckey) 211 | 212 | def create_tx_chains(self): 213 | for unspent_tx in self.execute_rpc('listunspent'): 214 | seckey = CBitcoinSecret( 215 | self.execute_rpc('dumpprivkey', unspent_tx['address']) 216 | ) 217 | tx_chain = TxChain( 218 | unspent_tx['txid'], 219 | unspent_tx['address'], 220 | seckey, 221 | unspent_tx['amount'] * 100000000 222 | ) 223 | 224 | self._tx_chains.append(tx_chain) 225 | 226 | def get_next_tx_chain(self): 227 | tx_chain = self._tx_chains[self._current_tx_chain_index] 228 | self._current_tx_chain_index = ( 229 | (self._current_tx_chain_index + 1) % 230 | len(self._tx_chains) 231 | ) 232 | 233 | return tx_chain 234 | 235 | 236 | class PublicBitcoinNode(BitcoinNode): 237 | __slots__ = ['_latency', '_outgoing_ips'] 238 | 239 | def __init__(self, name, group, ip, latency, docker_image, path): 240 | BitcoinNode.__init__(self, name, group, ip, docker_image, path) 241 | self._latency = latency 242 | self._outgoing_ips = [] 243 | 244 | def set_outgoing_ips(self, outgoing_ips): 245 | self._outgoing_ips = outgoing_ips 246 | 247 | def add_latency(self, zones): 248 | for cmd in tccmd.create(self._name, zones, self._latency): 249 | bash.check_output(cmd) 250 | 251 | def run(self, connect_to_ips=None): 252 | if connect_to_ips is None: 253 | connect_to_ips = self._outgoing_ips 254 | 255 | super(PublicBitcoinNode, self).run(connect_to_ips) 256 | 257 | 258 | class TxChain: 259 | __slots__ = ['_current_unspent_tx', '_address', '_seckey', '_amount'] 260 | 261 | def __init__(self, current_unspent_tx, address, seckey, amount): 262 | self._current_unspent_tx = current_unspent_tx 263 | self._address = address 264 | self._seckey = seckey 265 | self._amount = amount 266 | 267 | @property 268 | def current_unspent_tx(self): 269 | return self._current_unspent_tx 270 | 271 | @current_unspent_tx.setter 272 | def current_unspent_tx(self, unspent_tx): 273 | self._current_unspent_tx = unspent_tx 274 | 275 | @property 276 | def address(self): 277 | return self._address 278 | 279 | @property 280 | def seckey(self): 281 | return self._seckey 282 | 283 | @property 284 | def amount(self): 285 | return self._amount 286 | 287 | @amount.setter 288 | def amount(self, amount): 289 | self._amount = amount 290 | 291 | 292 | SpentToAddress = namedtuple('SpentToAddress', 'address seckey') 293 | 294 | 295 | def create_conf_file(node): 296 | node.create_conf_file() 297 | 298 | 299 | def start_node(node, connect_to_ips=None): 300 | node.run(connect_to_ips) 301 | 302 | 303 | def check_startup_node(node, height=0): 304 | node.connect_to_rpc() 305 | node.wait_until_rpc_ready() 306 | wait_until_height_reached(node, height) 307 | 308 | 309 | def wait_until_height_reached(node, height): 310 | while True: 311 | node_height = node.execute_rpc('getblockcount') 312 | if height <= int(node_height): 313 | break 314 | logging.debug('Waiting until node={} with current height={} reached height={}...' 
315 |                       .format(node.name, node_height, height))
316 |         utils.sleep(0.2)
317 | 
318 | 
319 | def transfer_coinbase_tx_to_normal_tx(node):
320 |     node.generate_spent_to_address()
321 |     node.create_tx_chains()
322 |     node.transfer_coinbases_to_normal_tx()
323 |     logging.info("Transferred all coinbase-tx to normal tx for node={}".format(node.name))
324 | 
325 | 
326 | def add_latency(node, zones):
327 |     node.add_latency(zones)
328 | 
329 | 
330 | def wait_until_node_stopped(node):
331 |     parts = 10
332 |     step = config.max_wait_time_bitcoin_runs_out / parts
333 |     for _ in range(parts):
334 |         utils.sleep(step)
335 |         logging.info('Waiting until node={} runs out'.format(node.name))
336 |         if not node.is_running():
337 |             return
338 |     logging.warning('Node={} did not stop running'.format(node.name))
339 | 
340 | 
341 | def rm_peers_file(node):
342 |     node.rm_peers_file()
343 | 
344 | 
345 | def graceful_rm(pool, nodes):
346 |     pool.map(stop_node, nodes)
347 |     pool.map(wait_until_node_stopped, nodes)
348 |     pool.map(rm_node, nodes)
349 | 
350 | 
351 | def stop_node(node):
352 |     node.stop()
353 |     node.close_rpc_connection()
354 | 
355 | 
356 | def rm_node(node):
357 |     node.rm()
358 | 
-------------------------------------------------------------------------------- /code/parse.py: --------------------------------------------------------------------------------
1 | import config
2 | import re
3 | from datetime import datetime
4 | import logging
5 | import pytz
6 | from multiprocessing import Pool
7 | from itertools import repeat
8 | from chunker import Chunker
9 | import write
10 | 
11 | 
12 | class Parser:
13 |     def __init__(self, context, writer):
14 |         self._context = context
15 |         self._writer = writer
16 |         self._pool = None
17 | 
18 |         logging.info('Created parser with host={} and node={} log parsers'
19 |                      .format(len(host_parsers), len(node_parsers)))
20 | 
21 |     def execute(self):
22 |         self._pool = Pool(config.pool_processors)
23 | 
24 |         for parser in host_parsers + node_parsers:
25 |             write.write_header_csv(parser.file_name, parser.csv_header)
26 |         logging.info('Created all empty csv files')
27 | 
28 |         self._pool.starmap(_parse, zip(
29 |             repeat(self._writer),
30 |             repeat(config.run_log),
31 |             repeat('simcoin'),
32 |             Chunker.chunkify(config.run_log, config.file_chunk_size),
33 |             repeat(host_parsers),
34 |         ))
35 | 
36 |         for node in self._context.nodes.values():
37 |             self._pool.starmap(_parse, zip(
38 |                 repeat(self._writer),
39 |                 repeat(node.get_log_file()),
40 |                 repeat(node.name),
41 |                 Chunker.chunkify(node.get_log_file(), config.file_chunk_size),
42 |                 repeat(node_parsers),
43 |             ))
44 | 
45 |         self._pool.close()
46 |         logging.info('Finished parsing of run_log and all node logs')
47 | 
48 | 
49 | def _parse(writer, log_file, name, chunk, parsers):
50 |     parsed_objects = {}
51 |     for line in Chunker.parse(Chunker.read(log_file, chunk)):
52 |         for parser in parsers:
53 |             try:
54 |                 parsed_object = parser.from_log_line(line, name)
55 |                 parsed_objects.setdefault(parsed_object.file_name, []).append(parsed_object)
56 |                 break
57 |             except ParseException:
58 |                 pass
59 | 
60 |     for key in parsed_objects:
61 |         writer.append_csv(key, parsed_objects[key])
62 |     logging.info('Parsed {} event types out of chunk {} from file={}'
63 |                  .format(len(parsed_objects), chunk, log_file))
64 | 
65 | 
66 | def _parse_datetime(date_time):
67 |     parsed_date_time = datetime.strptime(date_time, config.log_time_format)
68 |     return parsed_date_time.replace(tzinfo=pytz.UTC).timestamp()
69 | 
70 | 
71 | class Event:
72 |     __slots__ = ['_timestamp', '_node']
73 | 
74 | 
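    # The concrete events below all follow the same contract: declare
    # `csv_header` and `file_name`, and implement `from_log_line()` so that it
    # raises ParseException for lines it does not match; `_parse()` relies on
    # that to fall through to the next parser. A minimal sketch of a new
    # parser, assuming a hypothetical 'ping' log line (PingEvent and
    # 'pings_raw.csv' are illustrative, not part of the codebase):
    #
    #   class PingEvent(Event):
    #       __slots__ = []
    #       csv_header = Event.csv_header
    #       file_name = 'pings_raw.csv'
    #
    #       @classmethod
    #       def from_log_line(cls, line, node):
    #           match = re.match(config.log_prefix_timestamp + 'ping$', line)
    #           if match is None:
    #               raise ParseException("Didn't match 'ping' log line.")
    #           return cls(_parse_datetime(match.group(1)), node)
    #
    # Registering the class in node_parsers (or host_parsers) at the bottom of
    # this file is all that is needed to get its csv written.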
csv_header = ['timestamp', 'node'] 75 | 76 | def __init__(self, timestamp, node): 77 | self._timestamp = timestamp 78 | self._node = node 79 | 80 | def vars_to_array(self): 81 | return [self._timestamp, self._node] 82 | 83 | 84 | class BlockCreateEvent(Event): 85 | __slots__ = ['_hash'] 86 | 87 | csv_header = Event.csv_header + ['hash'] 88 | file_name = 'blocks_create_raw.csv' 89 | file_name_after_R_preprocessing = 'blocks_create.csv' 90 | 91 | def __init__(self, timestamp, node, _hash): 92 | super().__init__(timestamp, node) 93 | self._hash = _hash 94 | 95 | @classmethod 96 | def from_log_line(cls, line, node): 97 | match = re.match( 98 | config.log_prefix_timestamp + 'Simcoin CreateNewBlock\(\): hash:([0-9,a-z]{64})$', line) 99 | 100 | if match is None: 101 | raise ParseException("Didn't match 'CreateNewBlock' log line.") 102 | 103 | return cls( 104 | _parse_datetime(match.group(1)), 105 | node, 106 | str(match.group(2)), 107 | ) 108 | 109 | def vars_to_array(self): 110 | return Event.vars_to_array(self) + [self._hash] 111 | 112 | 113 | class BlockStatsEvent(Event): 114 | __slots__ = ['_total_size', '_txs'] 115 | 116 | csv_header = Event.csv_header + ['total_size', 'txs'] 117 | file_name = 'blocks_stats_raw.csv' 118 | file_name_after_R_preprocessing = 'blocks_stats.csv' 119 | 120 | def __init__(self, timestamp, node, total_size, txs): 121 | super().__init__(timestamp, node) 122 | self._total_size = total_size 123 | self._txs = txs 124 | 125 | @classmethod 126 | def from_log_line(cls, line, node): 127 | match = re.match( 128 | config.log_prefix_timestamp + 'CreateNewBlock\(\): total size: ([0-9]+) block weight: [0-9]+ txs: ([0-9]+)' 129 | ' fees: [0-9]+ sigops [0-9]+$', line) 130 | 131 | if match is None: 132 | raise ParseException("Didn't match 'CreateNewBlock' log line.") 133 | 134 | return cls( 135 | _parse_datetime(match.group(1)), 136 | node, 137 | int(match.group(2)), 138 | int(match.group(3)), 139 | ) 140 | 141 | def vars_to_array(self): 142 | return Event.vars_to_array(self) + [self._total_size, self._txs] 143 | 144 | 145 | class UpdateTipEvent(Event): 146 | __slots__ = ['_hash', '_height', '_tx'] 147 | 148 | csv_header = Event.csv_header + ['hash', 'height', 'tx'] 149 | file_name = 'update_tip_raw.csv' 150 | file_name_after_R_preprocessing = 'update_tip.csv' 151 | 152 | def __init__(self, timestamp, node, _hash, height, tx): 153 | super().__init__(timestamp, node) 154 | self._hash = _hash 155 | self._height = height 156 | self._tx = tx 157 | 158 | @classmethod 159 | def from_log_line(cls, line, node): 160 | match = re.match( 161 | config.log_prefix_timestamp + 'UpdateTip: new best=([0-9,a-z]{64}) height=([0-9]+) version=0x[0-9]{8}' 162 | ' log2_work=[0-9]+\.?[0-9]* tx=([0-9]+)' 163 | ' date=\'[0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}:[0-9]{2}\'' 164 | ' progress=[0-9]+.[0-9]+ cache=[0-9]+\.[0-9]+[a-zA-Z]+\([0-9]+txo?\)$', line) 165 | 166 | if match is None: 167 | raise ParseException("Didn't match 'UpdateTip' log line.") 168 | 169 | return cls( 170 | _parse_datetime(match.group(1)), 171 | node, 172 | str(match.group(2)), 173 | int(match.group(3)), 174 | int(match.group(4)) 175 | ) 176 | 177 | def vars_to_array(self): 178 | return Event.vars_to_array(self) + [self._hash, self._height, self._tx] 179 | 180 | 181 | class PeerLogicValidationEvent(Event): 182 | __slots__ = ['_hash'] 183 | 184 | csv_header = Event.csv_header + ['hash'] 185 | file_name = 'peer_logic_validation_raw.csv' 186 | file_name_after_R_preprocessing = 'peer_logic_validation.csv' 187 | 188 | def __init__(self, 
timestamp, node, _hash): 189 | super().__init__(timestamp, node) 190 | self._hash = _hash 191 | 192 | @classmethod 193 | def from_log_line(cls, line, node): 194 | match = re.match( 195 | config.log_prefix_timestamp + 'PeerLogicValidation::NewPoWValidBlock sending header-and-ids ([a-z0-9]{64}) ' 196 | 'to peer=[0-9]+', line) 197 | 198 | if match is None: 199 | raise ParseException("Didn't match 'PeerLogicValidation' log line.") 200 | 201 | return cls( 202 | _parse_datetime(match.group(1)), 203 | node, 204 | str(match.group(2)) 205 | ) 206 | 207 | def vars_to_array(self): 208 | return Event.vars_to_array(self) + [self._hash] 209 | 210 | 211 | class TxEvent(Event): 212 | __slots__ = ['_hash'] 213 | 214 | csv_header = Event.csv_header + ['hash'] 215 | file_name = 'txs_raw.csv' 216 | file_name_after_R_preprocessing = 'txs.csv' 217 | 218 | def __init__(self, timestamp, node, _hash): 219 | super().__init__(timestamp, node) 220 | self._hash = _hash 221 | 222 | @classmethod 223 | def from_log_line(cls, line, node): 224 | match = re.match(config.log_prefix_timestamp + 'AddToWallet ([a-z0-9]{64}) new$', line) 225 | 226 | if match is None: 227 | raise ParseException("Didn't match 'AddToWallet' log line.") 228 | 229 | return cls( 230 | _parse_datetime(match.group(1)), 231 | node, 232 | str(match.group(2)), 233 | ) 234 | 235 | def vars_to_array(self): 236 | return Event.vars_to_array(self) + [self._hash] 237 | 238 | 239 | class TickEvent: 240 | __slots__ = ['_timestamp', '_source', '_number', '_planned_start', '_actual_start', '_duration', '_txs', '_blocks'] 241 | 242 | csv_header = ['timestamp', 'source', 'number', 'planned_start', 'actual_start', 'duration', 'txs', 'blocks'] 243 | file_name = 'tick_infos.csv' 244 | 245 | def __init__(self, timestamp, source, number, planned_start, actual_start, duration, txs, blocks): 246 | self._timestamp = timestamp 247 | self._source = source 248 | self._number = number 249 | self._planned_start = planned_start 250 | self._actual_start = actual_start 251 | self._duration = duration 252 | self._txs = txs 253 | self._blocks = blocks 254 | 255 | @classmethod 256 | def from_log_line(cls, line, source): 257 | match = re.match( 258 | config.log_prefix_timestamp + '\[.*\] \[.*\] Tick=([0-9]+) with planned_start=([0-9]+\.[0-9]+),' 259 | ' actual_start=([0-9]+\.[0-9]+) and duration=([0-9]+\.[0-9]+),' 260 | ' created txs=([0-9]+) and blocks=([0-9]+)$', line) 261 | if match is None: 262 | raise ParseException("Didn't match 'Tick' log line.") 263 | 264 | return cls( 265 | _parse_datetime(match.group(1)), 266 | source, 267 | int(match.group(2)), 268 | float(match.group(3)), 269 | float(match.group(4)), 270 | float(match.group(5)), 271 | int(match.group(6)), 272 | int(match.group(7)), 273 | ) 274 | 275 | def vars_to_array(self): 276 | return [self._timestamp, self._source, self._number, self._planned_start, self._actual_start, self._duration, 277 | self._txs, self._blocks] 278 | 279 | 280 | class ReceivedEvent(Event): 281 | csv_header = Event.csv_header + ['hash'] 282 | 283 | def __init__(self, timestamp, node, _hash): 284 | super().__init__(timestamp, node) 285 | self._hash = _hash 286 | 287 | def vars_to_array(self): 288 | return Event.vars_to_array(self) + [self._hash] 289 | 290 | 291 | class BlockReceivedEvent(ReceivedEvent): 292 | file_name = 'blocks_received_raw.csv' 293 | file_name_after_R_preprocessing = 'blocks_received.csv' 294 | 295 | @classmethod 296 | def from_log_line(cls, line, node): 297 | match = re.match(config.log_prefix_timestamp + 'received block 
([a-z0-9]{64}) peer=[0-9]+$', line) 298 | 299 | if match is None: 300 | raise ParseException("Didn't match 'Received block' log line.") 301 | 302 | return cls( 303 | _parse_datetime(match.group(1)), 304 | node, 305 | str(match.group(2)), 306 | ) 307 | 308 | 309 | class BlockReconstructEvent(ReceivedEvent): 310 | file_name = 'blocks_reconstructed_raw.csv' 311 | file_name_after_R_preprocessing = 'blocks_reconstructed.csv' 312 | 313 | @classmethod 314 | def from_log_line(cls, line, node): 315 | match = re.match( 316 | config.log_prefix_timestamp + 'Successfully reconstructed block ([a-z0-9]{64}) with ([0-9]+) txn prefilled,' 317 | ' ([0-9]+) txn from mempool \(incl at least ([0-9]+) from extra pool\) and' 318 | ' [0-9]+ txn requested$', line) 319 | if match is None: 320 | raise ParseException("Didn't match 'Reconstructed block' log line.") 321 | 322 | return cls( 323 | _parse_datetime(match.group(1)), 324 | node, 325 | str(match.group(2)), 326 | ) 327 | 328 | 329 | class TxReceivedEvent(ReceivedEvent): 330 | file_name = 'txs_received_raw.csv' 331 | file_name_after_R_preprocessing = 'txs_received.csv' 332 | 333 | @classmethod 334 | def from_log_line(cls, line, node): 335 | match = re.match(config.log_prefix_timestamp + 336 | 'AcceptToMemoryPool: peer=([0-9]+): accepted ([0-9a-z]{64}) \(poolsz ([0-9]+) txn,' 337 | ' ([0-9]+) [a-zA-Z]+\)$', line) 338 | 339 | if match is None: 340 | raise ParseException("Didn't match 'AcceptToMemoryPool' log line.") 341 | 342 | return cls( 343 | _parse_datetime(match.group(1)), 344 | node, 345 | str(match.group(3)), 346 | ) 347 | 348 | 349 | class ExceptionEvent(Event): 350 | __slots__ = ['_source', '_exception'] 351 | 352 | csv_header = Event.csv_header + ['source', 'exception'] 353 | 354 | def __init__(self, timestamp, node, source, exception): 355 | super().__init__(timestamp, node) 356 | self._source = source 357 | self._exception = exception 358 | 359 | def vars_to_array(self): 360 | return Event.vars_to_array(self) + [self._source, self._exception] 361 | 362 | 363 | class BlockExceptionEvent(ExceptionEvent): 364 | file_name = 'block_exceptions.csv' 365 | 366 | @classmethod 367 | def from_log_line(cls, line, node): 368 | match = re.match(config.log_prefix_timestamp + 369 | '\[.*\] \[.*\] Could not generate block for node=([a-zA-Z0-9-.]+)\.' 370 | ' Exception="(.+)"$', line) 371 | 372 | if match is None: 373 | raise ParseException("Didn't match 'Block exception' log line.") 374 | 375 | return cls( 376 | _parse_datetime(match.group(1)), 377 | str(match.group(2)), 378 | node, 379 | str(match.group(3)) 380 | ) 381 | 382 | 383 | class TxExceptionEvent(ExceptionEvent): 384 | file_name = 'tx_exceptions.csv' 385 | 386 | @classmethod 387 | def from_log_line(cls, line, node): 388 | match = re.match(config.log_prefix_timestamp + 389 | '\[.*\] \[.*\] Could not generate tx for node=([a-zA-Z0-9-.]+)\.' 
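                        # (argument-order note for the exception parsers: the node
                        # name is parsed out of the log line itself, while the `node`
                        # parameter, which is 'simcoin' for host logs, is passed on
                        # as the event's source)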
390 | ' Exception="(.+)"$', line) 391 | 392 | if match is None: 393 | raise ParseException("Didn't match 'Tx exception' log line.") 394 | 395 | return cls( 396 | _parse_datetime(match.group(1)), 397 | str(match.group(2)), 398 | node, 399 | str(match.group(3)) 400 | ) 401 | 402 | 403 | class RPCExceptionEvent(Event): 404 | __slots__ = ['_timestamp', '_node', '_source', '_method', '_exception', '_retries_left'] 405 | 406 | csv_header = Event.csv_header + ['source', 'method', 'exception', 'retries_left'] 407 | file_name = 'rpc_exceptions.csv' 408 | 409 | def __init__(self, timestamp, node, source, method, exception, retries_left): 410 | super().__init__(timestamp, node) 411 | self._source = source 412 | self._method = method 413 | self._exception = exception 414 | self._retries_left = retries_left 415 | 416 | @classmethod 417 | def from_log_line(cls, line, node): 418 | match = re.match(config.log_prefix_timestamp + 419 | '\[.*\] \[.*\] Could not execute RPC-call=([a-zA-Z0-9]+) on node=([a-zA-Z0-9-\.]+)' 420 | ' because of error="(.*)"\. Reconnecting and retrying, ([0-9]+) retries left', line) 421 | 422 | if match is None: 423 | raise ParseException("Didn't match 'RPC exception' log line.") 424 | 425 | return cls( 426 | _parse_datetime(match.group(1)), 427 | str(match.group(3)), 428 | node, 429 | str(match.group(2)), 430 | str(match.group(4)), 431 | int(match.group(5)) 432 | ) 433 | 434 | def vars_to_array(self): 435 | return Event.vars_to_array(self) + [self._source, self._method, self._exception, self._retries_left] 436 | 437 | 438 | class ParseException(Exception): 439 | pass 440 | 441 | 442 | node_parsers = [ 443 | BlockCreateEvent, 444 | BlockStatsEvent, 445 | BlockReceivedEvent, 446 | BlockReconstructEvent, 447 | UpdateTipEvent, 448 | PeerLogicValidationEvent, 449 | 450 | TxEvent, 451 | TxReceivedEvent, 452 | ] 453 | 454 | host_parsers = [ 455 | TxExceptionEvent, 456 | BlockExceptionEvent, 457 | RPCExceptionEvent, 458 | 459 | TickEvent, 460 | ] 461 | -------------------------------------------------------------------------------- /code/reporter/report.Rmd: -------------------------------------------------------------------------------- 1 | --- 2 | title: "Report" 3 | date: '`r format(Sys.time(), "%d %B, %Y")`' 4 | author: "simcoin" 5 | output: 6 | pdf_document: 7 | pandoc_args: 8 | - -V 9 | - classoption=twocolumn 10 | toc: yes 11 | html_document: 12 | toc: yes 13 | --- 14 | 15 | ```{r knitr_options, include=FALSE} 16 | library(knitr) 17 | 18 | knitr::opts_chunk$set( 19 | fig.path='RmdFigs/', 20 | warning=FALSE, 21 | message=FALSE, 22 | error = TRUE, 23 | echo = FALSE, 24 | dev = 'pdf', 25 | fig.align='center' 26 | ) 27 | 28 | # set working directory for development: knitr::opts_knit$set(root.dir = '/tmp/run/postprocessing') 29 | ``` 30 | 31 | ```{r imports} 32 | library(anytime) 33 | library(dplyr) 34 | library(kableExtra) 35 | library(lattice) 36 | library(reshape2) 37 | library(stringr) 38 | Sys.setenv(TZ='UTC') 39 | ``` 40 | 41 | ```{r setup, include=FALSE} 42 | blocks_create <- read.csv("blocks_create.csv", dec=".") 43 | blocks_stats <- read.csv("blocks_stats.csv", dec=".") 44 | blocks_reconstructed <- read.csv("blocks_reconstructed.csv", dec=".") 45 | blocks_received <- read.csv("blocks_received.csv", dec=".") 46 | block_exces <- read.csv("block_exceptions.csv", dec=".") 47 | peer_logic_validation <- read.csv("peer_logic_validation.csv", dec=".") 48 | update_tip <- read.csv("update_tip.csv", dec=".") 49 | consensus_chain <- read.csv("consensus_chain.csv", dec=".") 50 | 51 | txs <- 
read.csv("txs.csv", dec=".")
52 | txs_received <- read.csv("txs_received.csv", dec=".")
53 | tx_exces <- read.csv("tx_exceptions.csv", dec=".")
54 | 
55 | rpc_exces <- read.csv("rpc_exceptions.csv", dec=".")
56 | tips <- read.csv("tips.csv", dec=".")
57 | 
58 | general_infos <- read.csv('general_infos.csv', stringsAsFactors=FALSE)
59 | step_times <- read.csv("step_times.csv", dec=".")
60 | 
61 | tick_infos <- read.csv("tick_infos.csv", dec=".")
62 | number_of_runs <- length(unique(tick_infos$tag))
63 | sim_starts <- aggregate(tick_infos$actual_start, by=list(tick_infos$tag), FUN=min)
64 | colnames(sim_starts) <- c('tag', 'run_start')
65 | analysed_tick_infos <- read.csv('analysed_tick_infos.csv', dec='.')
66 | 
67 | cpu_time <- read.csv("cpu_time.csv", dec=".")
68 | memory <- read.csv("memory.csv", dec=".")
69 | 
70 | args <- read.csv('args.csv')
71 | ticks <- readLines(file('ticks.csv'))
72 | analysed_ticks <- readLines(file('analysed_ticks.csv'))
73 | 
74 | nodes <- read.csv('nodes.csv', dec='.')
75 | ```
76 | 
77 | ## Simulation
78 | ```{r general_infos}
79 | general_infos_table <- data.frame(
80 |   c(format(utctime(step_times$timestamp[1]), '%Y-%m-%d %H:%M:%S %Z'),
81 |     format(utctime(step_times$timestamp[nrow(step_times)]), '%Y-%m-%d %H:%M:%S %Z'),
82 |     length(ticks),
83 |     length(analysed_ticks),
84 |     (length(ticks) - length(analysed_ticks)))
85 | )
86 | row.names(general_infos_table) <- c('Start', 'End', 'Planned ticks', 'Planned analysed ticks', 'Skipped ticks')
87 | kable(general_infos_table,
88 |       col.names = c('Value'), caption = 'General information', format = 'latex') %>%
89 |   kable_styling(latex_options = c('scale_down','HOLD_position'))
90 | ```
91 | 
92 | ```{r start_arguments}
93 | args_transposed <- as.data.frame(t(args))
94 | kable(args_transposed, col.names = c('Value'), format = 'latex', caption = 'Start arguments') %>%
95 |   kable_styling(latex_options = c('scale_down','HOLD_position'))
96 | ```
97 | 
98 | ```{r block_events_summary}
99 | analysed_blocks <- analysed_tick_infos %>% group_by(tag) %>% summarise(blocks = sum(blocks))
100 | parsed_blocks <- blocks_create %>% group_by(tag) %>% summarise(n = n())
101 | 
102 | block_count <- sum(str_count(analysed_ticks, 'block'))
103 | summary <- data.frame(rep(block_count, number_of_runs), analysed_blocks$blocks, parsed_blocks$n)
104 | rownames(summary) <- analysed_blocks$tag
105 | kable(summary,
106 |       col.names = c('Planned', 'Created', 'Parsed'), caption = 'Blocks events summary', format = 'latex') %>%
107 |   kable_styling(latex_options = c('HOLD_position'))
108 | ```
109 | 
110 | ```{r tx_events_summary}
111 | analysed_txs <- analysed_tick_infos %>% group_by(tag) %>% summarise(txs = sum(txs))
112 | summary_txs <- txs %>% group_by(tag) %>% summarise(n = n())
113 | summary_txs$n <- summary_txs$n - analysed_blocks$blocks
114 | 
115 | tx_count <- sum(str_count(analysed_ticks, 'tx'))
116 | summary <- data.frame(rep(tx_count, number_of_runs), analysed_txs$txs, summary_txs$n)
117 | rownames(summary) <- analysed_txs$tag
118 | kable(summary,
119 |       col.names = c('Planned', 'Created', 'Parsed'), caption = 'Transaction events summary', format = 'latex') %>%
120 |   kable_styling(latex_options = c('HOLD_position'))
121 | ```
122 | 
123 | ```{r step_times_barchart, fig.cap='Step times', fig.pos='H'}
124 | step_diff <- step_times %>%
125 |   group_by(tag) %>%
126 |   mutate(diff = c(NA, diff(timestamp))) %>%
127 |   filter(!is.na(diff))
128 | step_diff <- dcast(step_diff, tag~type, value.var = 'diff')
129 | rownames(step_diff) <- step_diff[,1]
130 | step_diff <- 
step_diff[,-1] 131 | step_diff <- step_diff[, c(3,2,1)] 132 | step_diff <- round(step_diff/60, 1) 133 | colnames(step_diff) <- c('Preparation', 'Simulation', 'Postprocessing') 134 | barchart(as.matrix(step_diff), stack = TRUE, ref = FALSE, xlab = 'Time [min]', 135 | auto.key = list(columns=2, rectangles = TRUE, points = FALSE, space='top', text=colnames(step_diff)), 136 | panel=function(x,y,...){ 137 | panel.barchart(x,y,...) 138 | xx <- unsplit(unname(lapply(split(x, y), function(t)cumsum(t)-t/2)), y) 139 | ltext(xx, y=y, labels=round(x, 1)) 140 | }) 141 | ``` 142 | 143 | ## System 144 | ```{r system_information} 145 | kable(Sys.info(), col.names = c('Value'), format = 'latex', caption = 'System information') %>% 146 | kable_styling(latex_options = c('scale_down','HOLD_position')) 147 | ``` 148 | 149 | ```{r system_hardware_specs} 150 | infos <- data.frame(c(general_infos$cpu_model, general_infos$cpus, round(as.double(general_infos$total_memory) / 1000000, 3))) 151 | row.names(infos) <- c('CPU model', 'CPU(s)', 'Memory [GB]') 152 | kable(infos, col.names = c('Value'), format = 'latex', caption = 'System hardware specs') %>% 153 | kable_styling(latex_options = c('scale_down','HOLD_position')) 154 | ``` 155 | 156 | ```{r cpu_time_plot, fig.cap='CPU usage over time', fig.pos='H'} 157 | cpu_time_diff <- cpu_time %>% 158 | group_by(tag) %>% 159 | mutate(idle = c(NA, diff(idle)), user = c(NA, diff(user)), nice = c(NA, diff(nice)), system = c(NA, diff(system))) %>% 160 | filter(!is.na(idle)) 161 | cpu_time_diff$total <- cpu_time_diff$user + cpu_time_diff$nice + cpu_time_diff$system 162 | 163 | cpu_time_diff$usage <- (1 - cpu_time_diff$idle / (cpu_time_diff$total + cpu_time_diff$idle)) * 100 164 | cpu_time_diff <- merge(cpu_time_diff, sim_starts) 165 | cpu_time_diff$elapsed <- cpu_time_diff$timestamp - cpu_time_diff$run_start 166 | xyplot(cpu_time_diff$usage ~ cpu_time_diff$elapsed, groups = cpu_time_diff$tag, 167 | auto.key = list(space = "top", columns = 2, points = FALSE, lines = TRUE), 168 | xlab = 'Time [s]', ylab = 'CPU usage [%]', t = 'l' ) 169 | ``` 170 | 171 | ```{r memory_xyplot, fig.cap='Memory usage over time', fig.pos='H'} 172 | memory$usage <- (1 - memory$available/memory$total) * 100 173 | memory <- merge(memory, sim_starts) 174 | memory$elapsed <- memory$timestamp - memory$run_start 175 | xyplot(memory$usage ~ memory$elapsed, groups = memory$tag, 176 | auto.key = list(space = "top", columns = 2, points = FALSE, lines = TRUE), 177 | xlab = 'Time [s]', ylab = 'Memory usage [%]', t = 'l' ) 178 | ``` 179 | 180 | ## Ticks 181 | ```{r tick_duration_summary} 182 | kable(unclass(summary(tick_infos$duration)), col.names = c('Duration [s]'), digits = 3, caption = 'Overall tick duration', format = 'latex') %>% 183 | kable_styling(latex_options = c('HOLD_position')) 184 | ``` 185 | 186 | ```{r ticks_wait} 187 | tick_infos$wait <- args$tick_duration - tick_infos$duration 188 | ``` 189 | 190 | `r sum(tick_infos$wait < 0)` times the execution of tick events took longer than the defined tick duration. 191 | 192 | ```{r tick_duration_xyplot, fig.cap='Tick duration over time', fig.pos='H'} 193 | ticks <- merge(tick_infos, sim_starts) 194 | ticks$elapsed <- ticks$timestamp - ticks$run_start 195 | xyplot(ticks$duration ~ ticks$elapsed, groups = ticks$tag, 196 | xlab = 'Time [s]', ylim = c(0, max(max(tick_infos$duration), args$tick_duration) * 1.1), ylab = 'Duration [s]', t = 'l', 197 | panel = function(...) { 198 | panel.xyplot(...) 
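            # reference line at the configured tick duration; curve segments above
            # it are ticks whose events took longer than their allotted slot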
199 | panel.abline(h = args$tick_duration) 200 | }) 201 | ``` 202 | 203 | ```{r tick_duration_median, fig.cap='Tick duration', fig.pos='H'} 204 | bwplot(tick_infos$duration ~ tick_infos$tag, ylab = 'Duration [s]') 205 | ``` 206 | 207 | ## Blocks 208 | ```{r blocks_setup} 209 | block_propagation <- rbind(blocks_reconstructed, blocks_received, peer_logic_validation, update_tip[, !(names(update_tip) %in% c('tx', 'height'))]) 210 | 211 | block_propagation <- merge(block_propagation, blocks_create[,c('timestamp', 'node', 'hash')], by = 'hash') 212 | 213 | block_propagation <- block_propagation %>% 214 | filter(as.character(node.x) != as.character(node.y)) %>% 215 | select(-node.y, node = node.x) %>% group_by(hash, node) %>% 216 | filter(which.min(timestamp.x)==row_number()) %>% 217 | mutate(propagation_time = timestamp.x - timestamp.y) 218 | ``` 219 | 220 | ```{r block_stale_rate_summary} 221 | consensus_chain$stale = 'Accepted' 222 | blocks_create <- merge(blocks_create, consensus_chain, all.x = TRUE) 223 | blocks_create$stale[is.na(blocks_create$stale)] <- 'Stale' 224 | 225 | stale_block <- table(blocks_create$tag, factor(blocks_create$stale, levels = c('Accepted', 'Stale'))) 226 | stale_rate <- data.frame(c(round(prop.table(stale_block, 1)[,2] * 100, 3))) 227 | kable(stale_rate, col.names = c('Rate [%]'), digits = 3, caption = 'Stale block rate', format = 'latex') %>% 228 | kable_styling(latex_options = c('HOLD_position')) 229 | ``` 230 | 231 | ```{r block_stale_barchart, fig.cap='Accepted/stale blocks', fig.pos='H'} 232 | stale_block <- table(blocks_create$tag, blocks_create$stale) 233 | barchart(stale_block, stack = TRUE, ref = FALSE, xlab = 'Blocks', 234 | auto.key = list(columns=2, points = FALSE, rectangles = TRUE), panel=function(x,y,...){ 235 | panel.barchart(x,y,...) 236 | xx <- unsplit(unname(lapply(split(x, y), function(t)cumsum(t)-t/2)), y) 237 | xx <- xx [! x %in% 0] 238 | y <- y [! x %in% 0] 239 | x <- x [! x %in% 0] 240 | ltext(xx, y=y, labels=x) 241 | }) 242 | ``` 243 | 244 | ```{r block_stale_distribution, eval=nlevels(nodes$group)>1, fig.cap='Stale blocks distribution', fig.pos='H'} 245 | stale_block_nodes <- blocks_create %>% filter(stale == 'Stale') %>% select(node, tag) 246 | stale_block_groups <- merge(stale_block_nodes, nodes, by.x = 'node', by.y = 'name') %>% select(group, tag) 247 | stale_per_group <- table(stale_block_groups$tag, stale_block_groups$group) 248 | barchart(stale_per_group, stack = TRUE, ref = FALSE, xlab = 'Blocks', 249 | auto.key = list(columns=2, points = FALSE, rectangles = TRUE), panel=function(x,y,...){ 250 | panel.barchart(x,y,...) 251 | xx <- unsplit(unname(lapply(split(x, y), function(t)cumsum(t)-t/2)), y) 252 | xx <- xx [! x %in% 0] 253 | y <- y [! x %in% 0] 254 | x <- x [! 
x %in% 0] 255 | ltext(xx, y=y, labels=x) 256 | }) 257 | ``` 258 | 259 | ```{r block_distribution_summary, eval=nlevels(nodes$group)>1} 260 | group_share <- nodes %>% 261 | group_by(group) %>% 262 | summarise(share = round(sum(share) * 100, 2)) 263 | 264 | accepted_block_nodes <- blocks_create %>% filter(stale == 'Accepted') 265 | blocks_with_group <- merge(accepted_block_nodes, nodes, by.x = 'node', by.y = 'name') 266 | tag_size <- blocks_with_group %>% group_by(tag) %>% summarise(tag_size = n()) 267 | blocks_with_group <- merge(blocks_with_group, tag_size) 268 | 269 | group_blocks <- blocks_with_group %>% 270 | group_by(tag, group, tag_size) %>% 271 | summarise(block_count = n()) %>% 272 | mutate(block_share = round(block_count/tag_size * 100, 2)) 273 | 274 | block_dist_summary <- merge(group_share, group_blocks, all.x = TRUE) 275 | block_dist_summary[is.na(block_dist_summary)] <- 0 276 | 277 | block_dist_summary <- block_dist_summary %>% arrange(tag) %>% select(tag, group, share, block_count, block_share) 278 | 279 | kable(block_dist_summary, col.names = c('', 'Group', 'Share [%]', 'Blocks', 'Blocks share [%]'), digits = 2, caption = 'Accepted block distribution summary', format = 'latex') %>% 280 | kable_styling(latex_options = c('scale_down', 'HOLD_position')) %>% 281 | collapse_rows(columns = 1) 282 | ``` 283 | 284 | ```{r block_distribution, eval=nlevels(nodes$group)>1, fig.cap='Accepted block distribution', fig.pos='H'} 285 | block_dist <- dcast(group_blocks, tag~group, value.var = 'block_count') 286 | block_dist[is.na(block_dist)] <- 0 287 | 288 | rownames(block_dist) <- block_dist$tag 289 | block_dist <- block_dist[ , !(names(block_dist) %in% c('tag')), drop = FALSE] 290 | 291 | barchart(as.matrix(block_dist), stack = TRUE, ref = FALSE, xlab = 'Blocks', 292 | auto.key = list(points = FALSE, rectangles = TRUE), panel=function(x,y,...){ 293 | panel.barchart(x,y,...) 294 | xx <- unsplit(unname(lapply(split(x, y), function(t)cumsum(t)-t/2)), y) 295 | xx <- xx [! x %in% 0] 296 | y <- y [! x %in% 0] 297 | x <- x [! 
x %in% 0] 298 | ltext(xx, y=y, labels=x) 299 | }) 300 | ``` 301 | 302 | ```{r block_size_summary} 303 | kable(unclass(summary(blocks_stats$total_size / 1000)), col.names = c('Size [kB]'), digits = 3, caption = 'Overall block size', format = 'latex') %>% 304 | kable_styling(latex_options = c('HOLD_position')) 305 | ``` 306 | 307 | ```{r block_size_bloxplot, fig.cap='Block size', fig.pos='H'} 308 | bwplot(blocks_stats$total_size / 1000 ~ blocks_stats$tag, ylab = 'Block size [kB]') 309 | ``` 310 | 311 | ```{r block_propagation_summary} 312 | kable(unclass(summary(block_propagation$propagation_time * 1000)), col.names = c('Propagation [ms]'), digits = 3, caption = 'Overall block propagation', format = 'latex') %>% 313 | kable_styling(latex_options = c('HOLD_position')) 314 | ``` 315 | 316 | ```{r block_propagation_density, eval=nrow(block_propagation)>0, fig.cap='Block propagation density', fig.pos='H'} 317 | densityplot(~propagation_time, data=block_propagation, groups = tag, 318 | auto.key = list(space = "top", columns = 2), 319 | ref = TRUE, xlab = 'Duration [s]', plot.points = FALSE) 320 | ``` 321 | 322 | ## Transactions 323 | ```{r txs_propagation_summary} 324 | txs_received <- merge(txs[, c('timestamp', 'hash')], txs_received, by = 'hash') 325 | txs_received$propagation_duration <- txs_received$timestamp.y - txs_received$timestamp.x 326 | 327 | kable(unclass(summary(txs_received$propagation_duration * 1000)), col.names = c('Propagation [ms]'), digits = 3, caption = 'Overall transactions propagation', format = 'latex') %>% 328 | kable_styling(latex_options = c('HOLD_position')) 329 | ``` 330 | 331 | ```{r tx_propagation_density, eval=nrow(txs_received)>0, fig.cap='Transaction propagation density', fig.pos='H'} 332 | densityplot(~propagation_duration, data=txs_received, groups = tag, 333 | auto.key = list(space = "top", columns = 2), ref = TRUE, 334 | xlab = 'Duration [s]', plot.points = FALSE) 335 | ``` 336 | 337 | ```{r txs_per_block_summary} 338 | kable(unclass(summary(blocks_stats$txs)), digits = 0, caption = 'Overall transactions per block', format = 'latex') %>% 339 | kable_styling(latex_options = c('HOLD_position')) 340 | ``` 341 | 342 | ```{r txs_per_block_boxplot, fig.cap='Transaction per block boxplot', fig.pos='H'} 343 | bwplot(blocks_stats$txs ~ blocks_stats$tag, ylab = 'Transactions') 344 | ``` 345 | 346 | ## Tips 347 | ```{r tips_setup} 348 | tips <- tips[tips$status != 'active',] 349 | tips_per_node <- tips %>% 350 | add_count(node, tag) 351 | ``` 352 | 353 | `r if(nrow(tips) == 0) {'There were 0 tips during all simulation runs.'}` 354 | 355 | ```{r tips_per_node_summary, eval=nrow(tips) > 0} 356 | kable(unclass(summary(tips_per_node$n)), digits = 2, caption = 'Overall tips per node', format = 'latex') %>% 357 | kable_styling(latex_options = c('HOLD_position')) 358 | ``` 359 | 360 | ```{r tips_per_node_boxplot, eval=nrow(tips) > 0, fig.cap='Tips per node', fig.pos='H'} 361 | bwplot(tips_per_node$n ~ tips_per_node$tag, ylab = 'Tips') 362 | ``` 363 | 364 | ```{r tips_branchlen_summary, eval=nrow(tips) > 0} 365 | kable(unclass(summary(tips$branchlen)), digits = 2, caption = 'Overall branch length of tips', format = 'latex') %>% 366 | kable_styling(latex_options = c('HOLD_position')) 367 | ``` 368 | 369 | ```{r tips_branchlen_boxplot, eval=nrow(tips) > 0, fig.cap='Overall branch length of tips', fig.pos='H'} 370 | bwplot(tips$branchlen ~ tips$tag, ylab = 'Branch length') 371 | ``` 372 | 373 | ## Exceptions 374 | ```{r event_exces_setup} 375 | if (nrow(tx_exces) > 0) { 376 | 
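  # label each exception row with its origin before the two frames are merged;
  # the nrow() guards matter because assigning a length-one value as a new
  # column fails on a zero-row data frame in base R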
tx_exces$type <- 'Transaction' 377 | } 378 | if (nrow(block_exces) > 0) { 379 | block_exces$type <- 'Block' 380 | } 381 | event_exces <- rbind(tx_exces, block_exces) 382 | ``` 383 | 384 | `r if(nrow(event_exces) == 0) {'There were 0 exceptions during the creation of transaction/block events. All events were executed successfully.'}` 385 | 386 | ```{r event_exces, eval=nrow(event_exces)>0, fig.cap='Exceptions transaction/block event', fig.pos='H'} 387 | barchart(table(event_exces$tag, event_exces$type), ref = FALSE, xlab = 'Amount of exceptions', 388 | auto.key = list(space = "top", columns = 2, points = FALSE, rectangles = TRUE), 389 | panel=function(x,y,...){ 390 | panel.barchart(x,y,...) 391 | xx <- unsplit(unname(lapply(split(x, y), function(t)cumsum(t)-t/2)), y) 392 | xx <- xx [! x %in% 0] 393 | y <- y [! x %in% 0] 394 | x <- x [! x %in% 0] 395 | ltext(xx, y=y, labels=x) 396 | }) 397 | ``` 398 | 399 | ```{r tx_exce_types, eval=nrow(tx_exces)>0, fig.cap='Transaction exception types', fig.pos='H'} 400 | barchart(table(tx_exces$tag, tx_exces$exception), ref = FALSE, xlab = 'Amount of exceptions', 401 | auto.key = list(space = "top", columns = 2, points = FALSE, rectangles = TRUE), 402 | panel=function(x,y,...){ 403 | panel.barchart(x,y,...) 404 | xx <- unsplit(unname(lapply(split(x, y), function(t)cumsum(t)-t/2)), y) 405 | xx <- xx [! x %in% 0] 406 | y <- y [! x %in% 0] 407 | x <- x [! x %in% 0] 408 | ltext(xx, y=y, labels=x) 409 | }) 410 | ``` 411 | 412 | ```{r block_exce_types, eval=nrow(block_exces)>0, fig.cap='Block exception types', fig.pos='H'} 413 | barchart(table(block_exces$tag, block_exces$exception), ref = FALSE, xlab = 'Amount of exceptions', 414 | auto.key = list(space = "top", columns = 2, points = FALSE, rectangles = TRUE), 415 | panel=function(x,y,...){ 416 | panel.barchart(x,y,...) 417 | xx <- unsplit(unname(lapply(split(x, y), function(t)cumsum(t)-t/2)), y) 418 | xx <- xx [! x %in% 0] 419 | y <- y [! x %in% 0] 420 | x <- x [! x %in% 0] 421 | ltext(xx, y=y, labels=x) 422 | }) 423 | ``` 424 | 425 | ```{r rpc_excs, eval=nrow(rpc_exces)>0, fig.cap='RPC exceptions', fig.pos='H'} 426 | barchart(table(rpc_exces$tag, rpc_exces$method), ref = FALSE, xlab = 'Amount of exceptions', 427 | auto.key = list(space = "top", columns = 2, points = FALSE, rectangles = TRUE), 428 | panel=function(x,y,...){ 429 | panel.barchart(x,y,...) 430 | xx <- unsplit(unname(lapply(split(x, y), function(t)cumsum(t)-t/2)), y) 431 | xx <- xx [! x %in% 0] 432 | y <- y [! x %in% 0] 433 | x <- x [! x %in% 0] 434 | ltext(xx, y=y, labels=x) 435 | }) 436 | ``` 437 | 438 | \clearpage 439 | 440 | #### R and package versions used 441 | 442 | ```{r sessionInfo, include=TRUE, echo=TRUE, results='markup'} 443 | devtools::session_info() 444 | ``` 445 | --------------------------------------------------------------------------------