├── .gitignore ├── LICENSE ├── README.md ├── SentMessageLogger.sol ├── blocks.py ├── config.py ├── evm-macos ├── evm-macos-dangerous ├── evm-macos-dangerous.sb ├── evm-ubuntu ├── evm_transition.py ├── fork_choice.py ├── generate_transactions.py ├── genesis_state.py ├── simulator.py ├── validator.py └── visualizer.py /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | *.egg-info/ 24 | .installed.cfg 25 | *.egg 26 | MANIFEST 27 | 28 | # PyInstaller 29 | # Usually these files are written by a python script from a template 30 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 31 | *.manifest 32 | *.spec 33 | 34 | # Installer logs 35 | pip-log.txt 36 | pip-delete-this-directory.txt 37 | 38 | # Unit test / coverage reports 39 | htmlcov/ 40 | .tox/ 41 | .coverage 42 | .coverage.* 43 | .cache 44 | nosetests.xml 45 | coverage.xml 46 | *.cover 47 | .hypothesis/ 48 | .pytest_cache/ 49 | 50 | # Translations 51 | *.mo 52 | *.pot 53 | 54 | # Django stuff: 55 | *.log 56 | local_settings.py 57 | db.sqlite3 58 | 59 | # Flask stuff: 60 | instance/ 61 | .webassets-cache 62 | 63 | # Scrapy stuff: 64 | .scrapy 65 | 66 | # Sphinx documentation 67 | docs/_build/ 68 | 69 | # PyBuilder 70 | target/ 71 | 72 | # Jupyter Notebook 73 | .ipynb_checkpoints 74 | 75 | # pyenv 76 | .python-version 77 | 78 | # celery beat schedule file 79 | celerybeat-schedule 80 | 81 | # SageMath parsed files 82 | *.sage.py 83 | 84 | # Environments 85 | .env 86 | .venv 87 | env/ 88 | venv/ 89 | ENV/ 90 | env.bak/ 91 | venv.bak/ 92 | 93 | # Spyder project settings 94 | .spyderproject 95 | .spyproject 96 | 97 | # Rope project settings 98 | .ropeproject 99 | 100 | # mkdocs documentation 101 | /site 102 | 103 | # mypy 104 | .mypy_cache/ 105 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Copyright 2018 Vlad Zamfir, Steve Marx, Maurelian, Alex Skidanov, Jennifer Strange, Tim Beiko, Aditya Asgaonkar 2 | 3 | Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: 4 | 5 | The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. 6 | 7 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
8 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Ethereum Sharding Proof of Concept 2 | 3 | This repository contains a proof of concept for a sharding implementation on Ethereum by Vlad Zamfir. 4 | The project was built during [ETHBerlin](http://ethberlin.com/), over 2 days, and should *not* be considered final or production-grade. There are probably major bugs/issues. 5 | 6 | ## Getting started 7 | 8 | The dependencies of the simulation (run with ```python simulator.py```) are satisfied by this Dockerfile: 9 | 10 | ``` 11 | FROM ubuntu:xenial 12 | 13 | # PREPARE FOR BUIDL 14 | RUN apt-get update 15 | RUN apt-get upgrade 16 | RUN apt-get install -y software-properties-common 17 | RUN add-apt-repository ppa:fkrull/deadsnakes #source of python 3.6, use at own risk 18 | RUN apt-get update 19 | RUN apt-get install -y build-essential 20 | 21 | # PYTHON3.6 22 | RUN apt-get install -y python3.6 23 | RUN apt-get install -y python3.6-dev 24 | RUN apt-get install -y python3.6-venv 25 | RUN apt-get install -y python3.6-tk 26 | 27 | # GET PIP 28 | RUN apt-get install -y wget 29 | RUN wget https://bootstrap.pypa.io/get-pip.py 30 | RUN python3.6 get-pip.py 31 | 32 | # LINK PYTHON NAMES 33 | RUN ln -s -f /usr/bin/python3.6 /usr/local/bin/python3 34 | RUN ln -s -f /usr/bin/python3.6 /usr/local/bin/python 35 | RUN ln -s -f /usr/local/bin/pip /usr/local/bin/pip3 36 | 37 | # IPYTHON 38 | RUN pip3 install --upgrade ipython 39 | 40 | # WEB3 41 | RUN pip3 install --upgrade web3 42 | 43 | # MATPLOTLIB 44 | RUN pip3 install numpy 45 | RUN apt-get install -y libxml2 46 | RUN apt-get install -y libxml2-dev 47 | RUN pip3 install requests 48 | RUN pip3 install ftfy 49 | #RUN pip3 install zeep 50 | RUN pip3 install pytz 51 | RUN pip3 install docker-py 52 | RUN pip3 install mysql-connector==2.1.6 53 | RUN pip3 install networkx 54 | RUN apt-get install -y libpng-dev 55 | RUN apt-get install -y freetype2-demos 56 | #RUN apt-get install -y freetype-dev 57 | RUN apt-get install -y pkg-config 58 | #RUN pkg-config --cflags freetype 59 | RUN pip3 install --upgrade matplotlib 60 | 61 | ``` 62 | I build it with ```sudo docker build --tag py3web3mpl . 
```, then run the container from the ```ethshardingpoc``` repo, mounting it as a docker volume with the command: 63 | ``` 64 | sudo docker run -it --net=host --env="DISPLAY" --volume="$HOME/.Xauthority:/root/.Xauthority:rw" --volume "$(pwd)":/ethshardingpoc py3web3mpl 65 | ``` 66 | Note that it uses X11 to display matplotlib, so please use it at your own risk, maybe by running the simulation in the container: 67 | ``` 68 | cd ethshardingpoc && python simulator.py 69 | ``` 70 | -------------------------------------------------------------------------------- /SentMessageLogger.sol: -------------------------------------------------------------------------------- 1 | pragma solidity ^0.4.20; 2 | 3 | contract SentMessageLogger { 4 | 5 | event SentMessage( 6 | uint indexed shard_ID, 7 | uint sendGas, 8 | address sendFromAddress, 9 | address indexed sendToAddress, 10 | uint value, 11 | bytes data, 12 | uint indexed base, 13 | uint TTL 14 | ); 15 | 16 | uint TTL = 30; // some number of blocks 17 | 18 | // public instead of external due to https://github.com/ethereum/solidity/issues/3493 19 | function send(uint _shard_ID, uint _sendGas, address _sendToAddress, bytes _data) 20 | public 21 | payable 22 | { 23 | uint base = block.number; 24 | uint value = msg.value; 25 | address sender = msg.sender; 26 | 27 | emit SentMessage( 28 | _shard_ID, 29 | _sendGas, 30 | msg.sender, 31 | _sendToAddress, 32 | msg.value, 33 | _data, 34 | block.number, 35 | TTL 36 | ); 37 | } 38 | 39 | } -------------------------------------------------------------------------------- /blocks.py: -------------------------------------------------------------------------------- 1 | from genesis_state import genesis_state 2 | 3 | from config import SHARD_IDS 4 | from config import VALIDITY_CHECKS_OFF 5 | from config import VALIDITY_CHECKS_WARNING_OFF 6 | from config import DEADBEEF 7 | import copy 8 | import random as rand 9 | 10 | 11 | def format_msg(msg): 12 | return "base: %s, target_shard_ID: %s, payload_hash: %s, random_hash: %s" % (msg.base, msg.target_shard_ID, hash(msg.payload), msg.hash) 13 | 14 | 15 | class MessagePayload: 16 | ''' has properties necessary to create tx on the new shard ''' 17 | def __init__(self, fromAddress, toAddress, value, data):#, nonce, gasPrice, gasLimit): 18 | self.fromAddress = fromAddress 19 | self.toAddress = DEADBEEF # Using "toAddress here leads to an error, apparently not an address" 20 | self.value = value 21 | self.data = data 22 | # the special transaction pusher address will have these values hard coded 23 | # self.nonce = nonce 24 | # self.gasPrice = gasPrice 25 | # self.gasLimit = gasLimit 26 | self.hash = rand.randint(1, 10000000) 27 | 28 | def __hash__(self): 29 | return self.hash 30 | 31 | def __eq__(self, message): 32 | return self.hash == message.hash 33 | 34 | class Message(object): 35 | def __init__(self, base, TTL, target_shard_ID, payload): 36 | super(Message, self).__init__() 37 | 38 | self.hash = rand.randint(1, 10000000) 39 | 40 | assert isinstance(base, Block) 41 | assert base.is_valid(), "expected block to be valid" 42 | self.base = base 43 | assert isinstance(TTL, int), "expected integer time-to-live" 44 | self.TTL = TTL 45 | assert target_shard_ID in SHARD_IDS, "expected shard ID" 46 | self.target_shard_ID = target_shard_ID 47 | assert isinstance(payload, MessagePayload) or payload is None, "expected messagepayload format" 48 | self.payload = payload 49 | 50 | def __hash__(self): 51 | return self.hash 52 | 53 | def __eq__(self, message): 54 | return self.hash == 
message.hash 55 | 56 | 57 | class SwitchMessage_BecomeAParent(Message): 58 | def __init__(self, base, TTL, target_shard_ID, new_child_ID): 59 | super(SwitchMessage_BecomeAParent, self).__init__(base, TTL, target_shard_ID, None) 60 | self.new_child_ID = new_child_ID 61 | self.hash = rand.randint(1, 1000000) 62 | 63 | def __hash__(self): 64 | return self.hash 65 | 66 | def __eq__(self, message): 67 | return self.hash == message.hash 68 | 69 | 70 | class SwitchMessage_Orbit(Message): 71 | def __init__(self, base, TTL, target_shard_ID, new_child_ID, new_parent_ID): 72 | super(SwitchMessage_Orbit, self).__init__(base, TTL, target_shard_ID, None) 73 | self.new_child_ID = new_child_ID 74 | self.new_parent_ID = new_parent_ID 75 | self.hash = rand.randint(1, 1000000) 76 | 77 | def __hash__(self): 78 | return self.hash 79 | 80 | def __eq__(self, message): 81 | return self.hash == message.hash 82 | 83 | 84 | class SwitchMessage_ChangeParent(Message): 85 | def __init__(self, base, TTL, target_shard_ID, new_parent_ID): 86 | super(SwitchMessage_ChangeParent, self).__init__(base, TTL, target_shard_ID, None) 87 | self.new_parent_ID = new_parent_ID 88 | self.hash = rand.randint(1, 1000000) 89 | 90 | def __hash__(self): 91 | return self.hash 92 | 93 | def __eq__(self, message): 94 | return self.hash == message.hash 95 | 96 | class Block: 97 | def __init__(self, ID, prevblock=None, switch_block=False, txn_log=[], sent_log={}, received_log={}, sources={}, parent_ID=None, child_IDs=None, routing_table=None, vm_state=genesis_state): 98 | 99 | if sent_log == {}: 100 | for i in SHARD_IDS: 101 | sent_log[i] = [] 102 | 103 | if received_log == {}: 104 | for i in SHARD_IDS: 105 | received_log[i] = [] 106 | 107 | assert ID in SHARD_IDS, "expected shard ID" 108 | self.shard_ID = ID 109 | self.prevblock = prevblock 110 | self.switch_block = switch_block 111 | self.txn_log = txn_log 112 | self.sent_log = sent_log 113 | for i in SHARD_IDS: 114 | if i not in self.sent_log.keys(): 115 | sent_log[i] = [] 116 | self.received_log = received_log 117 | for i in SHARD_IDS: 118 | if i not in self.received_log.keys(): 119 | received_log[i] = [] 120 | self.sources = sources 121 | self.vm_state = vm_state 122 | self.parent_ID = parent_ID 123 | self.child_IDs = child_IDs 124 | self.routing_table = routing_table 125 | self.hash = rand.randint(1, 10000000) 126 | 127 | if prevblock is None: # genesis block 128 | self.height = 0 129 | else: 130 | self.height = self.prevblock.height + 1 131 | 132 | 133 | def trace_history(self, other_shard_ID): 134 | b = self 135 | print("Tracing block from shard %s with sources from %s" % (self.shard_ID, other_shard_ID)) 136 | while b is not None: 137 | print("%s(%s,%s) - " % (b.hash, b.sources[other_shard_ID].hash, b.switch_block), end='') 138 | b = b.prevblock 139 | print("") 140 | 141 | def __str__(self): 142 | return "Block(%d): shard_ID:%d height:%d" % (self.hash, self.shard_ID, self.height) 143 | 144 | def __eq__(self, block): 145 | return self.hash == block.hash 146 | 147 | def __hash__(self): 148 | return self.hash 149 | 150 | def is_changing_neighbors(self): 151 | # Genesis block isn't changing neighbors 152 | if self.prevblock is None: 153 | return False 154 | 155 | # if the parent shard changes then it's changing neighbors 156 | if self.parent_ID != self.prevblock.parent_ID: 157 | return True 158 | 159 | # or if the child shards change then it's changing neighbors 160 | if self.child_IDs != self.prevblock.child_IDs: 161 | return True 162 | 163 | # otherwise it's not changing neighbors 164 | 
return False 165 | 166 | def is_in_chain(self, block, strict=False): 167 | assert isinstance(block, Block), "expected block" 168 | #assert block.is_valid(), "expected block to be valid" 169 | if self.shard_ID != block.shard_ID: 170 | return False 171 | 172 | if self == block: 173 | return not strict 174 | 175 | if block.height >= self.height: 176 | return False 177 | 178 | if self.prevblock is None: 179 | return False 180 | 181 | return self.prevblock.is_in_chain(block) 182 | 183 | def agrees(self, block): 184 | assert isinstance(block, Block), "expected block" 185 | assert self.shard_ID == block.shard_ID, "expected to check agreement between blocks on same shard" 186 | return self.is_in_chain(block) or block.is_in_chain(self) 187 | 188 | def get_neighbors(self): 189 | neighbors = [] 190 | if self.parent_ID is not None: 191 | neighbors.append(self.parent_ID) 192 | for c in self.child_IDs: 193 | neighbors.append(c) 194 | return neighbors 195 | 196 | def first_block_with_message_in_sent_log(self, ID, message): 197 | assert message in self.sent_log[ID] 198 | if self.prevblock is None: 199 | return self 200 | if message not in self.prevblock.sent_log[ID]: 201 | return self 202 | else: 203 | return self.prevblock.first_block_with_message_in_sent_log(ID, message) 204 | 205 | def next_hop(self, target_shard_ID): 206 | assert self.shard_ID != target_shard_ID 207 | if target_shard_ID in self.routing_table: 208 | return self.routing_table[target_shard_ID] 209 | else: 210 | return self.parent_ID 211 | 212 | def newly_sent(self): 213 | new_sent = dict.fromkeys(SHARD_IDS) 214 | for ID in self.get_neighbors(): 215 | new = [] 216 | num_sent = len(self.sent_log[ID]) 217 | if self.prevblock is not None: 218 | prev_num_sent = len(self.prevblock.sent_log[ID]) 219 | else: 220 | prev_num_sent = 0 221 | num_new_sent = num_sent - prev_num_sent 222 | assert num_new_sent >= 0, "expected growing sent log" 223 | for i in range(num_new_sent): 224 | new.append(self.sent_log[ID][prev_num_sent + i]) 225 | new_sent[ID] = new 226 | 227 | return new_sent 228 | 229 | def newly_received(self): 230 | new_received = {} 231 | for ID in self.get_neighbors(): 232 | new_received[ID] = [] 233 | num_received = len(self.received_log[ID]) 234 | if self.prevblock is not None: 235 | prev_num_received = len(self.prevblock.received_log[ID]) 236 | else: 237 | prev_num_received = 0 238 | num_new_received = num_received - prev_num_received 239 | assert num_new_received >= 0, "expected growing received log, shard_ID: %s, ID: %s, was: %s, now: %s" % (self.shard_ID, ID, prev_num_received, num_received) 240 | for i in range(num_new_received): 241 | new_received[ID].append(self.received_log[ID][prev_num_received + i]) 242 | 243 | return new_received 244 | 245 | def compute_routing_table(self): 246 | self.routing_table = {self.shard_ID: self.shard_ID} 247 | q = [(x, x, self.sources[x]) for x in self.child_IDs] 248 | for target, hop, source_block in q: 249 | self.routing_table[target] = hop 250 | for child in source_block.child_IDs: 251 | q.append((child, hop, source_block.sources[child])) 252 | 253 | # Goal: make this constant time 254 | def is_valid(self): 255 | 256 | # THE VALIDITY SWITCH 257 | if VALIDITY_CHECKS_OFF: 258 | if not VALIDITY_CHECKS_WARNING_OFF: 259 | print("Warning: Validity checks off") 260 | return True, "VALIDITY_CHECKS_OFF" 261 | 262 | # CHECKING INDIVIDUAL TYPES OF INDIVIDUAL DATA FIELDS 263 | if self.shard_ID not in SHARD_IDS: 264 | return False, "expected a shard ID" 265 | if self.prevblock is not None: 266 | if not 
isinstance(self.prevblock, Block): 267 | return False, "expected prevblock to be a block" 268 | if not isinstance(self.sent_log, dict): 269 | return False, "expected sent log" 270 | if not isinstance(self.received_log, dict): 271 | return False, "expected received_log" 272 | # if not isinstance(self.VM_state, EVM_State): 273 | # return False, "expected an EVM State" 274 | 275 | #leaving out the genesis blocks for now.. 276 | if self.prevblock is None: 277 | return True, "Genesis block taken as valid" 278 | # --------------------------------------------------------------------# 279 | 280 | 281 | # we're going to need these over and over again: 282 | new_sent_messages = self.newly_sent() 283 | new_received_messages = self.newly_received() 284 | 285 | saw_switch_messages = False 286 | for msg in [m for msgs in new_sent_messages.values() if msgs for m in msgs]: # iterate the messages themselves, not the (key, list) pairs 287 | if isinstance(msg, (SwitchMessage_BecomeAParent, SwitchMessage_ChangeParent)): 288 | # TODO: validate the correctness of the switch 289 | saw_switch_messages = True 290 | 291 | if not saw_switch_messages: 292 | for key, value in list(new_sent_messages.items()) + list(new_received_messages.items()): 293 | if value is not None: 294 | if len(value) and key not in [self.parent_ID, self.shard_ID] + self.child_IDs: 295 | return False, "Block on shard %s has sent or received message to shard %s which is not its neighbor or itself (%s messages)" % (self.shard_ID, key, new_sent_messages) 296 | 297 | # SHARD ID VALIDITY CONDITIONS 298 | 299 | # check that the prev block is on the same shard as this block 300 | if self.shard_ID != self.prevblock.shard_ID: 301 | return False, "prevblock should be on same shard as this block" 302 | 303 | for ID in self.get_neighbors(): 304 | 305 | # bases for messages sent to shard i are on shard i 306 | for message in new_sent_messages[ID]: 307 | if message.base.shard_ID != ID: 308 | return False, "message sent to shard i has base on shard j != i" 309 | 310 | # bases for received messages are on this shard 311 | for message in new_received_messages[ID]: 312 | if message.base.shard_ID != self.shard_ID: 313 | return False, "received message with base on different shard" 314 | 315 | # sources of messages received from shard i are on shard i 316 | assert ID in self.sources, "ID not in self.sources, ID: %s, self.sources: %s, shard_ID: %s" % (ID, self.sources, self.shard_ID) 317 | if self.sources[ID] is not None: 318 | if self.sources[ID].shard_ID != ID: 319 | return False, "source for shard i on shard j != i" 320 | # --------------------------------------------------------------------# 321 | 322 | 323 | # MONOTONICITY/AGREEMENT CONDITIONS 324 | for ID in self.get_neighbors(): 325 | 326 | # sources are monotonic 327 | if self.prevblock.sources[ID] is not None and ID in [self.parent_ID] + self.child_IDs: 328 | if not self.sources[ID].is_in_chain(self.prevblock.sources[ID]): 329 | return False, "expected sources to be monotonic, shard_ID: %s, source shard id: %s, old height: %s, new height: %s, %s, %s" % (self.shard_ID, ID, self.prevblock.sources[ID].height, self.sources[ID].height, self.sources[ID], self.prevblock.sources[ID]) 330 | 331 | 332 | # previous tx list is a prefix of this txn list 333 | prev_num_txs = len(self.prevblock.txn_log) 334 | new_num_txs = len(self.txn_log) 335 | if new_num_txs < prev_num_txs: 336 | return False, "expected current txn log to be an extension of the previous -- error 1" 337 | for i in range(prev_num_txs): 338 | if self.txn_log == []: 339 | return False, "expected current txn log to be an extension of the previous -- 
error 2, shard_id: %s, old_txn_num: %s, new_txn_num: %s" % (self.shard_ID, prev_num_txs, len(self.txn_log)) 340 | if self.prevblock.txn_log[i] != self.txn_log[i]: 341 | return False, "expected current txn log to be an extension of the previous -- error 3" 342 | 343 | # previous sent log is a prefix of current sent log 344 | prev_num_sent = len(self.prevblock.sent_log[ID]) 345 | new_num_sent = len(self.sent_log[ID]) 346 | if new_num_sent < prev_num_sent: 347 | return False, "expected current sent log to be an extension of the previous -- error 1" 348 | for i in range(prev_num_sent): 349 | if self.prevblock.sent_log[ID][i] != self.sent_log[ID][i]: 350 | return False, "expected current sent log to be an extension of the previous -- error 2" 351 | 352 | # previous received log is a prefix of current received log 353 | prev_num_received = len(self.prevblock.received_log[ID]) 354 | new_num_received = len(self.received_log[ID]) 355 | if new_num_received < prev_num_received: 356 | return False, "expected current received log to be an extension of the previous -- error 1" 357 | for i in range(prev_num_received): 358 | if self.prevblock.received_log[ID][i] != self.received_log[ID][i]: 359 | return False, "expected current received log to be an extension of the previous -- error 2, shard_ID: %s, log shard ID: %s, old length: %s, new_length: %s, items: %s <> %s, pos: %s" % (self.shard_ID, ID, len(self.prevblock.received_log[ID]), len(self.received_log[ID]), format_msg(self.prevblock.received_log 360 | [ID][i]), format_msg(self.received_log[ID][i]), i) 361 | 362 | # bases of sent messages are monotonic 363 | if len(self.prevblock.sent_log[ID]) > 0: 364 | last_old_sent_message = self.prevblock.sent_log[ID][-1] 365 | first_time = True 366 | for message in new_sent_messages[ID]: 367 | if first_time: 368 | m1 = last_old_sent_message 369 | m2 = message 370 | first_time = False 371 | if not first_time: 372 | m1 = m2 373 | m2 = message 374 | 375 | if not m2.base.is_in_chain(m1.base): 376 | return False, "expected bases to be monotonic -- error 1" 377 | 378 | # bases of received messages are monotonic 379 | if len(self.prevblock.received_log[ID]) > 0: 380 | last_old_received_message = self.prevblock.received_log[ID][-1] 381 | first_time = True 382 | for message in new_received_messages[ID]: 383 | if first_time: 384 | m1 = last_old_received_message 385 | m2 = message 386 | first_time = False 387 | if not first_time: 388 | m1 = m2 389 | m2 = message 390 | 391 | 392 | if not m2.base.is_in_chain(m1.base): 393 | return False, "expected bases to be monotonic -- error 2" 394 | 395 | if self.prevblock.sources[ID] is not None and ID in [self.parent_ID] + self.child_IDs: 396 | # sources after bases 397 | # ... easier to check than agreement between bases and sources, 398 | # ... 
also easy for a block producer to enforce 399 | source = self.sources[ID] 400 | if len(self.prevblock.sent_log[ID]) > 0: 401 | base = last_old_sent_message.base # most recent base from prev block 402 | if not source.agrees(base): # source is after ^^ 403 | return False, "expected bases to be in the chain of sources -- error 1" 404 | 405 | if len(new_sent_messages[ID]) > 0: 406 | base = new_sent_messages[ID][-1].base # most recent base from this block 407 | if not source.agrees(base): # source is also after ^^ 408 | return False, "expected bases to be in the chain of sources -- error 2 (sid: %s, id: %s)" % (self.shard_ID, ID) 409 | 410 | 411 | # --------------------------------------------------------------------# 412 | # SOURCE SYNCHRONICITY CONDITIONS 413 | for ID in [self.parent_ID] + self.child_IDs: 414 | if ID is None: 415 | continue 416 | 417 | assert ID in self.sources, "ID not in self.sources, ID: %s, self.sources: %s, shard_ID: %s" % (ID, self.sources, shard_ID) 418 | if self.sources[ID] is not None: 419 | 420 | source = self.sources[ID] 421 | 422 | # check that the received messages are sent by the source 423 | for i in range(len(self.received_log[ID])): # warning: inefficient 424 | if self.received_log[ID][i] != source.sent_log[self.shard_ID][i]: 425 | return False, "expected the received messages to be sent by source" 426 | 427 | # their sent messages are received by the TTL as seen from the sources 428 | for m in source.sent_log[self.shard_ID]: # inefficient 429 | if m in self.received_log[ID]: 430 | continue 431 | # a message incoming (but not yet received) to this shard is expired if... 432 | if m.base.height + m.TTL <= self.height: 433 | self.trace_history(ID) 434 | source.trace_history(self.shard_ID) 435 | return False, "expected all expired messages in source to be recieved, shard_ID: %s ID=%s switch_block?: %s" % (self.shard_ID, ID, self.switch_block) 436 | 437 | # our sent messages are received by the TTL as seen from our sources 438 | for m in self.sent_log[ID]: # inefficient 439 | if m in source.received_log[self.shard_ID]: 440 | continue 441 | # a message outgoing from this shard that hasn't been received is expired if... 442 | # print(m.base.height + m.TTL, source.height) 443 | if m.base.height + m.TTL <= source.height: 444 | return False, "expected all expired sent messages to be received by source" 445 | 446 | # --------------------------------------------------------------------# 447 | # BASE SYNCHRONICITY CONDITIONS 448 | for ID in self.get_neighbors(): 449 | # newly received messages are received by the TTL of the base 450 | for message in new_received_messages[ID]: 451 | if not self.is_in_chain(message.base): 452 | return False, "expected only to receive messages with base in chain, my shard id: %s, their shard id: %s" % (self.shard_ID, ID) 453 | # Message on received this block are expired if... 454 | if message.base.height + message.TTL < self.height: 455 | return False, "message not received within TTL of its base" 456 | 457 | # questionable validity condition 458 | # our sent messages are received by the TTL as seen from our bases 459 | for m1 in self.sent_log[ID]: # super inefficient 460 | for m2 in self.sent_log[ID]: 461 | if m1 in m2.base.received_log[self.shard_ID]: 462 | continue 463 | # m1 from this shard that hasn't been received by m2.base, and is expired if... 
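# ...i.e. its TTL window, counted from m1's own base on the neighbor shard, has fully
# elapsed by the height of m2's base, in which case m2.base should already have received m1: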
464 | if m1.base.height + m1.TTL <= m2.base.height: 465 | return False, "expected sent messages to be received by the TTL" 466 | 467 | # --------------------------------------------------------------------# 468 | # ALL RECEIVED MESSAGES THAT DO NOT TARGET THIS SHARD MUST BE REROUTED 469 | payloads_to_reroute = [] 470 | payloads = [] 471 | for ID in self.get_neighbors(): 472 | for message in new_received_messages[ID]: 473 | if message.target_shard_ID != self.shard_ID: 474 | assert message.payload not in payloads 475 | payloads.append(message.payload) 476 | payloads_to_reroute.append((message.target_shard_ID, message.TTL, message.payload)) 477 | 478 | for ID in self.get_neighbors(): 479 | for message in new_sent_messages[ID]: 480 | key = (message.target_shard_ID, message.TTL, message.payload) 481 | if key in payloads_to_reroute: 482 | payloads_to_reroute.remove(key) 483 | 484 | if len(payloads_to_reroute): 485 | return False, "%s messages were not rerouted" % len(payloads_to_reroute) 486 | 487 | # --------------------------------------------------------------------# 488 | 489 | # made it! 490 | return True, "Valid block" 491 | 492 | 493 | ''' 494 | as a general thought, our goal is to make "the world" seem valid from the point of view of every block 495 | 496 | "valid" in our case means: 497 | 498 | logs hold messages with bases and sources from the expected shards 499 | logs grow monotonically 500 | sources and bases are monotonic 501 | sources and bases agree 502 | receives happen from sources 503 | 504 | And also these more difficult ones: 505 | 506 | local receives happen before the TTL (is the received message's base not more than TTL prevblocks away) 507 | sents are received before the TTL (as seen from bases) 508 | 509 | We need to look at our receives and see them be received by the TTL 510 | 511 | And look at the bases and sources and: 512 | 513 | see our sent messages be received by the TTL 514 | 515 | see their sent messages be received by the TTL 516 | ''' 517 | -------------------------------------------------------------------------------- /config.py: -------------------------------------------------------------------------------- 1 | import random as rand 2 | from web3 import Web3 3 | import copy 4 | import sys 5 | from collections import defaultdict 6 | 7 | ORBIT_MODE = False 8 | if 'orbit' in sys.argv: 9 | ORBIT_MODE = True 10 | # whether to generate more blocks in shards 0 and 1 (makes ORBITs happen faster) 11 | MORE_BLOCKS_IN = None #[0, 1, 3, 4] # create more blocks in these shards. 
Set to None to disable 12 | 13 | SWITCH_BLOCK_EXTRA = 2 # a multiplier for switch block weights (is added on top of regular weight) 14 | 15 | if not ORBIT_MODE: 16 | INITIAL_TOPOLOGY = [[1, 2], [3, 4], [5], [], [], [6], []] 17 | else: 18 | INITIAL_TOPOLOGY = [[1], []] 19 | NUM_SHARDS = len(INITIAL_TOPOLOGY) 20 | 21 | NUM_VALIDATORS_PER_SHARD = 5 22 | NUM_VALIDATORS = 1 + NUM_VALIDATORS_PER_SHARD*NUM_SHARDS 23 | 24 | SHARD_IDS = list(range(NUM_SHARDS)) 25 | VALIDATOR_NAMES = [] 26 | for i in range(NUM_VALIDATORS): 27 | VALIDATOR_NAMES.append(i) 28 | VALIDATOR_WEIGHTS = {} 29 | for v in VALIDATOR_NAMES: 30 | VALIDATOR_WEIGHTS[v] = rand.uniform(7, 10) 31 | 32 | 33 | assert all([x > y for (y, lst) in enumerate(INITIAL_TOPOLOGY) for x in lst]) 34 | 35 | VALIDATOR_SHARD_ASSIGNMENT = {} 36 | SHARD_VALIDATOR_ASSIGNMENT = {} 37 | 38 | remaining_validators = copy.copy(VALIDATOR_NAMES) 39 | remaining_validators.remove(0) 40 | 41 | for ID in SHARD_IDS: 42 | sample = rand.sample(remaining_validators, NUM_VALIDATORS_PER_SHARD) 43 | 44 | SHARD_VALIDATOR_ASSIGNMENT[ID] = sample 45 | for v in sample: 46 | remaining_validators.remove(v) 47 | VALIDATOR_SHARD_ASSIGNMENT[v] = ID 48 | print(VALIDATOR_SHARD_ASSIGNMENT) 49 | 50 | TTL_CONSTANT = 5 51 | TTL_SWITCH_CONSTANT = 1 52 | assert TTL_CONSTANT > 0 53 | 54 | NUM_TRANSACTIONS = 100 55 | 56 | # Experiment parameters 57 | NUM_ROUNDS = 1000 58 | NUM_WITHIN_SHARD_RECEIPTS_PER_ROUND = NUM_SHARDS * 5 // 2 59 | NUM_BETWEEN_SHARD_RECEIPTS_PER_ROUND = NUM_SHARDS * 7 // 2 60 | MEMPOOL_DRAIN_RATE = 5 61 | 62 | # In ORBIT_MODE the first orbit happens at SWITCH_ROUND, not at either of the ORBIT_ROUNDs 63 | SWITCH_ROUND = 5 64 | ORBIT_ROUND_1 = 45 65 | ORBIT_ROUND_2 = 85 66 | 67 | # Instant broadcast 68 | FREE_INSTANT_BROADCAST = False 69 | 70 | # Validity check options 71 | VALIDITY_CHECKS_OFF = False 72 | VALIDITY_CHECKS_WARNING_OFF = False 73 | 74 | # The deadbeef address 75 | DEADBEEF = Web3.toChecksumAddress(hex(1271270613000041655817448348132275889066893754095)) 76 | 77 | # Reporting Parameters 78 | REPORTING = True 79 | SHOW_FRAMES = True 80 | SAVE_FRAMES = False 81 | FIG_SIZE = (30, 20) 82 | REPORT_INTERVAL = 1 83 | PAUSE_LENGTH = 0.000000001 84 | DISPLAY_WIDTH = 250 85 | DISPLAY_HEIGHT = 250 86 | DISPLAY_MARGIN = 5 87 | SHARD_X_SPACING = 5 88 | SHARD_Y_SPACING = 5 89 | SHARD_MESSAGE_YOFFSET = 10 90 | SHARD_MESSAGE_XOFFSET = 5 91 | 92 | CONSENSUS_MESSAGE_HEIGHTS_TO_DISPLAY_IN_ROOT = 25 93 | 94 | # Set to True to restrict routing to paths specified in MSG_ROUTES 95 | RESTRICT_ROUTING = True 96 | 97 | # Define message routes in a dict {source: [destination1, destination2, ...]} 98 | if not ORBIT_MODE: 99 | MSG_ROUTES = {3: [6], 6: [3]} 100 | else: 101 | MSG_ROUTES = {1: [0], 0: [1]} 102 | -------------------------------------------------------------------------------- /evm-macos: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | sandbox-exec -f evm-macos-dangerous.sb ./evm-macos-dangerous $* 3 | -------------------------------------------------------------------------------- /evm-macos-dangerous: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/smarx/ethshardingpoc/62420e26f00be20369e05690792e957a131e68f0/evm-macos-dangerous -------------------------------------------------------------------------------- /evm-macos-dangerous.sb: -------------------------------------------------------------------------------- 1 | (version 1) 2 | (deny default) 3 | 4 | 
(allow file-read-data file-read-metadata 5 | (regex 6 | #"^/System/Library" 7 | #"^/usr/lib" 8 | #"^/dev/stdin" 9 | #"^/dev/fd/0" 10 | ) 11 | ) 12 | 13 | (allow mach* sysctl-read) 14 | 15 | (allow process-exec (regex "evm-macos-dangerous")) 16 | 17 | -------------------------------------------------------------------------------- /evm-ubuntu: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/smarx/ethshardingpoc/62420e26f00be20369e05690792e957a131e68f0/evm-ubuntu -------------------------------------------------------------------------------- /evm_transition.py: -------------------------------------------------------------------------------- 1 | import binascii 2 | from collections import defaultdict 3 | import json 4 | import os 5 | import subprocess 6 | import sys 7 | 8 | from blocks import * 9 | from web3 import Web3 10 | from genesis_state import * 11 | from config import DEADBEEF 12 | from generate_transactions import format_transaction 13 | 14 | abi = json.loads('[{"constant":false,"inputs":[{"name":"_shard_ID","type":"uint256"},{"name":"_sendGas","type":"uint256"},{"name":"_sendToAddress","type":"address"},{"name":"_data","type":"bytes"}],"name":"send","outputs":[],"payable":true,"stateMutability":"payable","type":"function"},{"anonymous":false,"inputs":[{"indexed":true,"name":"shard_ID","type":"uint256"},{"indexed":false,"name":"sendGas","type":"uint256"},{"indexed":false,"name":"sendFromAddress","type":"address"},{"indexed":true,"name":"sendToAddress","type":"address"},{"indexed":false,"name":"value","type":"uint256"},{"indexed":false,"name":"data","type":"bytes"},{"indexed":true,"name":"base","type":"uint256"},{"indexed":false,"name":"TTL","type":"uint256"}],"name":"SentMessage","type":"event"}]') 15 | 16 | evm_path = './evm-ubuntu' 17 | if (sys.platform == 'darwin'): 18 | evm_path = './evm-macos' 19 | 20 | contract = web3.eth.contract(address='0x000000000000000000000000000000000000002A', abi=abi) 21 | 22 | 23 | 24 | def convert_state_to_pre(state): 25 | ''' The evm output isn't quite how we want it ''' 26 | pre = {} 27 | for key, value in state["state"]["accounts"].items(): 28 | # print(value) 29 | pre[key] = value 30 | return pre 31 | 32 | # NOTES: from convo with steve 33 | # The “vm state” is really the “pre” part of what we send to evm. 34 | # The “env” stuff is constant 35 | # the “transactions” list is a list of transactions that come from the 36 | # mempool (originally a file full of test data?) and ones that are constructed from 37 | # `MessagePayload`s. (This is done via `web3.eth.account.signTransaction(…)`.) 
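# Roughly, the JSON piped to `evm apply` on stdin looks like (illustrative shape, values elided):
#   {"pre": {<address>: {"balance": ..., "code": ..., "nonce": ..., "storage": {...}}, ...},
#    "env": {...},  # constant, copied from the genesis state
#    "transactions": [<formatted, signed transactions>]}
# Its output provides result["state"]["accounts"] (the next "pre") and result["receipts"],
# whose logs are scanned for SentMessage events in apply_to_state below.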
38 | # function apply(vm_state, [tx], mapping(S => received)) -> (vm_state, mapping(S => received) ) 39 | def apply_to_state(pre_state, tx, received_log, genesis_blocks): 40 | # print(pre_state["pre"][address]["nonce"]) 41 | nonce = int(pre_state["pre"][pusher_address]["nonce"], 0) 42 | flattened_payloads = [message.payload for l in received_log.values() for message in l] 43 | for payload in flattened_payloads: 44 | transaction = { 45 | "gas": 3000000, 46 | "gasPrice": "0x2", 47 | "nonce": hex(nonce), 48 | "to": payload.toAddress, 49 | "value": payload.value, 50 | "data": payload.data, 51 | } 52 | nonce += 1 53 | signed = web3.eth.account.signTransaction(transaction, pusher_key) 54 | tx.append(format_transaction(transaction, signed)) 55 | 56 | # create inputst evm by combining the pre_state, env, and transactions 57 | transition_inputs = {} 58 | transition_inputs["pre"] = pre_state["pre"] 59 | transition_inputs["env"] = pre_state["env"] 60 | transition_inputs["transactions"] = tx 61 | 62 | # open evm 63 | evm = subprocess.Popen([evm_path, 'apply', '/dev/stdin'], stdin=subprocess.PIPE, stdout=subprocess.PIPE) 64 | 65 | out = evm.communicate(json.dumps(transition_inputs).encode())[0].decode('utf-8') 66 | # print("out2", out) 67 | 68 | result = json.loads(out) 69 | new_state = { 70 | "env": pre_state["env"], 71 | "pre": result["state"]["accounts"].copy(), 72 | } 73 | for addr, account in new_state["pre"].items(): 74 | for key in ("nonce", "balance"): 75 | account[key] = hex(int(account[key])) 76 | for key in ("code", "codeHash"): 77 | account[key] = "0x" + account[key] 78 | 79 | # look through logs for outgoing messages 80 | sent_log = {} 81 | for ID in SHARD_IDS: 82 | sent_log[ID] = [] 83 | for receipt in result.get('receipts', []): 84 | if receipt['logs'] is not None: 85 | for log in receipt['logs']: 86 | log['topics'] = [binascii.unhexlify(t[2:]) for t in log['topics']] 87 | log['data'] = binascii.unhexlify(log['data'][2:]) 88 | for event in contract.events.SentMessage().processReceipt(receipt): 89 | sent_log[event.args.shard_ID].append( 90 | # This is not a message that will be stored in the sent log, it will be 91 | # postprocessed in make_block. Namely, the next hop shard will be computed, 92 | # the base block will be computed and TTL will be assigned. 
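# The Block constructed as the `base` below is just a placeholder (it only carries the
# target shard_ID and genesis sources) until make_block does that postprocessing; the
# event's addresses are lower-cased and stripped of their "0x" prefix to match how
# accounts are keyed in genesis_state.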
93 | Message( 94 | Block(event.args.shard_ID, sources={ID : genesis_blocks[ID] for ID in SHARD_IDS}), 95 | 10, 96 | event.args.shard_ID, 97 | MessagePayload( 98 | event.args.sendFromAddress.lower()[2:], 99 | event.args.sendToAddress.lower()[2:], 100 | event.args.value, 101 | event.args.data, 102 | ) 103 | ) 104 | ) 105 | 106 | return new_state, sent_log 107 | 108 | # received_log = ReceivedLog() 109 | # received_log.add_received_message(2, Message( 110 | # None, # base 111 | # 5, # TTL 112 | # MessagePayload( 113 | # 0, # from address 114 | # "0x1234567890123456789012345678901234567890", # to address 115 | # 42, # value 116 | # "0x", # data 117 | # ) 118 | # )) 119 | # new_state, sent_log = apply_to_state(vm_state, transactions, received_log) 120 | # print(json.dumps(new_state)) 121 | # print(sent_log) 122 | -------------------------------------------------------------------------------- /fork_choice.py: -------------------------------------------------------------------------------- 1 | from blocks import Block 2 | from config import SHARD_IDS 3 | import copy as copy 4 | 5 | 6 | 7 | # filter blocks with any orphaned sources in parent 8 | 9 | # This checks filter conditions 10 | # Filter conditions are like validity conditions for the fork choice 11 | # They can't be checked from the block data structure! 12 | def is_block_filtered(child, parent_fork_choice=None): 13 | 14 | # No parent? No filter 15 | if parent_fork_choice is None: 16 | return False 17 | 18 | assert isinstance(parent_fork_choice, Block), "Expected parent fork choice to be a block" 19 | 20 | parent_ID = parent_fork_choice.shard_ID 21 | #assert parent_ID == child.prevblock.parent_ID, "Expected parent fork choice to be on the parent shard of the prevblock" 22 | 23 | child_ID = child.shard_ID 24 | 25 | # filter condition for sources: 26 | if not parent_fork_choice.is_in_chain(child.sources[parent_ID]): # c.sources[c.parent_ID] != parent_fork_choice_until_source: 27 | print("Reason 1") 28 | return True 29 | 30 | # Fiter blocks that don't agree with parent source for child 31 | if not parent_fork_choice.sources[child_ID].agrees(child): 32 | print("Reason 2") 33 | return True 34 | 35 | # Filter blocks that send messages that are not received and expired in parent fork choice 36 | for shard_message in child.sent_log[parent_ID]: 37 | if not parent_fork_choice.agrees(shard_message.base): 38 | print("Reason 3") 39 | return True 40 | 41 | if shard_message not in parent_fork_choice.received_log[child_ID]: 42 | if shard_message.base.height + shard_message.TTL <= parent_fork_choice.height: 43 | print("Reason 4. parent_ID: %s, shard_message.base.height: %s, parent_fork.h: %s. 
My block: %s, msg_base: %s" % (parent_ID, shard_message.base.height, parent_fork_choice.height, child.hash, shard_message.base.hash)) 44 | return True 45 | 46 | # Filter blocks that haven't received all expired messages from parent fork choice 47 | for shard_message in parent_fork_choice.sent_log[child_ID]: 48 | if not child.agrees(shard_message.base): 49 | print("Reason 5") 50 | return True 51 | 52 | if shard_message not in child.received_log[parent_ID]: 53 | if shard_message.base.height + shard_message.TTL <= child.height: 54 | print("Reason 6") 55 | return True 56 | 57 | return False 58 | 59 | 60 | # now going to "give" all blocks weights, blocks without weights don't get their weight added in (basically weight 0) 61 | 62 | forks = {} 63 | already_jumped = [] 64 | 65 | def update_forks(block): 66 | global forks 67 | global already_jumped 68 | 69 | if block.is_in_chain(forks[block.shard_ID]): 70 | forks[block.shard_ID] = block 71 | 72 | return True 73 | else: 74 | return False 75 | 76 | def fork_choice(target_shard_ID, starting_block, blocks, block_weights, genesis_blocks, current=None): 77 | if current is None: 78 | current = {} 79 | for shard_ID, block in genesis_blocks.items(): 80 | current[shard_ID] = block 81 | 82 | # some unnecessary redundancy 83 | assert starting_block.shard_ID == target_shard_ID 84 | assert current[target_shard_ID] == starting_block 85 | 86 | children = [b for b in [b for b in blocks if b.prevblock is not None] if b.prevblock == starting_block] 87 | 88 | if starting_block.parent_ID is not None: 89 | the_source = starting_block.sources[starting_block.parent_ID] 90 | assert the_source.agrees(current[starting_block.parent_ID]) 91 | if not current[starting_block.parent_ID].is_in_chain(the_source, strict=True): 92 | fork_choice(starting_block.parent_ID, current[starting_block.parent_ID], blocks, block_weights, genesis_blocks, current) 93 | if not current[starting_block.parent_ID].is_in_chain(the_source): 94 | assert False 95 | 96 | if current[target_shard_ID] != starting_block: # we ended up recursively calling back 97 | old = current[target_shard_ID] 98 | try_it = fork_choice(target_shard_ID, current[target_shard_ID], blocks, block_weights, genesis_blocks, current) 99 | assert try_it == old 100 | return current[target_shard_ID] 101 | 102 | filter_block = current[starting_block.parent_ID] 103 | additional_filter_block = None 104 | if starting_block.prevblock is not None and starting_block.prevblock.parent_ID != starting_block.parent_ID and starting_block.prevblock.parent_ID not in starting_block.child_IDs and starting_block.prevblock.parent_ID is not None: 105 | assert starting_block.prevblock.parent_ID == 1, starting_block.prevblock.parent_ID 106 | additional_filter_block = current[starting_block.prevblock.parent_ID] 107 | 108 | filter_child = {} 109 | print("Start filtering blocks for block in shard %s" % target_shard_ID) 110 | for c in children: 111 | filter_child[c] = is_block_filtered(c, filter_block) # deals with filter_block = None by not filtering 112 | #if not filter_child[c] and additional_filter_block is not None: 113 | # filter_child[c] = is_block_filtered(c, additional_filter_block) 114 | 115 | children = [c for c in children if not filter_child[c]] 116 | 117 | if len(children) == 0: 118 | return current[target_shard_ID] 119 | 120 | # scorekeeping stuff 121 | max_score = 0 122 | winning_child = children[0] 123 | 124 | for c in children: 125 | 126 | # calculates sum of agreeing weight 127 | score = 0 128 | for b in block_weights.keys(): 129 | if 
b.is_in_chain(c): 130 | score += block_weights[b] 131 | 132 | # check if this is a high score 133 | if score > max_score: 134 | winning_child = c 135 | max_score = score 136 | 137 | assert winning_child.is_in_chain(current[target_shard_ID]) 138 | assert current[target_shard_ID] == starting_block 139 | current[target_shard_ID] = winning_child 140 | return fork_choice(target_shard_ID, winning_child, blocks, block_weights, genesis_blocks, current) 141 | 142 | -------------------------------------------------------------------------------- /generate_transactions.py: -------------------------------------------------------------------------------- 1 | import json 2 | from web3 import Web3 3 | from config import NUM_TRANSACTIONS 4 | from config import DEADBEEF 5 | from config import SHARD_IDS 6 | 7 | web3 = Web3() 8 | 9 | alice_key = '0x4c0883a69102937d6231471b5dbb6204fe5129617082792ae468d01a3f362318' 10 | alice_address = web3.eth.account.privateKeyToAccount(alice_key).address.lower()[2:] 11 | 12 | abi = json.loads('[{"constant":false,"inputs":[{"name":"_shard_ID","type":"uint256"},{"name":"_sendGas","type":"uint256"},{"name":"_sendToAddress","type":"address"},{"name":"_data","type":"bytes"}],"name":"send","outputs":[],"payable":true,"stateMutability":"payable","type":"function"},{"anonymous":false,"inputs":[{"indexed":true,"name":"shard_ID","type":"uint256"},{"indexed":false,"name":"sendGas","type":"uint256"},{"indexed":false,"name":"sendFromAddress","type":"address"},{"indexed":true,"name":"sendToAddress","type":"address"},{"indexed":false,"name":"value","type":"uint256"},{"indexed":false,"name":"data","type":"bytes"},{"indexed":true,"name":"base","type":"uint256"},{"indexed":false,"name":"TTL","type":"uint256"}],"name":"SentMessage","type":"event"}]') 13 | contract = web3.eth.contract(address='0x000000000000000000000000000000000000002A', abi=abi) 14 | 15 | 16 | def format_transaction(tx, signed): 17 | if isinstance(tx["data"], bytes): 18 | data = tx["data"].hex() 19 | else: 20 | data = tx["data"] 21 | 22 | return { 23 | "gas": hex(tx["gas"]), 24 | "gasPrice": tx["gasPrice"], 25 | "hash": signed["hash"].hex(), 26 | "input": data, 27 | "nonce": tx["nonce"], 28 | "r": hex(signed["r"]), 29 | "s": hex(signed["s"]), 30 | "v": hex(signed["v"]), 31 | "to": tx["to"], 32 | "value": hex(tx["value"]), 33 | } 34 | 35 | 36 | # Alice sends cross shard transactions 37 | def gen_cross_shard_tx(nonce, shard_ID): 38 | cross_shard_tx = contract.functions.send(shard_ID, 300000, DEADBEEF, bytes(0)).buildTransaction({ "gas": 3000000, "gasPrice": "0x2", "nonce": hex(nonce), "value": 1}) 39 | 40 | cross_shard_tx_signed = web3.eth.account.signTransaction(cross_shard_tx, alice_key) 41 | cross_shard_tx_formatted = format_transaction(cross_shard_tx, cross_shard_tx_signed) 42 | return cross_shard_tx_formatted 43 | 44 | 45 | ''' 46 | # Bob sends simple transfers between account in the same shard 47 | def gen_in_shard_tx(nonce): 48 | private_key_bob = '0x5c0883a69102937d6231471b5dbb6204fe5129617082792ae468d01a3f362318' 49 | address_bob = web3.eth.account.privateKeyToAccount(private_key_bob).address.lower()[2:] 50 | in_shard_tx = { 51 | "gas": 3000000, 52 | "gasPrice": "0x2", 53 | "nonce": "0x0", # we will need to overwrite this by getting the nonce from the state 54 | "to": "0x000000000000000000000000000000000000002F", 55 | "value": 20, 56 | "data": "0x", 57 | } 58 | 59 | in_shard_tx_signed = web3.eth.account.signTransaction(in_shard_tx, private_key_bob) 60 | in_shard_tx_formatted = format_transaction(in_shard_tx, 
in_shard_tx_signed) 61 | return in_shard_tx_formatted 62 | 63 | 64 | def gen_payloads(): 65 | private_key_alice = '0x4c0883a69102937d6231471b5dbb6204fe5129617082792ae468d01a3f362318' 66 | address_alice = web3.eth.account.privateKeyToAccount(private_key_alice).address.lower()[2:] 67 | 68 | payloadA = { 69 | "fromAddress": address_alice, 70 | "toAddress": "0x000000000000000000000000000000000000002A", 71 | "value": 100, 72 | "data": cross_shard_tx["data"] 73 | } 74 | 75 | # MessagePayload(address_alice, "0x000000000000000000000000000000000000002A", 100, cross_shard_tx["data"]) 76 | tx = [] 77 | for x in range(0, 100): 78 | tx.append(payloadA) 79 | return tx 80 | 81 | ''' 82 | 83 | def gen_alice_and_bob_tx(dest_shards = None): 84 | tx = [] 85 | if dest_shards is None: 86 | for x in range(0, NUM_TRANSACTIONS): 87 | tx.append(gen_cross_shard_tx(x, SHARD_IDS[x%len(SHARD_IDS)])) 88 | else: 89 | for x in range(0, NUM_TRANSACTIONS): 90 | tx.append(gen_cross_shard_tx(x, dest_shards[x % len(dest_shards)])) 91 | return tx 92 | -------------------------------------------------------------------------------- /genesis_state.py: -------------------------------------------------------------------------------- 1 | from web3 import Web3 2 | from config import DEADBEEF 3 | 4 | web3 = Web3() 5 | 6 | # same "pusher" address on each shard 7 | pusher_key = '0x6c0883a69102937d6231471b5dbb6204fe5129617082792ae468d01a3f362318' 8 | pusher_address = web3.eth.account.privateKeyToAccount(pusher_key).address.lower()[2:] 9 | 10 | # just gonna reuse this initial state on each shard. 11 | genesis_state = { 12 | "env": { 13 | "currentCoinbase": "0xc94f5374fce5edbc8e2a8697c15331677e6ebf0b", 14 | "currentDifficulty": "0x20000", 15 | "currentGasLimit": "0x750a163df65e8a", 16 | "currentNumber": "1", 17 | "currentTimestamp": "1000", # TODO: we may need this to change 18 | "previousHash": "dac58aa524e50956d0c0bae7f3f8bb9d35381365d07804dd5b48a5a297c06af4" 19 | }, 20 | "pre": { 21 | pusher_address: { 22 | "balance": "0x5ffd4878be161d74", 23 | "code": "0x", 24 | "nonce": "0x0", 25 | "storage": {} 26 | }, 27 | DEADBEEF[2:].lower(): { 28 | "balance": "0x1", 29 | "code": "0x", 30 | "nonce": "0x0", 31 | "storage": {} 32 | }, 33 | "a94f5374fce5edbc8e2a8697c15331677e6ebf0b".lower(): { 34 | "balance": "0x5ffd4878be161d74", 35 | "code": "0x", 36 | "nonce": "0x0", 37 | "storage": {} 38 | }, 39 | "2c7536E3605D9C16a7a3D7b1898e529396a65c23".lower(): { 40 | "balance": "0x5ffd4878be161d74", 41 | "code": "0x", 42 | "nonce": "0x0", 43 | "storage": {} 44 | }, 45 | "c227e8f6eE49f35ddf4dd73F105cF743914B11Af".lower(): { 46 | "balance": "0x5ffd4878be161d74", 47 | "code": "0x", 48 | "nonce": "0x0", 49 | "storage": {} 50 | }, 51 | "8a8eafb1cf62bfbeb1741769dae1a9dd47996192".lower():{ 52 | "balance": "0xfeedbead", 53 | "nonce" : "0x00" 54 | }, 55 | "000000000000000000000000000000000000002a": { 56 | "balance": "0x00", 57 | "nonce": "0x00", 58 | "storage": {}, 59 | "code": 
"0x608060405260043610610041576000357c0100000000000000000000000000000000000000000000000000000000900463ffffffff168063e09ee87014610046575b600080fd5b6100d46004803603810190808035906020019092919080359060200190929190803573ffffffffffffffffffffffffffffffffffffffff169060200190929190803590602001908201803590602001908080601f01602080910402602001604051908101604052809392919081815260200183838082843782019150505050505091929192905050506100d6565b005b6000806000439250349150339050438573ffffffffffffffffffffffffffffffffffffffff16887fe9fbdfd23831dbc2bdec9e9ef0d5ac734f56996d4211992cc083e97f2770ba428933348a600054604051808681526020018573ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200184815260200180602001838152602001828103825284818151815260200191508051906020019080838360005b838110156101a957808201518184015260208101905061018e565b50505050905090810190601f1680156101d65780820380516001836020036101000a031916815260200191505b50965050505050505060405180910390a4505050505050505600a165627a7a7230582086844d62bfd54b247b20657c69410cefe95f27dcb63829d23c83f0d60883191e0029", 60 | } 61 | } 62 | } 63 | 64 | -------------------------------------------------------------------------------- /simulator.py: -------------------------------------------------------------------------------- 1 | import random 2 | import copy 3 | import json 4 | import hashlib 5 | from visualizer import report, init_plt 6 | 7 | from blocks import Block 8 | from validator import Validator 9 | from validator import ConsensusMessage 10 | from validator import UnresolvedDeps 11 | from validator import have_made_block 12 | from generate_transactions import gen_alice_and_bob_tx 13 | 14 | from config import * 15 | 16 | def add_switch_message(parent_shard, child_to_become_parent, child_to_move_down, position): 17 | global mempools 18 | mempools[parent_shard].insert(position, {'opcode': 'switch', 'child_to_become_parent': child_to_become_parent, 'child_to_move_down': child_to_move_down}) 19 | 20 | def add_inv_switch_message(initiating_shard, shard_to_become_parent, child_to_move_up, position): 21 | global mempools 22 | mempools[initiating_shard].insert(position, {'opcode': 'inv_switch', 'shard_to_become_parent': shard_to_become_parent, 'child_to_move_up': child_to_move_up}) 23 | 24 | 25 | def add_orbit_message(parent_shard, child_to_become_parent, shard_to_move_down, position): 26 | mempools[parent_shard].insert(position, {'opcode': 'orbit', 'child_to_become_parent': child_to_become_parent, 'shard_to_move_down': shard_to_move_down}) 27 | 28 | # Setup 29 | GENESIS_BLOCKS = {} 30 | GENESIS_MESSAGES = [] 31 | print("SHARD_IDS", SHARD_IDS) 32 | for ID in SHARD_IDS: 33 | GENESIS_BLOCKS[ID] = Block(ID, sources={}) # temporarily set sources to {}, since genesis blocks are not known yet 34 | print("GENESIS_BLOCKS[ID].shard_ID") 35 | print("ID", ID) 36 | print(GENESIS_BLOCKS[ID].shard_ID) 37 | GENESIS_MESSAGES.append(ConsensusMessage(GENESIS_BLOCKS[ID], 0, [])) # The watcher is the sender of the genesis blocks 38 | for ID2 in SHARD_IDS: 39 | print("len(GENESIS_BLOCKS[ID].sent_log.log[ID2]", len(GENESIS_BLOCKS[ID].sent_log[ID2])) 40 | 41 | 42 | for ID in SHARD_IDS: 43 | GENESIS_BLOCKS[ID].sources = {ID : GENESIS_BLOCKS[ID] for ID in SHARD_IDS} 44 | GENESIS_BLOCKS[ID].parent_ID = None 45 | for _ in SHARD_IDS: 46 | if ID in INITIAL_TOPOLOGY[_]: 47 | GENESIS_BLOCKS[ID].parent_ID = _ 48 | GENESIS_BLOCKS[ID].child_IDs = INITIAL_TOPOLOGY[ID] 49 | 50 | for ID in SHARD_IDS: 51 | GENESIS_BLOCKS[ID].compute_routing_table() 52 | 53 | validators = {} 54 | 
for name in VALIDATOR_NAMES: 55 | validators[name] = Validator(name) 56 | 57 | # Watcher lives at validator name 0 and receives all the messages 58 | watcher = validators[0] 59 | 60 | for v in VALIDATOR_NAMES: 61 | for genesis_message in GENESIS_MESSAGES: 62 | validators[v].receive_consensus_message(genesis_message) 63 | 64 | # GLOBAL MEMPOOLS 65 | mempools = {} 66 | if RESTRICT_ROUTING: 67 | for ID in SHARD_IDS: 68 | if ID in MSG_ROUTES: 69 | mempools[ID] = gen_alice_and_bob_tx(MSG_ROUTES[ID]) 70 | else: 71 | mempools[ID] = [] 72 | else: 73 | txs = gen_alice_and_bob_tx() 74 | for ID in SHARD_IDS: 75 | mempools[ID] = copy.copy(txs) 76 | 77 | 78 | # GLOBAL VIEWABLES 79 | viewables = {} 80 | for v in VALIDATOR_NAMES: 81 | viewables[v] = {} 82 | for w in VALIDATOR_NAMES: 83 | viewables[v][w] = [] 84 | 85 | max_height = 0 86 | 87 | # SIMULATION LOOP: 88 | for i in range(NUM_ROUNDS): 89 | # Make a new message from a random validator on a random shard 90 | rand_ID = i % len(SHARD_IDS) #random.choice(SHARD_IDS) 91 | next_proposer = random.choice(SHARD_VALIDATOR_ASSIGNMENT[rand_ID]) 92 | 93 | while next_proposer == 0: 94 | rand_ID = random.choice(SHARD_IDS) 95 | if MORE_BLOCKS_IN is not None: 96 | if random.choice([True, False]): 97 | rand_ID = random.choice(MORE_BLOCKS_IN) 98 | next_proposer = random.choice(SHARD_VALIDATOR_ASSIGNMENT[rand_ID]) 99 | 100 | if ORBIT_MODE: 101 | for k in range(10): 102 | if i == SWITCH_ROUND + 40*k: 103 | add_orbit_message(0, 1, 0, len(watcher.make_fork_choice(0, GENESIS_BLOCKS).txn_log) + 11) 104 | 105 | 106 | if i == SWITCH_ROUND + 20 + 40*k: 107 | add_orbit_message(1, 0, 1, len(watcher.make_fork_choice(1, GENESIS_BLOCKS).txn_log) + 11) 108 | 109 | else: 110 | if i == SWITCH_ROUND: 111 | add_switch_message(1, 4, 3, len(watcher.make_fork_choice(1, GENESIS_BLOCKS).txn_log) + 1) 112 | 113 | if not 'switch' in sys.argv: 114 | if i == ORBIT_ROUND_1: 115 | add_orbit_message(0, 1, 0, len(watcher.make_fork_choice(0, GENESIS_BLOCKS).txn_log) + 1) 116 | 117 | if i == ORBIT_ROUND_2: 118 | add_orbit_message(1, 0, 1, len(watcher.make_fork_choice(1, GENESIS_BLOCKS).txn_log) + 1) 119 | 120 | 121 | # MAKE CONSENSUS MESSAGE 122 | new_message = validators[next_proposer].make_new_consensus_message(rand_ID, mempools, drain_amount=MEMPOOL_DRAIN_RATE, genesis_blocks=GENESIS_BLOCKS) 123 | watcher.receive_consensus_message(new_message) # here the watcher is, receiving all the messages 124 | 125 | # keep max_height 126 | if new_message.height > max_height: 127 | max_height = new_message.height 128 | 129 | if FREE_INSTANT_BROADCAST: 130 | for v in VALIDATOR_NAMES: 131 | if v != 0: 132 | validators[v].receive_consensus_message(new_message) 133 | else: 134 | # MAKE NEW MESSAGE VIEWABLE 135 | for v in VALIDATOR_NAMES: 136 | if v == next_proposer or v == 0: 137 | continue 138 | viewables[v][next_proposer].append(new_message) # validators have the possibility of later viewing this message 139 | 140 | # RECEIVE CONSENSUS MESSAGES WITHIN SHARD 141 | for j in range(NUM_WITHIN_SHARD_RECEIPTS_PER_ROUND): 142 | 143 | next_receiver = random.choice(SHARD_VALIDATOR_ASSIGNMENT[rand_ID]) 144 | 145 | pool = copy.copy(SHARD_VALIDATOR_ASSIGNMENT[rand_ID]) 146 | pool.remove(next_receiver) 147 | 148 | new_received = False 149 | while(not new_received and len(pool) > 0): 150 | 151 | receiving_from = random.choice(pool) 152 | pool.remove(receiving_from) 153 | 154 | if len(viewables[next_receiver][receiving_from]) > 0: # if they have any viewables at all 155 | received_message = 
viewables[next_receiver][receiving_from][0] 156 | try: 157 | validators[next_receiver].receive_consensus_message(received_message) 158 | viewables[next_receiver][receiving_from].remove(received_message) 159 | new_received = True 160 | except UnresolvedDeps: 161 | pass 162 | 163 | # RECEIVE CONSENSUS MESSAGES BETWEEN SHARDS 164 | for j in range(NUM_BETWEEN_SHARD_RECEIPTS_PER_ROUND): 165 | 166 | pool = copy.copy(VALIDATOR_NAMES) 167 | pool.remove(0) 168 | 169 | next_receiver = random.choice(pool) 170 | pool.remove(next_receiver) 171 | 172 | new_received = False 173 | while(not new_received and len(pool) > 0): 174 | 175 | receiving_from = random.choice(pool) 176 | pool.remove(receiving_from) 177 | 178 | if len(viewables[next_receiver][receiving_from]) > 0: # if they have any viewables at all 179 | received_message = viewables[next_receiver][receiving_from][0] # receive the next one in the list 180 | try: 181 | validators[next_receiver].receive_consensus_message(received_message) 182 | viewables[next_receiver][receiving_from].remove(received_message) 183 | new_received = True 184 | except UnresolvedDeps: 185 | pass 186 | 187 | blocks = watcher.get_blocks_from_consensus_messages() 188 | #for b in blocks: 189 | # assert have_made_block(b) 190 | 191 | for v in validators.values(): 192 | assert v.check_have_made_blocks() 193 | 194 | # REPORTING: 195 | print("Step: ", i) 196 | if not REPORTING: 197 | continue 198 | if i == 0: 199 | init_plt(FIG_SIZE) 200 | if (i + 1) % REPORT_INTERVAL == 0: 201 | report(watcher, i, GENESIS_BLOCKS) 202 | -------------------------------------------------------------------------------- /validator.py: -------------------------------------------------------------------------------- 1 | from blocks import Block, Message, SwitchMessage_BecomeAParent, SwitchMessage_ChangeParent, SwitchMessage_Orbit 2 | 3 | from config import SHARD_IDS 4 | from config import VALIDATOR_NAMES 5 | from config import VALIDATOR_WEIGHTS, SWITCH_BLOCK_EXTRA 6 | from config import TTL_CONSTANT, TTL_SWITCH_CONSTANT 7 | from config import ORBIT_MODE 8 | from evm_transition import apply_to_state 9 | import random as rand 10 | from fork_choice import fork_choice 11 | 12 | import copy 13 | import sys 14 | import time 15 | 16 | BLOCKS = {} 17 | 18 | class UnresolvedDeps(Exception): 19 | pass 20 | 21 | 22 | def have_made_block(block): 23 | 24 | if block.height == 0: 25 | return True 26 | 27 | global BLOCKS 28 | if block.hash not in BLOCKS: 29 | print("block.hash") 30 | print(block.hash) 31 | sys.stdout.flush() 32 | return False 33 | for ID in SHARD_IDS: 34 | if block.sources[ID].hash != BLOCKS[block.hash][ID]: 35 | print("block.sources[ID].hash, BLOCKS[block.hash][ID]") 36 | print(block.sources[ID].hash, BLOCKS[block.hash][ID]) 37 | sys.stdout.flush() 38 | return False 39 | 40 | return True 41 | 42 | class ConsensusMessage: 43 | def __init__(self, block, name, justification=[]): 44 | self.estimate = block 45 | self.sender = name 46 | self.justification = justification 47 | self.hash = rand.randint(1, 10000000) 48 | 49 | assert isinstance(self.estimate, Block), "expected block" 50 | assert self.estimate.is_valid(), "expected block to be valid" 51 | 52 | assert self.sender in VALIDATOR_NAMES 53 | 54 | self.height = 0 55 | max_height = 0 56 | for m in self.justification: 57 | assert isinstance(m, ConsensusMessage), "expected justification to contain consensus messages" 58 | if m.height > max_height: 59 | if m.estimate.shard_ID == self.estimate.shard_ID: 60 | max_height = m.height 61 | 62 | self.height = 
max_height + 1 63 | 64 | def __hash__(self): 65 | return self.hash 66 | 67 | def __eq__(self, message): 68 | return self.hash == message.hash 69 | 70 | class Validator: 71 | def __init__(self, name): 72 | assert name in VALIDATOR_NAMES, "expected a validator name" 73 | self.name = name 74 | self.consensus_messages = [] # mutable data structure 75 | 76 | def receive_consensus_message(self, message): 77 | for m in message.justification: 78 | assert isinstance(m, ConsensusMessage), "expected consensus message" 79 | if m not in self.consensus_messages: 80 | raise UnresolvedDeps 81 | 82 | self.consensus_messages.append(message) 83 | 84 | # assumes no equivocations exist 85 | def latest_messages(self): 86 | max_heights = dict.fromkeys(VALIDATOR_NAMES) 87 | L_M = dict.fromkeys(VALIDATOR_NAMES) 88 | for v in VALIDATOR_NAMES: 89 | max_heights[v] = -1 90 | 91 | for m in self.consensus_messages: 92 | if m.height > max_heights[m.sender]: 93 | max_heights[m.sender] = m.height 94 | L_M[m.sender] = m 95 | 96 | return L_M 97 | 98 | def get_weighted_blocks(self): 99 | weighted_blocks = {} 100 | L_M = self.latest_messages() 101 | for v in VALIDATOR_NAMES: 102 | if L_M[v] is not None: 103 | if L_M[v].estimate in weighted_blocks.keys(): 104 | weighted_blocks[L_M[v].estimate] += VALIDATOR_WEIGHTS[v] 105 | else: 106 | weighted_blocks[L_M[v].estimate] = VALIDATOR_WEIGHTS[v] 107 | 108 | if L_M[v].estimate.switch_block: 109 | weighted_blocks[L_M[v].estimate] += VALIDATOR_WEIGHTS[v] * SWITCH_BLOCK_EXTRA 110 | 111 | return weighted_blocks 112 | 113 | def get_blocks_from_consensus_messages(self): 114 | blocks = [] 115 | for m in self.consensus_messages: 116 | blocks.append(m.estimate) 117 | return blocks 118 | 119 | def make_fork_choice(self, shard_ID, genesis_blocks): 120 | # the blocks in the view are the genesis blocks and blocks from consensus messages 121 | blocks = self.get_blocks_from_consensus_messages() 122 | weighted_blocks = self.get_weighted_blocks() 123 | 124 | next_fork_choice = fork_choice(shard_ID, genesis_blocks[shard_ID], blocks, weighted_blocks, genesis_blocks) 125 | 126 | assert next_fork_choice.shard_ID == shard_ID, "expected fork choice to be on requested shard" 127 | 128 | return next_fork_choice 129 | 130 | def make_all_fork_choices(self, genesis_blocks): 131 | fork_choices = {} 132 | for shard_ID in SHARD_IDS: 133 | fork_choices[shard_ID] = self.make_fork_choice(shard_ID, genesis_blocks) 134 | return fork_choices 135 | 136 | def next_hop(self, routing_table, target_shard_ID): 137 | return routing_table[target_shard_ID] if target_shard_ID in routing_table else None 138 | 139 | # 3 kinds of blocks: 140 | # regular block 141 | # switch sending block 142 | # switch receiving block 143 | 144 | def make_block(self, shard_ID, mempools, drain_amount, genesis_blocks, TTL=TTL_CONSTANT): 145 | 146 | global BLOCKS 147 | 148 | # First, the previous block pointer: 149 | prevblock = self.make_fork_choice(shard_ID, genesis_blocks) 150 | assert prevblock.shard_ID == shard_ID, "expected consistent IDs" 151 | 152 | new_received_log = {} 153 | new_sources = {} 154 | new_sent_log = {} 155 | new_routing_table = copy.deepcopy(prevblock.routing_table) 156 | new_parent_ID = copy.copy(prevblock.parent_ID) 157 | new_child_IDs = copy.copy(prevblock.child_IDs) 158 | 159 | # --------------------------------------------------------------------# 160 | # This part determines whether our block is a switch block: 161 | # --------------------------------------------------------------------# 162 | 163 | # Assume not, and look 
for switch messages as the next pending messages in tx and message queues: 164 | switch_block = False 165 | 166 | switch_tx = None 167 | switch_message = None 168 | # look in the mempool 169 | num_prev_txs = len(prevblock.txn_log) 170 | 171 | # look at sent messages of prevblock's neighbors 172 | neighbor_shard_IDs = prevblock.get_neighbors() 173 | print("prevblock.get_neighbors()") 174 | print(prevblock.get_neighbors()) 175 | print("prevblock.parent_ID") 176 | print(prevblock.parent_ID) 177 | print("prevblock.child_IDs") 178 | print(prevblock.child_IDs) 179 | temp_new_sources = {} 180 | for ID in SHARD_IDS: 181 | if ID not in neighbor_shard_IDs: 182 | assert ID not in temp_new_sources.keys() 183 | temp_new_sources[ID] = copy.copy(prevblock.sources[ID]) 184 | 185 | for ID in neighbor_shard_IDs: 186 | assert ID not in temp_new_sources.keys() 187 | temp_new_sources[ID] = copy.copy(self.make_fork_choice(ID, genesis_blocks)) 188 | print("ID in new_child_IDs") 189 | print(ID in new_child_IDs) 190 | print("ID == new_parent_IDs") 191 | print(ID == new_parent_ID) 192 | assert temp_new_sources[ID].is_in_chain(prevblock.sources[ID]), "expected monotonic sources - error 0, shard_ID: %s, ID: %s" % (shard_ID, ID) 193 | 194 | last_receive_log_length = len(prevblock.received_log[ID]) 195 | if len(temp_new_sources[ID].sent_log[shard_ID]) > last_receive_log_length: 196 | for next_message in temp_new_sources[ID].sent_log[shard_ID][last_receive_log_length:]: 197 | if isinstance(next_message, SwitchMessage_BecomeAParent) or isinstance(next_message, SwitchMessage_ChangeParent) or isinstance(next_message, SwitchMessage_Orbit): 198 | assert not switch_block 199 | switch_source_ID = ID 200 | switch_source = temp_new_sources[ID] 201 | switch_message = next_message 202 | switch_block = True 203 | 204 | # if we received a switch_message, do not initiate switch ourselves 205 | while switch_message and num_prev_txs < len(mempools[shard_ID]) and 'opcode' in mempools[shard_ID][num_prev_txs]: 206 | num_prev_txs += 1 207 | 208 | # skip orbits if the order of shards is wrong 209 | while num_prev_txs < len(mempools[shard_ID]) and 'opcode' in mempools[shard_ID][num_prev_txs] and mempools[shard_ID][num_prev_txs]['opcode'] == 'orbit' and mempools[shard_ID][num_prev_txs]['child_to_become_parent'] == prevblock.parent_ID: 210 | num_prev_txs += 1 211 | 212 | if num_prev_txs < len(mempools[shard_ID]): 213 | if 'opcode' in mempools[shard_ID][num_prev_txs]: 214 | switch_tx = mempools[shard_ID][num_prev_txs] 215 | print("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!") 216 | print("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!", shard_ID) 217 | print("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!", prevblock.height) 218 | 219 | switch_block = True 220 | 221 | if switch_block: 222 | 223 | assert switch_message is not None or switch_tx is not None 224 | assert switch_message is None or switch_tx is None 225 | if prevblock.switch_block: 226 | print("DEBUG: switch block follows another switch block") 227 | # --------------------------------------------------------------------# 228 | # If our block is a switch block, then we won't process anything 229 | # against the EVM, nor receiving or sending messages that are not switch messages 230 | # --------------------------------------------------------------------# 231 | 232 | # We will first process switch blocks: 233 | 234 | # BUILD SOURCES FOR PREVBLOCK NEIGHBORS 235 | neighbor_shard_IDs = prevblock.get_neighbors() 236 | 237 | 
new_sources = {} 238 | for ID in SHARD_IDS: 239 | if ID not in neighbor_shard_IDs: 240 | assert ID not in new_sources.keys() 241 | new_sources[ID] = (prevblock.sources[ID]) 242 | 243 | for ID in neighbor_shard_IDs: 244 | if ID == shard_ID: 245 | continue 246 | 247 | assert ID not in new_sources.keys() 248 | new_sources[ID] = self.make_fork_choice(ID, genesis_blocks) 249 | 250 | assert new_sources[ID].shard_ID == ID, "expected consistent IDs" 251 | 252 | print(str(new_sources[ID])) 253 | print(str(prevblock.sources[ID])) 254 | print("ID in new_child_IDs") 255 | print(ID in new_child_IDs) 256 | print("ID in new_parent_ID") 257 | print(ID == new_parent_ID) 258 | # fork choice should be orphaning any blocks that disagree with the prevblock's sources 259 | # the prevblock is the fork choice for this shard 260 | # which means that it is not filtered on this shard, meaning that it agrees with the fork choice of the parent 261 | # in the child, blocks that disagree with the fork choice are orphaned 262 | if ID == prevblock.parent_ID: 263 | assert new_sources[ID].is_in_chain(prevblock.sources[ID]), "expected monotonic consistent sources - error 1.1" 264 | elif ID in prevblock.child_IDs: 265 | assert new_sources[ID].is_in_chain(prevblock.sources[ID]), "expected monotonic consistent sources - error 1.2" 266 | else: 267 | assert False, "expected neighbor ID to be either parent or child ID" 268 | 269 | # check that fork choices have consistent sources 270 | # try to make sure that we don't make a block with a source that isn't in fork_choice's 271 | assert prevblock.shard_ID == shard_ID 272 | print("ID in new_child_IDs") 273 | print(ID in new_child_IDs) 274 | print("ID in new_parent_ID") 275 | print(ID == new_parent_ID) 276 | try: 277 | assert prevblock.is_in_chain(new_sources[ID].sources[shard_ID]), "expected - error 1, shard_ID: %s, ID: %s, parent_ID: %s, prev_parent_ID: %s, switch_block: %s" % (shard_ID, ID, new_parent_ID, prevblock.parent_ID, switch_block) 278 | except: 279 | prevblock.trace_history(ID) 280 | new_sources[ID].trace_history(shard_ID) 281 | raise 282 | 283 | if switch_block: 284 | 285 | # HACK: setting a source to something from which we didn't receive messages breaks 286 | # thigs during rotation. 
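# The loop below walks each source back along its prevblock pointers until its
# sent_log to this shard is no longer than what prevblock has already received,
# so a switch block never has to receive fresh non-switch messages mid-rotation.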
287 | for ID in SHARD_IDS: 288 | while len(new_sources[ID].sent_log[shard_ID]) > len(prevblock.received_log[ID]): 289 | assert new_sources[ID] != prevblock.sources[ID] 290 | new_sources[ID] = new_sources[ID].prevblock 291 | 292 | if switch_tx is not None: 293 | 294 | if switch_tx['opcode'] == 'orbit': 295 | new_txn_log = prevblock.txn_log + [switch_tx] 296 | 297 | child_to_become_parent = mempools[shard_ID][num_prev_txs]['child_to_become_parent'] 298 | shard_to_move_down = mempools[shard_ID][num_prev_txs]['shard_to_move_down'] 299 | 300 | root_fork_choice = prevblock 301 | child_source = self.make_fork_choice(child_to_become_parent, genesis_blocks) 302 | 303 | new_sources[child_to_become_parent] = child_source 304 | new_sources[shard_to_move_down] = prevblock 305 | assert shard_to_move_down == prevblock.shard_ID 306 | 307 | msg1 = SwitchMessage_Orbit(child_source, TTL_SWITCH_CONSTANT, child_to_become_parent, shard_to_move_down, None) 308 | 309 | new_sent_log[child_to_become_parent] = prevblock.sent_log[child_to_become_parent] + [msg1] 310 | for ID in SHARD_IDS: 311 | if ID != child_to_become_parent: 312 | new_sent_log[ID] = prevblock.sent_log[ID] 313 | 314 | new_received_log = prevblock.received_log 315 | 316 | print("My shardID: %s, my children: %s, my old parent: %s, child_to_become_parent: %s" % (shard_ID, new_child_IDs, prevblock.parent_ID, child_to_become_parent)) 317 | new_child_IDs.remove(child_to_become_parent) 318 | new_parent_ID = child_to_become_parent 319 | 320 | for k, v in child_source.routing_table.items(): 321 | assert k in new_routing_table 322 | del new_routing_table[k] 323 | 324 | else: 325 | assert switch_tx['opcode'] == 'switch' 326 | new_txn_log = prevblock.txn_log + [switch_tx] 327 | child_to_become_parent = mempools[shard_ID][num_prev_txs]['child_to_become_parent'] 328 | child_to_move_down = mempools[shard_ID][num_prev_txs]['child_to_move_down'] 329 | 330 | # this could be a more conservative choice, using fork choice is a bit high risk bc we might have more switch blocks in here 331 | fork_choice_of_child_to_become_parent = self.make_fork_choice(child_to_become_parent, genesis_blocks) # new_sources[child_to_become_parent] 332 | fork_choice_of_child_to_move_down = self.make_fork_choice(child_to_move_down, genesis_blocks) # new_sources[child_to_move_down] 333 | 334 | msg1 = SwitchMessage_BecomeAParent(fork_choice_of_child_to_become_parent, TTL_SWITCH_CONSTANT, child_to_become_parent, child_to_move_down) 335 | msg2 = SwitchMessage_ChangeParent(fork_choice_of_child_to_move_down, TTL_SWITCH_CONSTANT, child_to_move_down, child_to_become_parent) 336 | 337 | # they have the switch messages in the sent message queues 338 | new_sent_log[child_to_become_parent] = prevblock.sent_log[child_to_become_parent] + [msg1] 339 | new_sent_log[child_to_move_down] = prevblock.sent_log[child_to_move_down] + [msg2] 340 | 341 | for ID in SHARD_IDS: 342 | if ID != child_to_move_down and ID != child_to_become_parent: 343 | new_sent_log[ID] = prevblock.sent_log[ID] 344 | 345 | new_received_log = prevblock.received_log 346 | 347 | # removing child from the switch block 348 | new_child_IDs.remove(child_to_move_down) 349 | 350 | # now the routing table 351 | for ID in new_routing_table.keys(): 352 | if new_routing_table[ID] == child_to_move_down: 353 | new_routing_table[ID] = child_to_become_parent 354 | 355 | # may be redundant, but won't hurt anyone: 356 | new_routing_table[child_to_move_down] = child_to_become_parent 357 | 358 | 359 | # parent_ID unchanged 360 | # received_log unchanged 361 | 
# sources unchanged 362 | 363 | elif switch_message is not None: 364 | #new_received_log[switch_source_ID] = prevblock.received_log[switch_source_ID] + [switch_message] 365 | 366 | new_sources[switch_source_ID] = switch_source.first_block_with_message_in_sent_log(shard_ID, switch_message) 367 | 368 | # COPYPASTE===== 369 | newly_received_messages = {} 370 | for ID in SHARD_IDS: 371 | newly_received_messages[ID] = [] 372 | for ID in SHARD_IDS: 373 | if ID == shard_ID: 374 | continue 375 | 376 | prev_received_log_length = len(prevblock.received_log[ID]) 377 | while(len(newly_received_messages[ID]) < len(new_sources[ID].sent_log[shard_ID]) - prev_received_log_length): 378 | log_length = len(newly_received_messages[ID]) 379 | new_message = new_sources[ID].sent_log[shard_ID][log_length + prev_received_log_length] 380 | newly_received_messages[ID].append(new_message) 381 | 382 | 383 | 384 | new_received_log = {} 385 | for ID in SHARD_IDS: 386 | new_received_log[ID] = prevblock.received_log[ID] + newly_received_messages[ID] 387 | 388 | new_sent_messages = {} # for now we're going to fill this with routed messages 389 | for ID in SHARD_IDS: 390 | new_sent_messages[ID] = [] 391 | for ID in neighbor_shard_IDs: 392 | for m in newly_received_messages[ID]: 393 | if m.target_shard_ID == shard_ID: 394 | pass # TODO: we are not processing payloads here, is it important? 395 | else: 396 | next_hop_ID = self.next_hop(new_routing_table, m.target_shard_ID) 397 | if next_hop_ID is not None: 398 | assert next_hop_ID in prevblock.child_IDs, "shard_ID: %s, destination: %s, next_hop: %s, children: %s" % (shard_ID, ID, next_hop_ID, prevblock.child_IDs) 399 | else: 400 | next_hop_ID = new_parent_ID 401 | assert next_hop_ID is not None 402 | new_sent_messages[next_hop_ID].append(Message(new_sources[next_hop_ID], m.TTL, m.target_shard_ID, m.payload)) 403 | 404 | new_sent_log = {} 405 | for ID in SHARD_IDS: 406 | new_sent_log[ID] = prevblock.sent_log[ID] + new_sent_messages[ID] 407 | # //COPYPASTE===== 408 | 409 | print("switch_source", switch_source) 410 | print("switch_message", switch_message) 411 | print("switch_source_ID", switch_source_ID) 412 | print("new_sources[switch_source_ID]", new_sources[switch_source_ID]) 413 | print("switch message in new_sources[switch_source_ID].sent_log[shard_ID]", switch_message in new_sources[switch_source_ID].sent_log[shard_ID]) 414 | print("switch message in new_sources[switch_source_ID].prevblock.sent_log[shard_ID]", switch_message in new_sources[switch_source_ID].prevblock.sent_log[shard_ID]) 415 | print("new_sources[switch_source_ID].switch_block", new_sources[switch_source_ID].switch_block) 416 | # assert new_sources[switch_source_ID].switch_block 417 | 418 | if isinstance(switch_message, (SwitchMessage_BecomeAParent, SwitchMessage_Orbit)): 419 | if switch_message.new_child_ID not in new_child_IDs: 420 | new_child_IDs.append(switch_message.new_child_ID) 421 | for ID in new_sources[switch_message.new_child_ID].routing_table.keys(): 422 | new_routing_table[ID] = switch_message.new_child_ID 423 | if isinstance(switch_message, (SwitchMessage_ChangeParent, SwitchMessage_Orbit)): 424 | new_parent_ID = switch_message.new_parent_ID 425 | 426 | new_txn_log = prevblock.txn_log 427 | 428 | else: 429 | assert False 430 | 431 | new_block = Block(shard_ID, prevblock, True, new_txn_log, new_sent_log, new_received_log, new_sources, new_parent_ID, new_child_IDs, new_routing_table, prevblock.vm_state) 432 | 433 | assert new_block.switch_block 434 | print("new_block", new_block) 435 | 
print("new_block.switch_block", new_block.switch_block) 436 | 437 | check = new_block.is_valid() 438 | if not check[0]: 439 | print("---------------------------------------------------------") 440 | print("---------------------------------------------------------") 441 | print("shard_ID", prevblock.shard_ID) 442 | print("---------------------------------------------------------") 443 | print("---------------------------------------------------------") 444 | print("txn_log", new_txn_log) 445 | print("---------------------------------------------------------") 446 | print("---------------------------------------------------------") 447 | print("self.sent_log", new_sent_log) 448 | print("---------------------------------------------------------") 449 | print("---------------------------------------------------------") 450 | print("self.received_log", new_received_log) 451 | print("---------------------------------------------------------") 452 | print("---------------------------------------------------------") 453 | print("shard_ID", shard_ID) 454 | print("---------------------------------------------------------") 455 | print("---------------------------------------------------------") 456 | print("txn_log", new_txn_log) 457 | print("---------------------------------------------------------") 458 | print("---------------------------------------------------------") 459 | print("self.sent_log", new_sent_log) 460 | print("---------------------------------------------------------") 461 | print("---------------------------------------------------------") 462 | print("self.received_log", new_received_log) 463 | print("---------------------------------------------------------") 464 | print("---------------------------------------------------------") 465 | print("receiving_opcode: ", switch_block) 466 | print("---------------------------------------------------------") 467 | print("---------------------------------------------------------") 468 | try: 469 | assert check[0], "Invalid Block: " + check[1] 470 | except: 471 | print(check[1]) 472 | time.sleep(1000) 473 | 474 | #sources_hashes = {} 475 | #for ID in SHARD_IDS: 476 | # sources_hashes[ID] = new_block.sources[ID].hash 477 | #BLOCKS[new_block.hash] = sources_hashes 478 | 479 | return new_block 480 | 481 | # --------------------------------------------------------------------# 482 | # --------------------------------------------------------------------# 483 | # --------------------------------------------------------------------# 484 | # --------------------------------------------------------------------# 485 | # --------------------------------------------------------------------# 486 | 487 | # And now for the rest of the blocks, the ones that don't change the routing table 488 | # But which do routing and execution of state against the EVM 489 | 490 | newly_received_txns = [] 491 | 492 | for i in range(drain_amount): 493 | if num_prev_txs + i < len(mempools[shard_ID]): 494 | new_tx = mempools[shard_ID][num_prev_txs + i] 495 | if 'opcode' in new_tx: 496 | # Don't add switch transaction to tx log 497 | break 498 | newly_received_txns.append(new_tx) 499 | 500 | # Construct new txn log 501 | new_txn_log = prevblock.txn_log + newly_received_txns 502 | 503 | # print("NEW TXN LEN: ", len(new_txn_log)) 504 | # print("PRE NEW RECEIPTS DATA LEN: ", len(newly_received_txns)) 505 | 506 | receiving_opcode = False 507 | # --------------------------------------------------------------------# 508 | # BUILD RECEIVED LOG WITH: 509 | newly_received_messages = 
{} 510 | for ID in SHARD_IDS: 511 | newly_received_messages[ID] = [] 512 | for ID in SHARD_IDS: 513 | if ID == shard_ID: 514 | continue 515 | 516 | prev_received_log_length = len(prevblock.received_log[ID]) 517 | while(len(newly_received_messages[ID]) < len(new_sources[ID].sent_log[shard_ID]) - prev_received_log_length): 518 | log_length = len(newly_received_messages[ID]) 519 | new_message = new_sources[ID].sent_log[shard_ID][log_length + prev_received_log_length] 520 | if isinstance(new_message, SwitchMessage_BecomeAParent) or isinstance(new_message, SwitchMessage_ChangeParent) or isinstance(new_message, SwitchMessage_Orbit): 521 | break #but only receive messages up to the first switch opcod 522 | 523 | newly_received_messages[ID].append(new_message) 524 | 525 | 526 | 527 | new_received_log = {} 528 | for ID in SHARD_IDS: 529 | new_received_log[ID] = prevblock.received_log[ID] + newly_received_messages[ID] 530 | 531 | # --------------------------------------------------------------------# 532 | 533 | # BUILD NEW SENT MESSAGES 534 | new_sent_messages = {} # for now we're going to fill this with routed messages 535 | for ID in SHARD_IDS: 536 | new_sent_messages[ID] = [] 537 | newly_received_payloads = {} # destined for this shard's evm 538 | for ID in SHARD_IDS: 539 | newly_received_payloads[ID] = [] 540 | 541 | # ROUTING 542 | for ID in neighbor_shard_IDs: 543 | for m in newly_received_messages[ID]: 544 | if m.target_shard_ID == shard_ID: 545 | if isinstance(m, SwitchMessage_BecomeAParent): 546 | continue 547 | elif isinstance(m, SwitchMessage_ChangeParent): 548 | continue 549 | elif isinstance(m, SwitchMessage_Orbit): 550 | continue 551 | else: 552 | newly_received_payloads[ID].append(m) 553 | else: 554 | next_hop_ID = self.next_hop(new_routing_table, m.target_shard_ID) 555 | if next_hop_ID is not None: 556 | assert next_hop_ID in prevblock.child_IDs, "shard_ID: %s, destination: %s, next_hop: %s, children: %s" % (shard_ID, ID, next_hop_ID, prevblock.child_IDs) 557 | else: 558 | next_hop_ID = new_parent_ID 559 | assert next_hop_ID is not None 560 | new_sent_messages[next_hop_ID].append(Message(new_sources[next_hop_ID], m.TTL, m.target_shard_ID, m.payload)) 561 | 562 | 563 | # --------------------------------------------------------------------# 564 | 565 | # EVM integration here 566 | 567 | # this is where we have this function that produces the new vm state and the new outgoing payloads 568 | # new_vm_state, new_outgoing_payloads = apply_to_state(prevblock.vm_state, newly_received_txns, newly_received_payloads) 569 | # 'newly_received_txns' is the new txn list 570 | 571 | new_vm_state, new_outgoing_payloads = apply_to_state(prevblock.vm_state, newly_received_txns, newly_received_payloads, genesis_blocks) 572 | 573 | 574 | # --------------------------------------------------------------------# 575 | 576 | # print("OUTGOING PAYLOAD LENGTH", len(new_outgoing_payloads.values())) 577 | # BUILD SENT LOG FROM NEW OUTGOING PAYLOADS 578 | # by this time new_sent_messages might already have some messages from rerouting above 579 | for ID in SHARD_IDS: 580 | if ID != shard_ID: 581 | for m in new_outgoing_payloads[ID]: 582 | # print("HERE !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!") 583 | first_hop_ID = self.next_hop(new_routing_table, ID) 584 | if first_hop_ID is not None: 585 | assert first_hop_ID in prevblock.child_IDs, "shard_ID: %s, target: %s, first_hop_ID: %s, parent: %s, children: %s, rtable: %s" % (shard_ID, ID, first_hop_ID, prevblock.parent_ID, prevblock.child_IDs, 
prevblock.routing_table) 586 | else: 587 | first_hop_ID = new_parent_ID 588 | assert first_hop_ID is not None 589 | new_sent_messages[first_hop_ID].append(Message(new_sources[first_hop_ID], TTL, ID, m.payload)) 590 | 591 | 592 | SUM = 0 593 | for k in new_sent_messages.keys(): 594 | SUM += len(new_sent_messages[k]) 595 | # print("NUM NEW SENT: ", SUM) 596 | 597 | new_sent_log = {} 598 | for ID in SHARD_IDS: 599 | new_sent_log[ID] = prevblock.sent_log[ID] + new_sent_messages[ID] 600 | 601 | 602 | # MAKE BLOCK AND CHECK VALIDITY 603 | # Block(ID, prevblock=None, txn_log=[], sent_log=None, received_log=None, sources=None, parent_ID=None, child_IDs=None, routing_table=None, vm_state=genesis_state): 604 | 605 | print(("LOL", new_sources)) 606 | ret = Block(shard_ID, prevblock, False, new_txn_log, new_sent_log, new_received_log, new_sources, new_parent_ID, new_child_IDs, new_routing_table, new_vm_state) 607 | 608 | assert not ret.switch_block 609 | 610 | check = ret.is_valid() 611 | if not check[0]: 612 | print("---------------------------------------------------------") 613 | print("---------------------------------------------------------") 614 | print("shard_ID", prevblock.shard_ID) 615 | print("---------------------------------------------------------") 616 | print("---------------------------------------------------------") 617 | print("txn_log", new_txn_log) 618 | print("---------------------------------------------------------") 619 | print("---------------------------------------------------------") 620 | print("self.sent_log", new_sent_log) 621 | print("---------------------------------------------------------") 622 | print("---------------------------------------------------------") 623 | print("self.received_log", newly_received_messages) 624 | print("---------------------------------------------------------") 625 | print("---------------------------------------------------------") 626 | print("---------------------------------------------------------") 627 | print("---------------------------------------------------------") 628 | print("shard_ID", shard_ID) 629 | print("---------------------------------------------------------") 630 | print("---------------------------------------------------------") 631 | print("txn_log", new_txn_log) 632 | print("---------------------------------------------------------") 633 | print("---------------------------------------------------------") 634 | print("self.sent_log", new_sent_log) 635 | print("---------------------------------------------------------") 636 | print("---------------------------------------------------------") 637 | print("self.received_log", newly_received_messages) 638 | print("---------------------------------------------------------") 639 | print("---------------------------------------------------------") 640 | print("receiving_opcode: ", receiving_opcode) 641 | print("---------------------------------------------------------") 642 | print("---------------------------------------------------------") 643 | assert check[0], "Invalid Block: " + check[1] 644 | 645 | sources_hashes = {} 646 | for ID in SHARD_IDS: 647 | sources_hashes[ID] = ret.sources[ID].hash 648 | BLOCKS[ret.hash] = sources_hashes 649 | 650 | return ret 651 | 652 | 653 | def make_new_consensus_message(self, shard_ID, mempools, drain_amount, genesis_blocks, TTL=TTL_CONSTANT): 654 | 655 | assert shard_ID in SHARD_IDS, "expected shard ID" 656 | assert isinstance(drain_amount, int), "expected int" 657 | assert isinstance(TTL, int), "expected int" 658 | assert 
isinstance(mempools, dict), "expected dict" 659 | new_block = self.make_block(shard_ID, mempools, drain_amount, genesis_blocks, TTL) 660 | # This copy is necessary because we use append on consensus messages when we receive messages 661 | new_message = ConsensusMessage(new_block, self.name, copy.copy(self.consensus_messages)) 662 | self.receive_consensus_message(new_message) 663 | return new_message 664 | 665 | def check_have_made_blocks(self): 666 | blocks = self.get_blocks_from_consensus_messages() 667 | #for b in blocks: 668 | # assert have_made_block(b) 669 | 670 | return True 671 | -------------------------------------------------------------------------------- /visualizer.py: -------------------------------------------------------------------------------- 1 | import random 2 | import hashlib 3 | import matplotlib as mpl 4 | mpl.use('TkAgg') 5 | import matplotlib.pyplot as plt 6 | import numpy as np 7 | import networkx as nx 8 | from blocks import Block, SwitchMessage_BecomeAParent, SwitchMessage_ChangeParent, SwitchMessage_Orbit 9 | from config import * 10 | import copy 11 | 12 | from PIL import Image, ImageFile 13 | ImageFile.LOAD_TRUNCATED_IMAGES = True 14 | #Image.open('image.jpg').load() 15 | 16 | import os 17 | import imageio as io 18 | from PIL import Image 19 | 20 | 21 | BASE = 10000000 22 | IMAGE_LIMIT = 500 23 | FRAMES = "graphs/" 24 | THUMBNAILS = "thumbs/" 25 | COLOURS = ["LightYellow", "Yellow", "Orange", "OrangeRed", "Red", "DarkRed", "Black"] 26 | 27 | 28 | def blocks_by_shard_display_height(blocks): 29 | heights = {} 30 | blocks_by_height = {} 31 | unsorted_blocks = blocks 32 | for b in unsorted_blocks: 33 | # Root shard has no parent 34 | if b.parent_ID is None: 35 | heights[b] = 0 36 | blocks_by_height[0] = [b] 37 | unsorted_blocks.remove(b) 38 | break 39 | 40 | while len(unsorted_blocks) > 0: 41 | for b in unsorted_blocks: 42 | # If we have the height of their parent 43 | if b.parent_ID in heights.keys(): 44 | # Then we can assign their height (parent height + 1) 45 | heights[b] = 1 + heights[b.parent_ID] 46 | 47 | if heights[b] not in blocks_by_height.keys(): 48 | blocks_by_height[heights[b]] = [b] 49 | else: 50 | blocks_by_height[heights[b]].append(b) 51 | 52 | unsorted_blocks.remove(b) 53 | 54 | return blocks_by_height 55 | 56 | 57 | # This function returns a map from height to a list of shards 58 | def sort_blocks_by_shard_height(fork_choice_by_shard): 59 | 60 | for b in fork_choice_by_shard.values(): 61 | assert isinstance(b, Block), "expected only blocks" 62 | 63 | for ID in fork_choice_by_shard.keys(): 64 | assert ID in SHARD_IDS, "expected shard ID" 65 | 66 | root_shard_tip = None 67 | for b in fork_choice_by_shard.values(): 68 | # Root shard has no parent 69 | if b.parent_ID is None: 70 | root_shard_tip = b 71 | break 72 | 73 | fork_choice_by_height = {} 74 | if root_shard_tip is not None: 75 | ret = recur_sort_shards(fork_choice_by_shard, [root_shard_tip], 0, fork_choice_by_height) 76 | else: 77 | ret = recur_sort_shards(fork_choice_by_shard, [fork_choice_by_shard[0],fork_choice_by_shard[1]], 0, fork_choice_by_height) 78 | 79 | extra_height = max(list(ret.keys())) + 1 80 | all_shards = [x.shard_ID for x in sum(ret.values(), [])] 81 | 82 | for shard_ID in SHARD_IDS: 83 | if shard_ID not in all_shards: 84 | if extra_height not in ret: 85 | ret[extra_height] = [] 86 | ret[extra_height].append(fork_choice_by_shard[shard_ID]) 87 | 88 | return ret 89 | 90 | 91 | # Implements a depth first search of the shard tree 92 | # The order of the search is determined by 
'sorted' of shard_IDs 93 | def recur_sort_shards(fork_choice_by_shard, sorted_children, height, fork_choice_by_height, on_stack=None): 94 | if sorted_children == []: 95 | return 96 | 97 | if on_stack is None: 98 | on_stack = set() 99 | 100 | for b in sorted_children: 101 | assert isinstance(b, Block), "expected children to be blocks" 102 | 103 | for child in sorted_children: 104 | if child in on_stack: 105 | continue 106 | 107 | on_stack.add(child) 108 | 109 | if height not in fork_choice_by_height.keys(): 110 | fork_choice_by_height[height] = [child] 111 | else: 112 | fork_choice_by_height[height].append(child) 113 | 114 | sorted_child_IDs = sorted(child.child_IDs) 115 | children = [] 116 | for i in range(len(sorted_child_IDs)): 117 | children.append(fork_choice_by_shard[sorted_child_IDs[i]]) 118 | 119 | recur_sort_shards(fork_choice_by_shard, children, height + 1, fork_choice_by_height, on_stack) 120 | 121 | on_stack.remove(child) 122 | 123 | return fork_choice_by_height 124 | 125 | def init_plt(figsize=(30,20)): 126 | plt.figure(figsize=(30,20)) 127 | 128 | 129 | 130 | def report(watcher, round_number, genesis_blocks): 131 | plt.clf() 132 | 133 | # OUTSIDE BORDER BOX 134 | GraphBorder = nx.Graph(); 135 | CornersPos = {} 136 | CornersPos["topleft"] = (0, 0) 137 | CornersPos["topright"] = (DISPLAY_WIDTH, 0) 138 | CornersPos["bottomleft"] = (0, DISPLAY_HEIGHT) 139 | CornersPos["bottomright"] = (DISPLAY_WIDTH, DISPLAY_HEIGHT) 140 | 141 | GraphBorder.add_node("topleft") 142 | GraphBorder.add_node("topright") 143 | GraphBorder.add_node("bottomleft") 144 | GraphBorder.add_node("bottomright") 145 | GraphBorder.add_edge("topright", "topleft") 146 | GraphBorder.add_edge("topright", "bottomright") 147 | GraphBorder.add_edge("topleft", "bottomleft") 148 | GraphBorder.add_edge("bottomleft", "bottomright") 149 | 150 | nx.draw_networkx_nodes(GraphBorder, CornersPos, node_size=0) 151 | nx.draw_networkx_edges(GraphBorder, CornersPos, width=1.5) 152 | 153 | 154 | # SHARD BOXES 155 | ShardBorder = nx.Graph(); 156 | 157 | # The position of the shards may vary, so we get them from the fork choice: 158 | fork_choice = watcher.make_all_fork_choices(genesis_blocks) 159 | print("vvvvvvv") 160 | for k, v in fork_choice.items(): 161 | print("%s => %s, %s, %s" % (k, v.parent_ID, v.child_IDs, v.routing_table)) 162 | print("^^^^^^^") 163 | 164 | fork_choice_by_shard_height = sort_blocks_by_shard_height(fork_choice) 165 | 166 | num_layers = len(fork_choice_by_shard_height.keys()) 167 | 168 | num_shards_by_height = {} 169 | for i in range(num_layers): 170 | num_shards_by_height[i] = len(fork_choice_by_shard_height[i]) 171 | 172 | shard_display_width_by_height = {} 173 | for i in range(num_layers): 174 | shard_display_width_by_height[i] = (DISPLAY_WIDTH - 2*DISPLAY_MARGIN - (num_shards_by_height[i] - 1)*SHARD_X_SPACING)/num_shards_by_height[i] 175 | 176 | shard_display_height = (DISPLAY_HEIGHT - 2*DISPLAY_MARGIN - (num_layers - 1)*SHARD_Y_SPACING)/num_layers 177 | 178 | ShardBorderPos = {} 179 | print("vvvv") 180 | for h in range(num_layers): 181 | y_top = DISPLAY_HEIGHT - (DISPLAY_MARGIN + h*(shard_display_height + SHARD_Y_SPACING)) 182 | y_bottom = y_top - shard_display_height 183 | for i in range(len(fork_choice_by_shard_height[h])): 184 | assert isinstance(fork_choice_by_shard_height[h][i], Block), "expected block" 185 | shard_ID = fork_choice_by_shard_height[h][i].shard_ID 186 | print(shard_ID, end="") 187 | 188 | ShardBorder.add_node((shard_ID, "topleft")) 189 | ShardBorder.add_node((shard_ID, "topright")) 190 | 
ShardBorder.add_node((shard_ID, "bottomleft")) 191 | ShardBorder.add_node((shard_ID, "bottomright")) 192 | ShardBorder.add_edge((shard_ID, "topleft"), (shard_ID, "topright")) 193 | ShardBorder.add_edge((shard_ID, "topleft"), (shard_ID, "bottomleft")) 194 | ShardBorder.add_edge((shard_ID, "topright"), (shard_ID, "bottomright")) 195 | ShardBorder.add_edge((shard_ID, "bottomleft"), (shard_ID, "bottomright")) 196 | 197 | x_left = DISPLAY_MARGIN + i*(shard_display_width_by_height[h] + SHARD_X_SPACING) 198 | x_right = x_left + shard_display_width_by_height[h] 199 | 200 | ShardBorderPos[(shard_ID, "topleft")] = (x_left, y_top) 201 | ShardBorderPos[(shard_ID, "topright")] = (x_right, y_top) 202 | ShardBorderPos[(shard_ID, "bottomleft")] = (x_left, y_bottom) 203 | ShardBorderPos[(shard_ID, "bottomright")] = (x_right, y_bottom) 204 | print() 205 | 206 | print("^^^^") 207 | nx.draw_networkx_nodes(ShardBorder, ShardBorderPos, node_size=0) 208 | nx.draw_networkx_edges(ShardBorder, ShardBorderPos, width=1) 209 | 210 | 211 | # SHARD LAYOUT 212 | 213 | ShardLayout = nx.Graph() 214 | for ID in SHARD_IDS: 215 | ShardLayout.add_node(ID) 216 | 217 | for block in fork_choice.values(): 218 | if block.parent_ID is not None: 219 | ShardLayout.add_edge(block.parent_ID, block.shard_ID) 220 | 221 | labels = {} 222 | ShardPos = {} 223 | for shard_ID in SHARD_IDS: 224 | x = (ShardBorderPos[(shard_ID, "topleft")][0] + ShardBorderPos[(shard_ID, "topright")][0])/2 - DISPLAY_HEIGHT 225 | y = (ShardBorderPos[(shard_ID, "topleft")][1] + ShardBorderPos[(shard_ID, "bottomleft")][1])/2 226 | ShardPos[shard_ID] = (x/2, y/2 + DISPLAY_HEIGHT/4) 227 | labels[shard_ID] = shard_ID 228 | 229 | nx.draw_networkx_nodes(ShardLayout, ShardPos, node_color='#e6f3f7', edge_color='b', node_size=4000) 230 | nx.draw_networkx_edges(ShardLayout, ShardPos, alpha=0.5, edge_color='b', width=10) 231 | nx.draw_networkx_labels(ShardLayout,pos=ShardPos, label=labels, font_size=40) 232 | 233 | 234 | 235 | # VALIDATOR LINES 236 | ValidatorLines = nx.Graph(); 237 | for v in VALIDATOR_NAMES: 238 | if v != 0: 239 | ValidatorLines.add_node((v, "left")) 240 | ValidatorLines.add_node((v, "right")) 241 | ValidatorLines.add_edge((v, "left"), (v, "right")) 242 | 243 | 244 | validator_y_coordinate = {} 245 | validator_left_x_coordinate = {} 246 | ValidatorLinePoS = {} 247 | for ID in SHARD_IDS: 248 | x_left = ShardBorderPos[(ID, "topleft")][0] + DISPLAY_MARGIN 249 | x_right = ShardBorderPos[(ID, "topright")][0] - DISPLAY_MARGIN 250 | 251 | 252 | num_validators = len(SHARD_VALIDATOR_ASSIGNMENT[ID]) 253 | validator_y_spacing = (1.)/(num_validators + 1) 254 | 255 | for i in range(num_validators): 256 | v = SHARD_VALIDATOR_ASSIGNMENT[ID][i] 257 | relative_validator_display_height = (i + 1)*validator_y_spacing 258 | 259 | validator_y_coordinate[v] = ShardBorderPos[(ID, "topleft")][1] - shard_display_height*relative_validator_display_height 260 | validator_left_x_coordinate[v] = x_left 261 | 262 | y = validator_y_coordinate[v] 263 | 264 | ValidatorLinePoS[(v, "left")] = (x_left, y) 265 | ValidatorLinePoS[(v, "right")] = (x_right, y) 266 | 267 | 268 | nx.draw_networkx_nodes(ValidatorLines, ValidatorLinePoS, node_size=0) 269 | nx.draw_networkx_edges(ValidatorLines, ValidatorLinePoS, width=0.25) 270 | 271 | 272 | # PREVBLOCK POINTERS, FORK CHOICE AND SOURCES 273 | X_SPACE_PER_MESSAGE_HEIGHT = (DISPLAY_WIDTH - 2*DISPLAY_MARGIN)/CONSENSUS_MESSAGE_HEIGHTS_TO_DISPLAY_IN_ROOT 274 | 275 | window_size_by_shard_height = {} 276 | 277 | for h in range(num_layers): 278 | 
window_size_by_shard_height[h] = shard_display_width_by_height[h]/(X_SPACE_PER_MESSAGE_HEIGHT) + 4 279 | 280 | 281 | max_message_display_height_by_shard = {} 282 | for ID in SHARD_IDS: 283 | max_message_display_height_by_shard[ID] = 0 284 | 285 | for m in watcher.consensus_messages: 286 | if max_message_display_height_by_shard[m.estimate.shard_ID] < m.height: 287 | max_message_display_height_by_shard[m.estimate.shard_ID] = m.height 288 | 289 | shard_display_height_by_shard_ID = {} 290 | for h in range(num_layers): 291 | for b in fork_choice_by_shard_height[h]: 292 | shard_display_height_by_shard_ID[b.shard_ID] = h 293 | 294 | # messages in the shard windows 295 | displayable_messages = [] 296 | for m in watcher.consensus_messages: 297 | # checks if m is in the display window for its shard 298 | ID = m.estimate.shard_ID 299 | shard_height = shard_display_height_by_shard_ID[ID] 300 | if m.height >= max_message_display_height_by_shard[ID] - window_size_by_shard_height[shard_height]: 301 | displayable_messages.append(m) 302 | 303 | # prevblock pointers, fork choices, and sources 304 | PrevblockGraph = nx.DiGraph(); 305 | ForkChoiceGraph = nx.DiGraph(); 306 | SourcesGraph = nx.DiGraph(); 307 | 308 | messagesPos = {} 309 | senders = {} 310 | block_to_message = {} 311 | 312 | for m in displayable_messages: 313 | PrevblockGraph.add_node(m) 314 | ForkChoiceGraph.add_node(m) 315 | SourcesGraph.add_node(m) 316 | 317 | ID = m.estimate.shard_ID 318 | shard_height = shard_display_height_by_shard_ID[ID] 319 | 320 | if max_message_display_height_by_shard[ID] > window_size_by_shard_height[shard_display_height_by_shard_ID[ID]]: 321 | start_of_window = max_message_display_height_by_shard[ID] - window_size_by_shard_height[shard_height] 322 | else: 323 | start_of_window = 0 324 | 325 | used_window = max_message_display_height_by_shard[ID] - start_of_window 326 | relative_height = (m.height - start_of_window - 1)/used_window 327 | 328 | # get positions: 329 | assert relative_height <= 1, "expected relative height to be less than 1" 330 | xoffset = relative_height*(shard_display_width_by_height[shard_height] - 2*DISPLAY_MARGIN) + DISPLAY_MARGIN 331 | 332 | if m.sender != 0: 333 | messagesPos[m] = (validator_left_x_coordinate[m.sender] + xoffset, validator_y_coordinate[m.sender]) 334 | else: 335 | x = ShardBorderPos[(m.estimate.shard_ID, "topleft")][0] + DISPLAY_MARGIN 336 | y = (ShardBorderPos[(m.estimate.shard_ID, "topleft")][1] + ShardBorderPos[(m.estimate.shard_ID, "bottomleft")][1])/2 337 | messagesPos[m] = (x, y) 338 | 339 | 340 | # this map will help us draw nodes from prevblocks, sources, etc 341 | #print("m.estimate.hash: ", m.estimate.hash) 342 | #print("m.estimate: ", m.estimate) 343 | #print("m.sender: ", m.sender) 344 | assert m.estimate not in block_to_message, "expected unique blocks" 345 | block_to_message[m.estimate] = m 346 | 347 | for m in displayable_messages: 348 | # define edges for prevblock graph 349 | if m.estimate.prevblock is not None and m.estimate.prevblock in block_to_message: 350 | PrevblockGraph.add_edge(m, block_to_message[m.estimate.prevblock]) 351 | 352 | neighbor_shards = [] 353 | if m.estimate.parent_ID is not None: 354 | neighbor_shards.append(m.estimate.parent_ID) 355 | for ID in m.estimate.child_IDs: 356 | neighbor_shards.append(ID) 357 | 358 | for ID in neighbor_shards: 359 | # SourcesGraph define edges 360 | if m.estimate.sources[ID] is not None and m.estimate.sources[ID] in block_to_message: 361 | SourcesGraph.add_edge(m, block_to_message[m.estimate.sources[ID]]) 362 | 
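# The fork-choice edges below are built by walking each shard's fork-choice tip
# back through its prevblock pointers (restricted to blocks still in the display
# window), so the canonical chain of every shard is drawn as the wide grey path
# underneath the prevblock arrows.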
363 | # ForkChoiceGraph define edges 364 | for ID in SHARD_IDS: 365 | this_block = fork_choice[ID] 366 | while(this_block.prevblock is not None and this_block.prevblock in block_to_message): 367 | ForkChoiceGraph.add_edge(block_to_message[this_block], block_to_message[this_block.prevblock]) 368 | this_block = this_block.prevblock 369 | 370 | # Draw edges 371 | #nx.draw_networkx_edges(SourcesGraph, messagesPos, style='dashdot', edge_color='y', arrowsize=10, width=1) 372 | nx.draw_networkx_edges(ForkChoiceGraph, messagesPos, edge_color='#bdbdbd', alpha=1, arrowsize=25, width=15) 373 | nx.draw_networkx_edges(PrevblockGraph, messagesPos, width=3) 374 | nx.draw_networkx_nodes(PrevblockGraph, messagesPos, node_shape='s', node_color='#3c3c3d', node_size=300) 375 | 376 | 377 | # CROSS SHARD MESSAGES 378 | ShardMessagesGraph = nx.Graph(); 379 | ShardMessagesOriginGraph = nx.Graph(); 380 | shard_messagesPos = {} 381 | 382 | consensus_message_by_shard_message = {} 383 | for m in displayable_messages: 384 | ShardMessagesOriginGraph.add_node(m) 385 | shard_messagesPos[m] = messagesPos[m] 386 | 387 | # Messages to parents are displayed above their sending blocks 388 | if m.estimate.parent_ID is not None: 389 | ID = m.estimate.parent_ID 390 | for shard_message in m.estimate.newly_sent()[ID]: 391 | assert shard_message not in consensus_message_by_shard_message.keys(), "expected not to overwrite consensus message" 392 | consensus_message_by_shard_message[shard_message] = m 393 | ShardMessagesGraph.add_node(shard_message) 394 | ShardMessagesOriginGraph.add_node(shard_message) 395 | xoffset = (m.height % 3 - 1)*SHARD_MESSAGE_XOFFSET 396 | yoffset = SHARD_MESSAGE_YOFFSET 397 | shard_messagesPos[shard_message] = (messagesPos[m][0] + xoffset, messagesPos[m][1] + yoffset) 398 | ShardMessagesOriginGraph.add_edge(m, shard_message) 399 | 400 | # Messages to children are displayed below their sending blocks 401 | for ID in m.estimate.child_IDs: 402 | for shard_message in m.estimate.newly_sent()[ID]: 403 | assert shard_message not in consensus_message_by_shard_message.keys(), "expected not to overwrite consensus message" 404 | consensus_message_by_shard_message[shard_message] = m 405 | ShardMessagesGraph.add_node(shard_message) 406 | ShardMessagesOriginGraph.add_node(shard_message) 407 | yoffset = -SHARD_MESSAGE_YOFFSET 408 | xoffset = (m.height % 3 - 1)*SHARD_MESSAGE_XOFFSET 409 | shard_messagesPos[shard_message] = (messagesPos[m][0] + xoffset, messagesPos[m][1] + yoffset) 410 | ShardMessagesOriginGraph.add_edge(m, shard_message) 411 | 412 | nx.draw_networkx_nodes(ShardMessagesOriginGraph, shard_messagesPos, node_size=0) 413 | nx.draw_networkx_nodes(ShardMessagesGraph, shard_messagesPos, node_shape='o', node_color='#f6546a', node_size=250) 414 | nx.draw_networkx_edges(ShardMessagesOriginGraph, shard_messagesPos, width=6, style='dotted') 415 | 416 | # CROSS SHARD MESSAGE RECEIVE ARROWS 417 | RECEIVED_GRAPH_COLORS = ['#000000', '#600787', '#078760', '#876007', '#870760', 418 | '#076087', '#608707', '#FF6633', '#FFB399', '#FF33FF', 419 | '#E6B333', '#3366E6', '#999966', '#99FF99', '#B34D4D', 420 | '#80B300', '#809900', '#E6B3B3', '#6680B3', '#66991A', 421 | '#FF99E6', '#CCFF1A', '#FF1A66', '#E6331A', '#33FFCC', 422 | '#66994D', '#B366CC', '#4D8000', '#B33300', '#CC80CC', 423 | '#66664D', '#991AFF', '#E666FF', '#4DB3FF', '#1AB399', 424 | '#E666B3', '#33991A', '#CC9999', '#B3B31A', '#00E680', 425 | '#4D8066', '#809980', '#E6FF80', '#1AFF33', '#999933', 426 | '#FF3380', '#CCCC00', '#66E64D', '#4D80CC', '#9900B3', 427 | 
'#E64D66', '#4DB380', '#FF4D4D', '#99E6E6', '#6666FF'] 428 | 429 | OrphanedReceivedMessagesGraph = [nx.DiGraph() for _ in RECEIVED_GRAPH_COLORS]; 430 | AcceptedReceivedMessagesGraph = [nx.DiGraph() for _ in RECEIVED_GRAPH_COLORS]; 431 | 432 | for m in displayable_messages: 433 | neighbor_shards = [] 434 | if m.estimate.parent_ID is not None: 435 | neighbor_shards.append(m.estimate.parent_ID) 436 | for ID in m.estimate.child_IDs: 437 | neighbor_shards.append(ID) 438 | 439 | for i in range(len(RECEIVED_GRAPH_COLORS)): 440 | OrphanedReceivedMessagesGraph[i].add_node(m) 441 | AcceptedReceivedMessagesGraph[i].add_node(m) 442 | 443 | for ID in neighbor_shards: 444 | for new_received_message in m.estimate.newly_received()[ID]: 445 | 446 | for i in range(len(RECEIVED_GRAPH_COLORS)): 447 | OrphanedReceivedMessagesGraph[i].add_node(new_received_message) 448 | AcceptedReceivedMessagesGraph[i].add_node(new_received_message) 449 | OrphanedReceivedMessagesGraph[i].add_node(("r", new_received_message)) 450 | AcceptedReceivedMessagesGraph[i].add_node(("r", new_received_message)) 451 | 452 | assert m in shard_messagesPos.keys() 453 | shard_messagesPos[("r", new_received_message)] = shard_messagesPos[m] 454 | 455 | # Hypothesis is that this continue only occurs when the source of the new received is outside of the displayable messages 456 | if new_received_message not in consensus_message_by_shard_message.keys(): 457 | continue 458 | 459 | new_shard_message_origin = consensus_message_by_shard_message[new_received_message] 460 | sending_block = new_shard_message_origin.estimate 461 | 462 | COLOR_ID = hash((new_received_message.TTL, new_received_message.payload, new_received_message.target_shard_ID)) % (len(RECEIVED_GRAPH_COLORS) - 1) + 1 463 | 464 | if isinstance(new_received_message, (SwitchMessage_BecomeAParent, SwitchMessage_ChangeParent, SwitchMessage_Orbit)): 465 | COLOR_ID = 0 466 | else: # XXX 467 | pass 468 | #continue 469 | 470 | if fork_choice[m.estimate.shard_ID].is_in_chain(m.estimate): 471 | if fork_choice[sending_block.shard_ID].is_in_chain(sending_block): 472 | #print("m.estimate", m.estimate) 473 | #print("sending_block", sending_block) 474 | #print("fork_choice[m.estimate.shard_ID]", fork_choice[m.estimate.shard_ID]) 475 | #print("fork_choice[sending_block.shard_ID]", fork_choice[sending_block.shard_ID]) 476 | AcceptedReceivedMessagesGraph[COLOR_ID].add_edge(new_received_message, ("r", new_received_message)) 477 | continue 478 | 479 | OrphanedReceivedMessagesGraph[COLOR_ID].add_edge(new_received_message, ("r", new_received_message)) 480 | 481 | for i, clr in enumerate(RECEIVED_GRAPH_COLORS): 482 | nx.draw_networkx_edges(AcceptedReceivedMessagesGraph[i], shard_messagesPos, edge_color=clr, arrowsize=50, arrowstyle='->', width=8) 483 | nx.draw_networkx_edges(OrphanedReceivedMessagesGraph[i], shard_messagesPos, edge_color=clr, arrowsize=20, arrowstyle='->', width=1.25) 484 | 485 | ax = plt.axes() 486 | if SWITCH_ROUND - round_number >= 0: 487 | ax.text(0.1, 0.05, "Switch Countdown: " + str(SWITCH_ROUND - round_number), 488 | horizontalalignment='center', 489 | verticalalignment='bottom', 490 | transform=ax.transAxes, 491 | size=50) 492 | 493 | ax.text(0.1, 0.00, "Free instant broadcast: " + str(FREE_INSTANT_BROADCAST), 494 | horizontalalignment='center', 495 | verticalalignment='bottom', 496 | transform=ax.transAxes, 497 | size=50) 498 | 499 | ax.text(0.1, -0.05, "Validating: " + str(not VALIDITY_CHECKS_OFF), 500 | horizontalalignment='center', 501 | verticalalignment='bottom', 502 | 
transform=ax.transAxes, 503 | size=50) 504 | 505 | ax.text(0.1, -0.1, "Saving frames: " + str(SAVE_FRAMES), 506 | horizontalalignment='center', 507 | verticalalignment='bottom', 508 | transform=ax.transAxes, 509 | size=50) 510 | 511 | 512 | if SHOW_FRAMES: 513 | plt.axis('off') 514 | plt.draw() 515 | plt.pause(PAUSE_LENGTH) 516 | 517 | if SAVE_FRAMES: 518 | print('./graphs/' + str(10000000 + round_number) + ".png") 519 | plt.savefig('./graphs/' + str(10000000 + round_number) + ".png") 520 | 521 | 522 | IMAGE_LIMIT = 219 523 | class PlotTool(object): 524 | """A base object with functions for building, displaying, and saving viewgraphs""" 525 | 526 | def __init__(self): 527 | self.graph_path = os.path.dirname(os.path.abspath(__file__)) + '/graphs/' 528 | self.thumbnail_path = os.path.dirname(os.path.abspath(__file__)) + '/thumbnails/' 529 | 530 | 531 | def make_thumbnails(self, frame_count_limit=IMAGE_LIMIT, xsize=1000, ysize=1000): 532 | """Make thumbnail images in PNG format.""" 533 | 534 | file_names = sorted([fn for fn in os.listdir(self.graph_path) if fn.endswith('.png')]) 535 | print("len(file_names)", len(file_names)) 536 | 537 | if len(file_names) > frame_count_limit: 538 | raise Exception("Too many frames!") 539 | 540 | images = [] 541 | for file_name in file_names: 542 | images.append(Image.open(self.graph_path + file_name)) 543 | 544 | 545 | size = (xsize, ysize) 546 | iterator = 0 547 | for image in images: 548 | image.thumbnail(size)#, Image.ANTIALIAS) 549 | image.save(self.thumbnail_path + str(1000 + iterator) + "thumbnail.png", "PNG") 550 | iterator += 1 551 | 552 | 553 | def make_gif(self, frame_count_limit=IMAGE_LIMIT, gif_name="mygif.gif", frame_duration=0.4): 554 | """Make a GIF visualization of view graph.""" 555 | 556 | self.make_thumbnails(frame_count_limit=frame_count_limit) 557 | 558 | file_names = sorted([file_name for file_name in os.listdir(self.thumbnail_path) 559 | if file_name.endswith('thumbnail.png')]) 560 | 561 | images = [] 562 | for file_name in file_names: 563 | images.append(Image.open(self.thumbnail_path + file_name)) 564 | 565 | destination_filename = self.graph_path + gif_name 566 | 567 | iterator = 0 568 | with io.get_writer(destination_filename, mode='I', duration=frame_duration) as writer: 569 | for file_name in file_names: 570 | image = io.imread(self.thumbnail_path + file_name) 571 | writer.append_data(image) 572 | iterator += 1 573 | 574 | writer.close() 575 | 576 | 577 | ''' 578 | ax = plt.axes() 579 | # FLOATING TEXT 580 | for ID in SHARD_IDS: 581 | ax.text(ShardBorderPos[(ID,"bottomleft")][0], ShardBorderPos[(ID,"bottomleft")][1], ID, 582 | horizontalalignment='right', 583 | verticalalignment='center', 584 | size=25) 585 | 586 | plt.axis('off') 587 | plt.draw() 588 | plt.pause(PAUSE_LENGTH) 589 | ''' 590 | --------------------------------------------------------------------------------
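Assuming a run with SAVE_FRAMES enabled in config.py has populated graphs/ with one PNG per reporting round, those frames can be stitched into an animation with the PlotTool helper above. A minimal sketch (the output file name and the pre-created thumbnails/ directory next to visualizer.py are assumptions, not part of the repository):

```
from visualizer import PlotTool

# make_gif() thumbnails every PNG in graphs/ (up to IMAGE_LIMIT frames),
# then writes the animation next to the frames as graphs/simulation.gif.
PlotTool().make_gif(gif_name="simulation.gif", frame_duration=0.4)
```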