├── README.md
├── global.py
├── network_status.py
└── network.py

--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
## Solana network simulation

WIP simulation of Solana's network components, namely:

- Network: controls message broadcast and slot progression
- Node: staked validator nodes, eligible for leader rotation and block voting
- Block: basic unit of message/data to broadcast (some duration of PoH)
- BlockTransmission: the unit a leader broadcasts: its current block plus 'ticks', if any, marking previously failed slots from its history leading up to its rotation as leader

The simulation is run/controlled from `global.py`; see the sketch below.

### WIP/TODO
- Lockout function tuning
- Node BlockTransmission cache
- Node save/tick/vote logic
- Node stakes
- Destaking/leakage
- E&M
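### Running

A minimal run sketch, distilled from `run_simulation` in `global.py` (assumes `numpy`, `pandas`, `matplotlib`, `pygraphviz`, and IPython are installed; all names below are the ones defined in this repo):

```python
import numpy as np
import network as solana

validator_ids = list(range(8))
genesis = solana.Block(initial_validator_set = validator_ids)
net = solana.Network(lambda: 1, genesis)  # constant-latency function, genesis block
nodes = [solana.Node(net, i) for i in validator_ids]
net.round_robin = np.random.choice(validator_ids, len(validator_ids), replace = False)

for _ in range(16):
    net.tick()  # one slot: deliver broadcasts, fill virtual ticks, rotate leader

net.status()  # rough measure of node agreement
```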
--------------------------------------------------------------------------------
/global.py:
--------------------------------------------------------------------------------
########################
## Network Sim
## TODO
## - update get_current_lockout using last *voted* block, not just block heights
## - pool size != network ticks
## - if leader is drop-out, no data in forks, need ability to 'request from network'
## - validate lockout calc
## - use virtual ticks when node receives block
## - add node stakes
## - slot transmission times
## - destaking / leakage
## - higher destaking rate for non-voters in smaller partition
## - viz and monitoring
## - fix NetworkStatus bug due to data missing from dropouts
## - confirm timing alignment
########################

from importlib import reload

import network as solana
reload(solana)

import network_status as ns
reload(ns)

import random

from collections import Counter

import numpy as np
np.random.seed(11)
from itertools import compress


## DEBUG
from IPython.core.debugger import set_trace
import time


########################
## Network Constants
########################
POOL_SIZE = 50
VALIDATOR_IDS = list(range(POOL_SIZE))
AVG_LATENCY = 0  ## currently << transmission_time
NETWORK_PARTITION = 0.15  ## tmp static partition

########################
## Functions
########################

def poisson_latency(latency):
    ## gamma(1, 1) is exponential, i.e. Poisson-process inter-arrival times
    return lambda: 1 + int(random.gammavariate(1, 1) * latency)

########################
## Sim
########################
def run_simulation(network_status):

    ## Config network
    GENESIS = solana.Block(initial_validator_set = VALIDATOR_IDS)
    network = solana.Network(poisson_latency(AVG_LATENCY), GENESIS)

    ## Attach nodes to network
    nodes = [solana.Node(network, i) for i in VALIDATOR_IDS]

    ## Assign leader rotation
    leaders = np.random.choice(VALIDATOR_IDS, POOL_SIZE, replace = False)
    network.round_robin = leaders

    ## Set network partition
    ## Currently static...
    ## logging.info("Partitioned nodes: ", network.partition_nodes)

    ## run sim...
    cur_partition_time = -1
    network.partition_nodes = []
    long_lived_partition = False

    for t in range(POOL_SIZE * 2):

        ## each tick, some % chance of a long-lived partition
        if not long_lived_partition and cur_partition_time < 0:
            long_lived_partition = np.random.uniform() < 0.05

        ## generate partitions
        if long_lived_partition:
            network.partition_nodes = list(compress(VALIDATOR_IDS,
                [np.random.uniform() < NETWORK_PARTITION for _ in nodes]))
            cur_partition_time = random.randint(1, POOL_SIZE // 5)  ## next partition
            long_lived_partition = False

        print("Partition size: %s for: %s" % (len(network.partition_nodes), cur_partition_time))

        network.tick()

        do_unique_chain_analysis = ((t + 1) % 25) == 0
        network_snapshot = network_status.update_status(network, chain_analysis = do_unique_chain_analysis, print_snapshot = False)

        # network_snapshot = network.snapshot(t)
        # network_status.print_snapshot(network_snapshot)

        ## if time is up, reset partition nodes
        if cur_partition_time <= 0:
            network.partition_nodes = []
            cur_partition_time = -1
        else:
            cur_partition_time -= 1

    return network


def main():
    print("Run simulation...")
    t0 = time.time()
    network_status = ns.NetworkStatus()
    network = run_simulation(network_status)
    t1 = time.time()
    print("Simulation time: %.2f" % (t1 - t0))
    set_trace()
    network_status.plot_branches()

    ## network_status.plot_unique_chains()

if __name__ == '__main__':
    main()

--------------------------------------------------------------------------------
/network_status.py:
--------------------------------------------------------------------------------
import hashlib
from collections import Counter
import pygraphviz as pgv

import matplotlib.pyplot as plt

from IPython.core.debugger import set_trace

class NetworkStatus():
    def __init__(self):

        self.network_tick = []
        ## {tick: [# of branches at depth (index)]}
        self.unique_chains = {}
        self.current_partition = {}
        ## current active set
        self.current_as = {}


    def print_snapshot(self, snapshot):
        ## snapshot of form {validator : {slot : block}}
        ## print tree of current network chain status
        ## Nodes are blocks, edges are time between blocks,
        ## labels are vote counts/% across a given slot

        if snapshot.shape[0] < 2: return

        g = pgv.AGraph(strict = True, directed = True)

        edge_ctr = {}
        branch_ctr = {}
        for col_num in range(snapshot.shape[1]):

            cur_snapshot = snapshot[col_num]

            ## create node ids
            ## node IDs should be a hash of all blocks in its history --> unique branches

            # block_hashes = []
            # for i, block in enumerate(cur_snapshot):
            #     if block == '0':
            #         block_hashes.append('0')
            #     else:
            #         cur_block_hash = block + '-' + '-'.join(cur_snapshot[:i])
            #         cur_block_hash = hashlib.sha256(cur_block_hash.encode()).hexdigest()
            #         block_hashes.append(cur_block_hash)

            # cur_edges = list(zip(block_hashes, block_hashes[1:]))

            cur_edges = list(zip(cur_snapshot, cur_snapshot[1:]))

            ## count branch
            branch_ctr[tuple(cur_edges)] = 1 if tuple(cur_edges) not in branch_ctr else branch_ctr[tuple(cur_edges)] + 1

            for t, cur_edge in enumerate(cur_edges):
                ## ce = ["{}... T={}".format(node[:5], t) for node in cur_edge]
                ## converting to hex, display with slot time
                ## hacky way to avoid self loops (e.g. 0 -> 0)

                ce = tuple(["{}... T={}".format(node[:5], t + i) for i, node in enumerate(cur_edge)])
                if ce in edge_ctr:
                    edge_ctr[ce] += 1
                else:
                    edge_ctr[ce] = 1

                ## add weight label
                ## t is key to identify time
                g.add_edge(ce[0], ce[1], str(t),
                           weight = edge_ctr[ce],
                           label = "{0:.0%}".format(1. * edge_ctr[ce] / snapshot.shape[1]))

        ## for e in range(len(g.edges())):
        ##     g.get_edge(g.edges()[e][0], g.edges()[e][1]).attr["label"] = 1.*edge_ctr[g.get_edge(g.edges()[e][0], g.edges()[e][1])]/sum(edge_ctr.values())

        ## print(g)

        g.layout(prog = "dot")
        network_file_name = "./figures/nwk_n{}_t{:02}".format(snapshot.shape[1], snapshot.shape[0] - 1)
        g.draw(network_file_name + ".png")

    def update_status(self, network, chain_analysis = False, print_snapshot = False):

        t = network.time
        snapshot = network.snapshot(t)

        if print_snapshot: self.print_snapshot(snapshot)

        self.network_tick.append(t)

        self.current_partition[t] = len(network.partition_nodes)
        self.current_as[t] = len(network.nodes)

        ## write # of unique chains
        if chain_analysis:
            cur_chains = {}
            for col_num in range(snapshot.shape[1]):
                cur_snapshot = snapshot[col_num]

                for depth_num, block in enumerate(cur_snapshot):
                    str_chain_depth = cur_snapshot[:depth_num + 1].to_string()
                    cur_chain_hash = hashlib.sha256(str_chain_depth.encode()).hexdigest()

                    if depth_num not in cur_chains:
                        cur_chains[depth_num] = [cur_chain_hash]
                    else:
                        cur_chains[depth_num].append(cur_chain_hash)

            ## number of distinct chain prefixes at each depth
            self.unique_chains[snapshot.shape[0] - 1] = [len(Counter(chain_hashes)) for chain_hashes in cur_chains.values()]

            # cur_snapshot = snapshot[col_num].to_string()
            # unique_chains.append(hashlib.sha256(cur_snapshot).hexdigest())

            # self.unique_chains.append(len(dict(Counter(unique_chains))))

        return snapshot

    def plot_branches(self):

        plt.ion()
        ticks = sorted(self.unique_chains.keys())

        fig, axarr = plt.subplots(2, len(ticks), sharex = True, figsize = (24, 12.8))

        for i, tick in enumerate(ticks):
            axarr[0][i].scatter(range(len(self.unique_chains[tick])), self.unique_chains[tick])
            ## other plot: partition sizes over the same span
            axarr[1][i].scatter(range(len(self.unique_chains[tick])), list(self.current_partition.values())[:(tick + 1)])
        fig.show()

        # marker = 'o',
        # c = 'r',
        # edgecolor = 'b'
        # )
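
## ---------------------------------------------------------------------
## Illustration only (not called by the simulation): update_status counts
## unique branches by hashing each node's chain prefix at every depth and
## counting distinct hashes per depth. A standalone sketch of the same
## idea, using hypothetical toy chains:
if __name__ == '__main__':
    toy_chains = [['a', 'b', 'c'], ['a', 'b', 'd'], ['a', 'x', 'y']]
    for depth in range(3):
        prefixes = set(hashlib.sha256(str(chain[:depth + 1]).encode()).hexdigest()
                       for chain in toy_chains)
        print("depth %d: %d unique branch(es)" % (depth, len(prefixes)))
    ## -> depth 0: 1, depth 1: 2, depth 2: 3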
--------------------------------------------------------------------------------
/network.py:
--------------------------------------------------------------------------------
######################################
## Simulating Solana branch consensus
## TODO
## - reset block cache!
## - validate lockout calc
## - incorporate saved forks into chain
## - use virtual ticks when node receives block
## - add node stakes
## - slot transmission times
## - destaking / leakage
## - higher destaking rate for non-voters in smaller partition
## - viz and monitoring
## - fix NetworkStatus bug due to data missing from dropouts
## - confirm timing alignment
######################################

import random
random.seed(11)
import numpy as np
np.random.seed(11)
import pandas as pd

from collections import Counter

import hashlib

from IPython.core.debugger import set_trace

import logging, sys
logging.basicConfig(filename = 'global.log', filemode = 'w', level = logging.DEBUG)


########################
## Lockout Function
########################
MIN_LOCKOUT = 0  ## TMP: unit is slot, to be PoH?
MAX_LOCKOUT = 20736000  ## ~4 months in slot time (2 slots / second)

def calc_lockout_time(current_time, prev_vote_time, k = 1, base = 2,
                      min_lockout = MIN_LOCKOUT, max_lockout = MAX_LOCKOUT):
    ## exponential lockout: grows by a factor of `base` every k slots
    ## since the previous vote, capped at max_lockout

    z = (current_time - prev_vote_time) / k
    exp_z = k * (base ** (z + 1))
    lockout_time = int(min_lockout + exp_z)

    if lockout_time > max_lockout:
        lockout_time = max_lockout
    return lockout_time
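## Worked example: with k = 2 (the value Node.update_lockouts passes),
## a vote's lockout duration doubles every two slots of age:
##   calc_lockout_time(10, 10, k = 2) ->  4   (z = 0)
##   calc_lockout_time(12, 10, k = 2) ->  8   (z = 1)
##   calc_lockout_time(14, 10, k = 2) -> 16   (z = 2)
## i.e. the older the vote, the longer the node stays locked to its branch.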

########################
## Network
########################

class Network():
    def __init__(self, latency, genesis):
        self.nodes = []
        self.round_robin = []
        self.latency = latency
        self.transmission_time = 1e6  # currently >> latency
        self.time = 0
        self.msg_arrivals = {}
        self.dropout_rate = 0.01
        self.partition_nodes = []
        self.genesis = genesis
        self.active_set = len(self.nodes)

    def status(self):
        ## quick summary of network status

        ## Node agreement
        node_heads = [node.chain[max(node.chain.keys())] for node in self.nodes]
        print("Node agreement: %d%%" % (100 * (1 - float(len(set(node_heads)) - 1) / len(node_heads))))

    def snapshot(self, _time):
        ## DataFrame structure of node chains over time
        print("Tick: %d" % _time)
        chain_data = {}
        for i, node in enumerate(self.nodes):
            branch_chain = {}

            ## is latest block virtual?
            ## Replace virtual blocks with cached
            ## if node.chain[_time] == 0 and len(node.cache) > 0:
            ##     ## branch chain
            ##     branch_chain = {int(k): str(v.get_block().hash) for k, v in node.cache.items()}

            chain = {int(k): str(v) for k, v in node.chain.items() if k not in branch_chain}
            chain = {**chain, **branch_chain}

            chain_data[i] = chain

        n_branches = len(Counter([str(cd.values()) for cd in chain_data.values()]))
        print("# of branches: %s" % n_branches)

        return pd.DataFrame(chain_data)


    def broadcast(self, block_transmission, broadcast_nodes):
        ## Called by leader node to transmit block to rest of network

        ## replace with msg.block_time?
        ## send to next slot
        next_step = self.time + 1

        logging.debug("Leader broadcast: %s" % (block_transmission.get_block().hash))

        for i, current_node in enumerate(self.nodes):

            if current_node.id not in broadcast_nodes:
                continue
            ## TMP: ignore delay
            ## delay = self.latency()

            if next_step not in self.msg_arrivals:
                self.msg_arrivals[next_step] = []
            self.msg_arrivals[next_step].append((i, block_transmission))

    ## Network::tick
    def tick(self):
        ## Deliver all data broadcast in this slot.
        ## Random network dropouts at node level and
        ## partitioned nodes.
        ## TODO: partitioned nodes are not currently a separate network;
        ##       they just miss any broadcasts

        ## PLACEHOLDER: set active set
        self.active_set = len(self.nodes)

        if self.time in self.msg_arrivals:  ## messages to be delivered
            for node_index, block_transmission in self.msg_arrivals[self.time]:
                if np.random.uniform() > self.dropout_rate:
                    self.nodes[node_index].receive_block(block_transmission, self.time)
            del self.msg_arrivals[self.time]

        ## for node in self.nodes:
        ##     logging.debug("Node %s received: %s" % (node.id, node.chain[max(node.chain.keys())]))

        ## not ideal
        for node in self.nodes:

            ## if no data was transmitted,
            ## add virtual tick to chain
            if self.time not in node.chain:
                node.chain[self.time] = 0

            ## find leader
            if np.random.uniform() > self.dropout_rate:
                node.tick(self.time)
            else:
                logging.debug("Dropout! Node: %d at time: %d" % (node.id, self.time))

        self.time += 1

class BlockTransmission():
    ## Data transmission unit
    ## Data: previous virtual ticks and block

    def __init__(self, block = None, previous_ticks = None):
        ## avoid a shared mutable default for previous_ticks
        self._previous_ticks = [] if previous_ticks is None else previous_ticks
        self._block = block

    def set_block(self, block):
        self._block = block

    def get_block(self):
        return self._block

    def set_previous_ticks(self, ticks):
        self._previous_ticks = ticks

    def get_previous_ticks(self):
        return self._previous_ticks


class Block():
    def __init__(self, initial_validator_set = None, parent = None, created_by = None, created_at = 0, nonce = ''):
        # self.hash = random.randrange(10**30)
        self.parent = parent
        payload = str(random.randrange(10**30)) if parent is None else str(random.randrange(10**30)) + parent.hash
        self.hash = hashlib.sha256(payload.encode()).hexdigest()

        self.block_time = created_at
        if not self.parent:  ## must be genesis
            self.prevhash = 0
            ## avoid a shared mutable default for the validator set
            self.votes = {0: initial_validator_set if initial_validator_set is not None else []}
            return
        # Set our block time and our prevhash
        self.prevhash = self.parent.hash
        self.votes = {self.block_time: [created_by]}  ## creation of a block is a vote


    def add_vote(self, vote_time, validator_id):
        if vote_time not in self.votes:  ## first vote
            self.votes[vote_time] = [validator_id]
        else:
            cur_votes = self.votes[vote_time]
            if validator_id in cur_votes:
                ## was a bare (unraised) ValueError; double votes can
                ## occur legitimately during rollback, so just flag them
                logging.warning("Double voting on block? Maybe during rollback.")
            else:
                self.votes[vote_time].append(validator_id)

    def get_hash_chain(self):
        ## returns a dict of time:hash for blocks connected to self, including the current block
        tmp_block = self
        block_hashes = {tmp_block.block_time: tmp_block.hash}
        while tmp_block.parent is not None:
            block_hashes[tmp_block.parent.block_time] = tmp_block.parent.hash
            tmp_block = tmp_block.parent

        ## backfill virtual blocks
        for j in range(self.block_time):
            if j not in block_hashes:
                block_hashes[j] = 0

        return block_hashes
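## Example (toy values): for a block at block_time = 3 whose parent sits at
## block_time = 1 on top of genesis (block_time = 0), get_hash_chain()
## returns {3: <hash>, 1: <parent hash>, 0: <genesis hash>, 2: 0} --
## slot 2 is backfilled as a virtual tick (0).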

class Node():
    def __init__(self, network, id):
        self.id = id
        self.network = network
        network.nodes.append(self)
        # Received blocks
        self.received = {network.genesis.hash: network.genesis}
        self.chain = {0: network.genesis.hash}  ## time:hash, helps keep self.received in order
        self.lockouts = {network.genesis.hash: MIN_LOCKOUT}  ## lockouts associated with votes for blocks
        self.cache = {}  ## {0 : BlockTransmission(block = network.genesis, previous_ticks = [])} ## when locked out, store current transmission
        self.finalized = {0: network.genesis}  ## TESTING - store finalized blocks when observes 2/3 votes
        self.active_set = {0: network.active_set}

    def receive_block(self, block_transmission, time):

        if time <= max(self.chain.keys()):  ## latest time
            raise ValueError("Node %s cannot accept block at height %s" % (self.id, time))

        ## save active set for future finality calcs
        self.active_set[time] = self.network.active_set


        block = block_transmission.get_block()
        previous_ticks = block_transmission.get_previous_ticks()

        ## need to check if locked out.
        ## Locked out if I have a record of voting on a
        ## transmission that isn't included in the leader's broadcast,
        ## and if any of my vote lockout times are past the current PoH

        node_block_hashes = self.received.keys()
        leader_hash_chain = block.get_hash_chain()

        ## if I have any blocks that aren't in the leader's block chain,
        ## the leader is broadcasting a branch
        # on_same_branch = all([node_block in leader_hash_chain.values() for node_block in node_block_hashes])
        on_same_branch = set(node_block_hashes).issubset(leader_hash_chain.values())

        if not on_same_branch:

            ## what is Node's maximum lockout on earliest
            ## block not on leader branch

            branch_time = self.get_branch_split_time(block, time)
            lockout_time = self.get_current_lockout(branch_time)

            # max_lockout = max(self.lockouts.values())

            if lockout_time > time:
                ## if locked out: don't vote, don't update lockouts, store transmission,
                ## virtual ticks stored later
                self.cache[time] = block_transmission
                return
            else:
                ## switching branches:
                ## vote on latest block chain, fill in blocks if necessary from cache,
                ## register votes on all the blocks
                ## re-write / fill in blocks from cache
                ## Keep track of depth of rollback (E&M)
                ## TODO: how to update lockouts?

                rollback_times = []
                for t in sorted(self.chain.keys()):

                    ## only roll back blocks that differ and that are
                    ## not earlier than the split point (branch_time)
                    if self.chain[t] == leader_hash_chain[t] or t < branch_time:
                        continue
                    else:
                        ## remove current block from received
                        if self.chain[t] != 0: del self.received[self.chain[t]]
                        self.chain[t] = leader_hash_chain[t]

                        ## FIXME: optimize
                        err_reassigned = False
                        ## find block associated with that hash
                        if self.chain[t] != 0:
                            cur_leader_block = block
                            while cur_leader_block != self.network.genesis:
                                if cur_leader_block.hash == self.chain[t]:
                                    self.received[self.chain[t]] = cur_leader_block
                                    cur_leader_block.add_vote(t, self.id)
                                    err_reassigned = True
                                    break
                                else:
                                    cur_leader_block = cur_leader_block.parent
                            if not err_reassigned:
                                raise ValueError("Block re-assignment failed during rollback!")
                        rollback_times.append(t)
                if rollback_times:
                    print("Rollback depth: %s at time: %s for node: %s" % (min(rollback_times), time, self.id))

                ## receive head block
                self.received[block.hash] = block
                self.chain[time] = block.hash
                block.add_vote(time, self.id)

                ## update lockouts
                self.update_lockouts(time)
                ## self.lockouts[block.hash] = time + MIN_LOCKOUT ## block added and updated above

                ## clear cache
                self.cache = {}

        else:

            ## all of the blocks in the node's chain
            ## are contained in the leader chain:
            ## backfill node branch w/ leader branch to last matched

            ## find deepest slot to replace:
            ## either a virtual tick or the current slot

            ## FIXME: shouldn't be any/all virtual slots, just those since the last shared block
            ## -- must be an easier way

            ## find last non-virtual node block
            last_node_block_slot = max(self.chain.keys())

            while self.chain[last_node_block_slot] == 0:
                last_node_block_slot -= 1
            last_node_block_slot += 1

            ## fill with leader blocks
            while last_node_block_slot <= time:

                replacement_hash = leader_hash_chain[last_node_block_slot]

                if replacement_hash != 0:

                    ## get block (request from network)
                    replacement_block = block

                    if replacement_block.hash != replacement_hash:
                        leader_parent_block = block.parent
                        while leader_parent_block is not None:
                            if leader_parent_block.hash == replacement_hash:
                                replacement_block = leader_parent_block
                                break
                            else:
                                leader_parent_block = leader_parent_block.parent

                    if replacement_block is None:
                        raise ValueError("Replacement block not found!")

                    self.received[replacement_block.hash] = replacement_block
                    ## vote on the block filled into this slot (was block.add_vote)
                    replacement_block.add_vote(last_node_block_slot, self.id)

                self.chain[last_node_block_slot] = replacement_hash
                last_node_block_slot += 1

            ## update lockouts
            self.update_lockouts(time)
            ## self.lockouts[block.hash] = time + MIN_LOCKOUT ## block added and updated above

    def update_lockouts(self, time):
        ## run through votes (blocks), re-calc lockouts with current time
        ## re-writing lockouts entirely out of laziness;
        ## could deal with rollbacks much better

        self.lockouts = {}
        for block_hash in self.chain.values():
            if block_hash == 0: continue
            block_time = self.received[block_hash].block_time
            self.lockouts[block_hash] = time + calc_lockout_time(time, block_time, k = 2)


    def get_branch_split_time(self, current_block, time):

        ## FIXME: should chain history come from node, rather than block?
        ## !! chain from block != chain on node
        ## find last non-virtual block
        previous_node_hash_time = len(self.chain) - 1
        for i in range(1, time + 1):
            previous_node_hash = self.chain[time - i]
            if previous_node_hash != 0:
                break
            else:
                previous_node_hash_time -= 1

        ## get chain history from last non-virtual block
        # node_block_hashes = self.received[previous_node_hash].get_hash_chain()

        node_block_hashes = {k: v for k, v in self.chain.items() if k <= previous_node_hash_time}

        ## Get history of blocks from current block.
        ## Compare to history from Node's most up-to-date block
        current_block_hashes = current_block.get_hash_chain()
        # current_block_hashes = {k: v for k, v in current_block_hashes.items() if v != 0}

        ## loop through time, up to previous block time
        prev_block_time = min([max(current_block_hashes.keys()),
                               max(node_block_hashes.keys())])

        ## Find slot where/if a branch has occurred,
        ## i.e. find first slot where the two block histories differ
        branch_time = -1
        for i in range(prev_block_time + 1):
            # if node_block_hashes[i] != current_block_hashes[i] and node_block_hashes[i] != 0:
            if node_block_hashes[i] != current_block_hashes[i]:
                branch_time = i
                break
        return branch_time

    def get_current_lockout(self, branch_time):

        ## returns the time when the lockout on the current branch expires
        ## e.g. if current_time < lockout time, voting on the leader block/branch is slashable
        ##      if current_time >= lockout time, okay to vote on the leader block/branch
        ##
        ## lockout alg:
        ##  - find earliest (lowest PoH) block in Node chain not included in block transmission
        ##  - if lockout from that block is <= (=?) current block slot (PoH), vote on current chain

        ## branch_time is the earliest slot that differs;
        ## it might be a virtual block w/out a lockout, so roll
        ## forward: get all lockouts from branch_time on, return the max

        branch_slots = {k: v for k, v in self.chain.items() if k >= branch_time}

        lockouts = [0]
        for slot_time in branch_slots:
            if self.chain[slot_time] == 0:
                continue
            else:
                lockouts.append(self.lockouts[self.chain[slot_time]])

        return max(lockouts)
        # ## TODO: validate lockout
        # if branch_time < 0:
        #     ## same branch, no lockout
        #     return 0
        # else:
        #     return self.lockouts[self.chain[branch_time]]
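    ## Example (hypothetical values): if the branches diverge at slot 3 and
    ## the node's lockouts are {slot-3 block: 9, slot-5 block: 21}, then at
    ## time 14 get_current_lockout(3) returns 21 > 14: the node is locked
    ## out, so receive_block caches the transmission instead of voting.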
    ## Node::tick
    def tick(self, _time):

        ## leader:
        if self.network.round_robin[_time % len(self.network.nodes)] == self.id:

            logging.debug("I'm the leader! Node: %s at time: %s" % (self.id, _time))

            ## find last slot time with a block (not ticks)
            last_block_time = max([block_time for block_time, block in self.chain.items() if block != 0])

            ## to be delivered in the next round
            ## need to change hash to block from cache?

            new_block = Block(parent = self.received[self.chain[last_block_time]], created_by = self.id,
                              created_at = _time + 1)  # nonce = str(self.chain[last_block_time])


            ## bundle times of last N ticks (0s)
            previous_ticks = []
            for key in sorted(self.chain.keys(), reverse = True):
                if self.chain[key] == 0:
                    previous_ticks.append(key)
                else:
                    break

            new_block_transmission = BlockTransmission(block = new_block, previous_ticks = previous_ticks)


            ## determine what nodes to broadcast to,
            ## i.e. broadcast only to the leader's partition

            current_partition = self.network.partition_nodes

            broadcast_partition = [node.id for node in self.network.nodes if node.id not in current_partition]  ## TODO use active_set
            # if _time == 7: set_trace()
            if self.id in current_partition:
                broadcast_partition = current_partition

            ## generate delays and send to msg_arrivals of network,
            ## to be received by network in _time + 1
            self.network.broadcast(new_block_transmission, broadcast_partition)

            ## TODO: does leader receive now?
            ## self.receive_block(new_block, _time)

--------------------------------------------------------------------------------