├── .gitignore
├── README.md
├── mine.py
├── primitives
│   ├── __init__.py
│   ├── coins.py
│   ├── connection.py
│   ├── hashrate_meter.py
│   ├── message_types.py
│   ├── messages.py
│   ├── miner.py
│   ├── mining_params.py
│   ├── pool.py
│   ├── protocol.py
│   ├── session.py
│   └── types.py
├── requirements.txt
└── simulate-pool.py

--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | .scons*
2 | *.bin
3 | *.asc
4 | *.out
5 | *.vcd
6 | .pio/*
7 | build/
8 | **/__pycache__/
9 | *.pyc
10 | test
11 | *.sim
12 | *.dot
13 | 
14 | env/
15 | __pycache__/
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Mining controller
2 | 
3 | A simple Python Bitcoin miner speaking Stratum V2. It is not fast, since it is written in Python, but it is meant as a proof of concept for Stratum V2 mining, as I could not find any other Pythonic examples on GitHub.
4 | 
5 | It supports the following scenario:
6 | 
7 | ```
8 | Miner (V2) ----> pool (V2)
9 | ```
10 | 
11 | It includes a simulated pool and an actual miner client. Ideally you run both, but you can also run the miner against a real Stratum V2 server (Slush Pool runs some), though you may get banned after a while for not producing any valid shares.
12 | 
13 | # Running
14 | 
15 | - Create a virtualenv with `python3 -m venv env`.
16 | - Run `source env/bin/activate`.
17 | - Run `pip install -r requirements.txt`.
18 | 
19 | Run the pool with `python3 simulate-pool.py` and the miner with `python3 mine.py`.
20 | 
21 | # Overview
22 | 
23 | The protocol used is **Stratum V2**. The basis for this repository is https://github.com/braiins/braiins/tree/bos-devel/open/protocols/stratum/sim.
24 | 
25 | # Features
26 | 
27 | - Basic Bitcoin mining via the Stratum V2 protocol
28 | 
29 | 
30 | # Install
31 | 
32 | Requires Python 3.7.
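# Protocol notes

A typical session between `mine.py` and `simulate-pool.py` exchanges the messages defined in `primitives/messages.py` roughly in this order (a summary of the code, not a normative spec):

```
Miner                                     Pool
  | --- SetupConnection ----------------> |
  | <-- SetupConnectionSuccess ---------- |
  | --- OpenStandardMiningChannel ------> |
  | <-- OpenStandardMiningChannelSuccess  |
  | <-- NewMiningJob -------------------- |
  | <-- SetNewPrevHash ------------------ |
  | --- SubmitSharesStandard -----------> |  (one per found share)
  | <-- SubmitSharesSuccess or SubmitSharesError
```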
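Each message travels in a frame with a 6-byte header, assembled by `FRAME()` in `primitives/message_types.py`: a U16 `extension_type` (into which the per-message `channel_msg` bit is OR-ed), a U8 message type, and a U24 payload length, all little-endian. A minimal sketch; `encode_frame` is a hypothetical helper name, not a function from this codebase:

```python
def encode_frame(extension_type: int, msg_type: int, channel_msg_bit: int, payload: bytes) -> bytes:
    if channel_msg_bit:
        extension_type |= 0b10000000  # same flag placement FRAME() uses
    return (
        extension_type.to_bytes(2, "little")  # U16
        + msg_type.to_bytes(1, "little")      # U8
        + len(payload).to_bytes(3, "little")  # U24 payload length
        + payload
    )

# A SetupConnectionSuccess frame: msg_type 0x01, no channel_msg bit,
# payload = U16 used_version + U32 flags.
frame = encode_frame(0x0000, 0x01, 0, (2).to_bytes(2, "little") + (0).to_bytes(4, "little"))
assert len(frame) == 6 + 6  # 6-byte header + 6-byte payload
```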
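On the wire, every encrypted Noise message is additionally wrapped with a 2-byte little-endian length prefix. A standalone sketch of that framing with a round-trip check; `wrap`/`unwrap` here mirror the static methods of `Connection` in `primitives/connection.py`:

```python
def wrap(item: bytes) -> bytes:
    # prepend a 2-byte little-endian length, as Connection.wrap does
    return len(item).to_bytes(2, "little") + item

def unwrap(item: bytes) -> tuple:
    # return (payload, remainder), as Connection.unwrap does
    n = int.from_bytes(item[0:2], "little")
    return item[2 : 2 + n], item[2 + n :]

payload, rest = unwrap(wrap(b"hello") + b"next frame")
assert payload == b"hello" and rest == b"next frame"
```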
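Difficulty and target are two views of the same number: `difficulty = diff_1_target // target` (see `primitives/coins.py`). Assuming the usual Bitcoin difficulty-1 target, which matches the `0xFFFF << 208` constant printed by `primitives/miner.py`:

```python
diff_1_target = 0xFFFF << 208  # Bitcoin difficulty-1 target (assumed)

# A pool asking for difficulty 1024 sets the share target to:
target = diff_1_target // 1024
# ...and converting back recovers the difficulty exactly:
assert diff_1_target // target == 1024
```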
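The miner itself builds a block header from the job fields (`version` and `merkle_root` from `NewMiningJob`; `prev_hash`, `min_ntime` and `nbits` from `SetNewPrevHash`), appends a nonce, and double-SHA256-hashes it; a share is found when the hash, read as a little-endian integer as in `Miner.mine()`, falls below the channel target. A condensed sketch of that loop; `mine_range` is a hypothetical name:

```python
from hashlib import sha256

def mine_range(header_without_nonce: bytes, target: int, max_nonce: int = 100_000):
    # Brute-force nonces like Miner.mine(); the header layout is
    # version(4, LE) + prev_hash(32) + merkle_root(32) + ntime(4, LE) + nbits(4, LE).
    for nonce in range(max_nonce):
        header = header_without_nonce + nonce.to_bytes(4, "little")
        digest = sha256(sha256(header).digest()).digest()
        if int.from_bytes(digest, "little") < target:
            return nonce  # a share: would be submitted via SubmitSharesStandard
    return None  # no share in this nonce range
```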
33 | 
--------------------------------------------------------------------------------
/mine.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import asyncio
3 | import logging
4 | import time
5 | import traceback
6 | 
7 | import numpy as np
8 | import simpy
9 | from colorama import Fore, init
10 | from event_bus import EventBus
11 | 
12 | import primitives.coins as coins
13 | import primitives.mining_params as mining_params
14 | from primitives.connection import Connection
15 | from primitives.miner import Miner
16 | from primitives.pool import Pool
17 | 
18 | init()
19 | bus = EventBus()
20 | 
21 | 
22 | async def connect():
23 |     np.random.seed(123)
24 |     parser = argparse.ArgumentParser(
25 |         prog="mine.py",
26 |         description="Simulates the interaction of a mining pool and a miner",
27 |     )
28 |     parser.add_argument(
29 |         "--realtime",
30 |         help="run simulation in real-time (otherwise it runs as fast as possible)",
31 |         action="store_const",
32 |         const=True,
33 |     )
34 |     parser.add_argument(
35 |         "--rt-factor",
36 |         help="real-time simulation factor, default=1 (enter 0.5 to run twice as fast as real time)",
37 |         type=float,
38 |         default=1,
39 |     )
40 |     parser.add_argument(
41 |         "--limit",
42 |         type=int,
43 |         help="simulation time limit in seconds, default = 50",
44 |         default=50,
45 |     )
46 |     parser.add_argument(
47 |         "--verbose",
48 |         help="display all events (warning: a lot of text is generated)",
49 |         action="store_const",
50 |         const=True,
51 |     )
52 | 
53 |     parser.add_argument(
54 |         "--plain-output",
55 |         help="Print just values to terminal: accepted shares, accepted submits,"
56 |         " stale shares, stale submits, rejected submits",
57 |         action="store_true",
58 |     )
59 | 
60 |     args = parser.parse_args()
61 | 
62 |     if args.verbose:
63 | 
64 |         @bus.on("miner1")
65 |         def subscribe_m1(ts, conn_uid, message):
66 |             print(
67 |                 Fore.LIGHTRED_EX,
68 |                 "T+{0:.3f}:".format(ts),
69 |                 "(miner1)",
70 |                 conn_uid if conn_uid is not None else "",
71 |                 message,
72 |                 Fore.RESET,
73 |             )
74 | 
75 |     conn1 = Connection(
76 |         "miner",
77 |         "stratum",
78 |         # pool_host="v2.eu.stratum.slushpool.com",
79 |         # pool_port=3336,
80 |         pool_host="localhost",
81 |         pool_port=2000,
82 |     )
83 | 
84 |     m1 = Miner(
85 |         "xtrinch.worker",
86 |         bus,
87 |         diff_1_target=mining_params.diff_1_target,
88 |         device_information=dict(
89 |             speed_ghps=0.000250,  # 250,000 hashes per second
90 |             vendor="python",
91 |             hardware_version="PC",
92 |             firmware="python-miner",
93 |             device_id="xtrinch.worker",
94 |         ),
95 |         connection=conn1,
96 |     )
97 | 
98 |     await m1.connect_to_pool(conn1)
99 | 
100 |     return m1, conn1
101 | 
102 | 
103 | async def main():
104 |     (m1, conn1) = await connect()
105 | 
106 |     await m1.receive_loop()
107 | 
108 | 
109 | if __name__ == "__main__":
110 |     # logging.basicConfig(level=logging.DEBUG)
111 | 
112 |     asyncio.run(main())
113 | 
--------------------------------------------------------------------------------
/primitives/__init__.py:
--------------------------------------------------------------------------------
1 | 
2 | 
--------------------------------------------------------------------------------
/primitives/coins.py:
--------------------------------------------------------------------------------
1 | """Helper module with generic coin algorithms"""
2 | 
3 | 
4 | class Target:
5 |     def __init__(self, target: int, diff_1_target: int):
6 |         self.target = target
7 |         self.diff_1_target = diff_1_target
8 | 
9 |     def to_difficulty(self):
10 |         """Converts target to difficulty at the
network specified by diff_1_target""" 11 | return self.diff_1_target // self.target 12 | 13 | @staticmethod 14 | def from_difficulty(diff, diff_1_target): 15 | 16 | """Converts difficulty to target at the network specified by diff_1_target""" 17 | return Target(diff_1_target // diff, diff_1_target) 18 | 19 | def div_by_factor(self, factor: float): 20 | self.target = self.target // factor 21 | 22 | def __str__(self): 23 | return "{}(diff={}, target={})".format( 24 | type(self).__name__, 25 | self.to_difficulty(), 26 | self.target.to_bytes(32, byteorder="big").hex(), 27 | ) 28 | 29 | def to_bytes(self): 30 | # TODO: convert to bytes 31 | return 1 32 | -------------------------------------------------------------------------------- /primitives/connection.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | import random 3 | import socket 4 | import time 5 | from abc import ABC, abstractmethod 6 | 7 | import base58 8 | import ed25519 9 | import numpy as np 10 | import simpy 11 | from colorama import Fore, Style 12 | from dissononce.cipher.chachapoly import ChaChaPolyCipher 13 | from dissononce.dh.x25519.x25519 import X25519DH 14 | from dissononce.hash.blake2s import Blake2sHash 15 | from dissononce.processing.handshakepatterns.interactive.NX import NXHandshakePattern 16 | from dissononce.processing.impl.cipherstate import CipherState 17 | from dissononce.processing.impl.handshakestate import HandshakeState 18 | from dissononce.processing.impl.symmetricstate import SymmetricState 19 | from hashids import Hashids 20 | 21 | from primitives.messages import Message 22 | 23 | SLUSHPOOL_CA_PUBKEY = "u95GEReVMjK6k5YqiSFNqqTnKU4ypU2Wm8awa6tmbmDmk1bWt" 24 | 25 | 26 | def gen_uid(): 27 | hashids = Hashids() 28 | return hashids.encode(random.randint(0, 16777216)) 29 | 30 | 31 | class Connection: 32 | def __init__( 33 | self, 34 | type_name, 35 | port: str, 36 | pool_host="", 37 | pool_port=3336, 38 | ): 39 | self.type_name = type_name 40 | self.uid = gen_uid() 41 | self.port = port 42 | self.conn_target = None 43 | self.sock = None 44 | self.pool_host = pool_host 45 | self.pool_port = pool_port 46 | self.cipher_state: CipherState = None 47 | 48 | async def connect_to_pool(self): 49 | self.sock = await asyncio.open_connection(self.pool_host, self.pool_port) 50 | await self.connect_to_noise(self.pool_host != "localhost") 51 | 52 | def disconnect(self): 53 | # TODO: Review whether to use assert's or RuntimeErrors in simulation 54 | if self.conn_target is None: 55 | raise RuntimeError("Not connected") 56 | self.conn_target.disconnect(self) 57 | self.conn_target = None 58 | 59 | def is_connected(self): 60 | return self.conn_target is not None 61 | 62 | def send_msg(self, msg: Message): 63 | print( 64 | f"{Style.BRIGHT}{Fore.GREEN}Msg send: {Style.NORMAL}%s{Style.RESET_ALL}" 65 | % msg 66 | ) 67 | 68 | ciphertext = self.cipher_state.encrypt_with_ad(b"", msg.to_frame()) 69 | final_message = Connection.wrap(ciphertext) 70 | 71 | if self.conn_target: 72 | self.conn_target.send(final_message) 73 | else: 74 | self.sock[1].write(final_message) 75 | 76 | async def connect_to_noise(self, verify_connection: bool = True): 77 | # prepare handshakestate objects for initiator and responder 78 | our_handshakestate = HandshakeState( 79 | SymmetricState( 80 | CipherState( 81 | # AESGCMCipher() 82 | ChaChaPolyCipher() # chacha20poly1305 83 | ), 84 | Blake2sHash(), 85 | ), 86 | X25519DH(), 87 | ) 88 | 89 | our_handshakestate.initialize(NXHandshakePattern(), True, b"") 90 | 
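        # With the Noise NX pattern the initiator (this miner) sends only its
        # ephemeral key ("e") and learns the pool's static public key from the
        # "e, ee, s, es" reply; that key is then checked against the pool's
        # signed certificate via SignatureMessage below.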
91 | # -> e which is really -> 2 byte length, 32 byte public key, 22 byte cleartext payload 92 | message_buffer = bytearray() 93 | our_handshakestate.write_message(b"", message_buffer) 94 | message_buffer = Connection.wrap(bytes(message_buffer)) 95 | num_sent = self.sock[1].write(message_buffer) # rpc send 96 | 97 | # <- e, ee, s, es, SIGNATURE_NOISE_MESSAGE 98 | message_buffer = bytearray() 99 | ciphertext = await self.sock[0].read(4096) # rpc recv 100 | print(ciphertext) 101 | frame, _ = Connection.unwrap(ciphertext) 102 | self.cipherstates = our_handshakestate.read_message(frame, message_buffer) 103 | self.cipher_state = self.cipherstates[0] 104 | self.decrypt_cipher_state = self.cipherstates[1] 105 | 106 | pool_static_server_key = our_handshakestate.rs.data 107 | 108 | if verify_connection: 109 | signature = SignatureMessage( 110 | message_buffer, pool_static_server_key, self.pool_host == "localhost" 111 | ) 112 | signature.verify() 113 | 114 | return True 115 | 116 | @staticmethod 117 | # adds 2 byte length 118 | def wrap(item: bytes) -> bytes: 119 | item_length = len(item) 120 | return item_length.to_bytes(2, byteorder="little") + item 121 | 122 | @staticmethod 123 | # removes 2 byte length 124 | def unwrap(item: bytes) -> (bytes, bytes): 125 | length_prefix = item[0:2] 126 | payload_length = int.from_bytes(length_prefix, byteorder="little") 127 | return (item[2 : 2 + payload_length], item[payload_length + 2 :]) 128 | 129 | def decrypt(self, ciphertext: bytes) -> bytes: 130 | frame, _ = Connection.unwrap(ciphertext) 131 | raw = self.decrypt_cipher_state.decrypt_with_ad(b"", frame) 132 | return raw 133 | 134 | async def receive(self) -> [Message]: 135 | if self.sock is None: 136 | return [] 137 | 138 | ciphertext = await self.sock[0].read(8192) 139 | if len(ciphertext) == 0: 140 | return [] 141 | 142 | print( 143 | f"{Style.BRIGHT}{Fore.YELLOW}Rcv raw: {Style.NORMAL}%d bytes{Style.RESET_ALL}" 144 | % len(ciphertext) 145 | ) 146 | 147 | # we may receive multiple messages in one noise message, we must decrypt 148 | # them separately 149 | remaining_length = len(ciphertext) 150 | decoded_msgs = [] 151 | 152 | while remaining_length > 0: 153 | raw = self.decrypt(ciphertext) 154 | msg_length = len(raw) 155 | 156 | decoded_msg = Message.from_frame(raw) 157 | decoded_msgs.append(decoded_msg) 158 | 159 | # noise overhead seems to be 18 bytes per message 160 | remaining_length = remaining_length - (msg_length + 18) 161 | # discard the message we decoded in this run of the while loop 162 | ciphertext = ciphertext[len(ciphertext) - (remaining_length) :] 163 | 164 | print( 165 | f"{Style.BRIGHT}{Fore.YELLOW}Msg rcv: {Style.NORMAL}%s{Style.RESET_ALL}" 166 | % decoded_msg 167 | ) 168 | 169 | return decoded_msgs 170 | 171 | 172 | class SignatureMessage: 173 | def __init__( 174 | self, raw_signature: bytes, noise_static_pubkey: bytes, is_localhost: bool 175 | ): 176 | if not is_localhost: 177 | self.authority_key = base58.b58decode_check(SLUSHPOOL_CA_PUBKEY) 178 | else: 179 | self.authority_key = base58.b58decode_check(SLUSHPOOL_CA_PUBKEY) 180 | 181 | self.noise_static_pubkey = noise_static_pubkey 182 | self.version = int.from_bytes(raw_signature[0:2], byteorder="little") 183 | self.valid_from = int.from_bytes(raw_signature[2:6], byteorder="little") 184 | self.not_valid_after = int.from_bytes(raw_signature[6:10], byteorder="little") 185 | signature_length = int.from_bytes(raw_signature[10:12], byteorder="little") 186 | self.signature = bytes(raw_signature[12 : 12 + signature_length]) 187 | 188 | def 
__serialize_for_verification(self): 189 | buffer = self.version.to_bytes(2, byteorder="little") 190 | buffer += self.valid_from.to_bytes(4, byteorder="little") 191 | buffer += self.not_valid_after.to_bytes(4, byteorder="little") 192 | buffer += len(self.noise_static_pubkey).to_bytes(2, byteorder="little") 193 | buffer += self.noise_static_pubkey 194 | buffer += len(self.authority_key).to_bytes(2, byteorder="little") 195 | buffer += self.authority_key 196 | return bytes(buffer) 197 | 198 | def verify(self): 199 | pool_pubkey = ed25519.VerifyingKey(self.authority_key) 200 | message = self.__serialize_for_verification() 201 | pool_pubkey.verify(self.signature, message) 202 | assert int(time.time()) < self.not_valid_after, "Expired certificate" 203 | -------------------------------------------------------------------------------- /primitives/hashrate_meter.py: -------------------------------------------------------------------------------- 1 | """ 2 | this class estimates miner speed from reported shares 3 | implemented using rolling time window 4 | the HashrateMeter.roll method is called automatically each 5 seconds by default (granularity = 5) 5 | """ 6 | import time 7 | 8 | import numpy as np 9 | import simpy 10 | 11 | 12 | class HashrateMeter(object): 13 | def __init__( 14 | self, 15 | window_size: int = 60, 16 | granularity: int = 5, 17 | auto_hold_threshold=None, 18 | ): 19 | self.time_started = ( 20 | self.get_time() 21 | ) # was originally zero, as simpy starts from 0 22 | self.window_size = window_size 23 | self.granularity = granularity 24 | self.pow_buffer = np.zeros(self.window_size // self.granularity) 25 | self.submit_buffer = np.zeros(self.window_size // self.granularity) 26 | self.frozen_time_buffer = np.zeros(self.window_size // self.granularity) 27 | self.roll_proc = None 28 | # self.roll_proc = env.process(self.roll()) 29 | self.auto_hold_threshold = auto_hold_threshold 30 | self.on_hold = False 31 | self.put_on_hold_proc = None 32 | 33 | def get_time(self): 34 | return int(time.time()) 35 | 36 | def reset(self, time_started): 37 | self.pow_buffer = np.zeros(self.window_size // self.granularity) 38 | self.submit_buffer = np.zeros(self.window_size // self.granularity) 39 | self.frozen_time_buffer = np.zeros(self.window_size // self.granularity) 40 | self.time_started = time_started 41 | if self.put_on_hold_proc: 42 | self.put_on_hold_proc.interrupt() # terminate the current auto-on-hold process if exists 43 | 44 | def roll(self): 45 | while True: 46 | try: 47 | yield self.env.timeout(self.granularity) 48 | if not self.on_hold: 49 | self.pow_buffer = np.roll(self.pow_buffer, 1) 50 | self.pow_buffer[0] = 0 51 | self.submit_buffer = np.roll(self.submit_buffer, 1) 52 | self.submit_buffer[0] = 0 53 | self.frozen_time_buffer = np.roll(self.frozen_time_buffer, 1) 54 | self.frozen_time_buffer[0] = 0 55 | else: 56 | self.frozen_time_buffer[0] += self.granularity 57 | except simpy.Interrupt: 58 | break 59 | 60 | def on_hold_after_timeout(self): 61 | try: 62 | yield self.env.timeout(self.auto_hold_threshold) 63 | self.on_hold = True 64 | self.put_on_hold_proc = None 65 | except simpy.Interrupt: 66 | pass # do nothing 67 | 68 | def measure(self, share_diff: int): 69 | """Account for the shares 70 | 71 | TODO: consider changing the interface to accept the difficulty target directly 72 | """ 73 | self.pow_buffer[0] += share_diff 74 | self.submit_buffer[0] += 1 75 | self.on_hold = False # reset frozen status whenever a share is submitted 76 | if self.auto_hold_threshold: 77 | if 
self.put_on_hold_proc:
78 |                 self.put_on_hold_proc.interrupt()  # terminate the current auto-on-hold process if exists
79 |             self.put_on_hold_proc = self.env.process(  # NOTE: simpy leftover; only reached when auto_hold_threshold is set
80 |                 self.on_hold_after_timeout()
81 |             )  # will trigger after the threshold
82 | 
83 |     def get_speed(self):
84 |         total_time_held = np.sum(self.frozen_time_buffer)
85 |         time_elapsed = self.get_time() - self.time_started - total_time_held
86 |         if time_elapsed > self.window_size:
87 |             time_elapsed = self.window_size
88 |         total_work = np.sum(self.pow_buffer)
89 |         if time_elapsed < 1 or total_work == 0:
90 |             return None
91 | 
92 |         return total_work * 4.294967296 / time_elapsed
93 | 
94 |         # time_elapsed = self.env.now - self.time_started - total_time_held
95 |         # if time_elapsed > self.window_size:
96 |         #     time_elapsed = self.window_size
97 |         # total_work = np.sum(self.pow_buffer)
98 |         # if time_elapsed < 1 or total_work == 0:
99 |         #     return None
100 | 
101 |         # return total_work * 4.294967296 / time_elapsed
102 | 
103 |     def get_submit_per_secs(self):
104 |         total_time_held = np.sum(self.frozen_time_buffer)
105 |         time_elapsed = self.get_time() - self.time_started - total_time_held
106 |         if time_elapsed < 1:
107 |             return None
108 |         elif time_elapsed > self.window_size:
109 |             time_elapsed = self.window_size
110 |         return np.sum(self.submit_buffer) / time_elapsed
111 | 
112 |     def is_on_hold(self):
113 |         return self.on_hold
114 | 
115 |     def terminate(self):
116 |         self.roll_proc.interrupt()
117 |         if self.put_on_hold_proc:
118 |             self.put_on_hold_proc.interrupt()  # terminate the current auto-on-hold process if exists
--------------------------------------------------------------------------------
/primitives/message_types.py:
--------------------------------------------------------------------------------
1 | import array
2 | import binascii
3 | import ctypes
4 | import struct
5 | 
6 | 
7 | def BOOL(bool):
8 | 
9 |     if bool:
10 |         bool = True
11 |     else:
12 |         bool = False
13 | 
14 |     s = struct.Struct("<" + " ?")
15 |     b = ctypes.create_string_buffer(1)
16 |     s.pack_into(b, 0, bool)
17 | 
18 |     return b.raw
19 | 
20 | 
21 | def U8(inter):
22 | 
23 |     assert type(inter) is int, "U8: not integer"
24 | 
25 |     if inter >= 2 ** 8:
26 |         raise Exception("Overflow")
27 | 
28 |     return (inter).to_bytes(1, byteorder="little")
29 | 
30 | 
31 | def U16(inter):
32 | 
33 |     assert type(inter) is int, "U16: not integer"
34 | 
35 |     if inter >= 2 ** 16:
36 |         raise Exception("Overflow")
37 | 
38 |     return (inter).to_bytes(2, byteorder="little")
39 | 
40 | 
41 | def U24(inter):
42 | 
43 |     assert type(inter) is int, "U24: not integer"
44 | 
45 |     if inter >= 2 ** 24:
46 |         raise Exception("Overflow")
47 | 
48 |     return (inter).to_bytes(3, byteorder="little")
49 | 
50 | 
51 | def U32(inter):
52 |     assert type(inter) is int, "U32: not integer"
53 | 
54 |     if inter >= 2 ** 32:
55 |         raise Exception("Overflow")
56 | 
57 |     return (inter).to_bytes(4, byteorder="little")
58 | 
59 | 
60 | def F32(inter):
61 | 
62 |     assert type(inter) is float, "F32: not float"
63 | 
64 |     # little endian
65 |     return struct.pack("<f", inter)
66 | 
67 | 
68 | def U64(inter):
69 | 
70 |     assert type(inter) is int, "U64: not integer"
71 | 
72 |     if inter >= 2 ** 64:
73 |         raise Exception("Overflow")
74 | 
75 |     return (inter).to_bytes(8, byteorder="little")
76 | 
77 | 
78 | def U256(inter):
79 |     if type(inter) is bytes:
80 |         return inter
81 | 
82 |     assert type(inter) is int, "U256: not integer"
83 | 
84 |     if inter >= 2 ** 256:
85 |         raise Exception("Overflow")
86 | 
87 |     return (inter).to_bytes(32, byteorder="little")
88 | 
89 | 
90 | def STR0_255(string):
91 | 
92 |     assert type(string) is str, "STR0_255: not string"
93 | 
94 |     length = string.__len__()
95 | 
96 |     if length not in
range(0, 2 ** 8): 97 | raise Exception("Overflow") 98 | 99 | s = struct.Struct("<" + " " + str(length) + "s") 100 | 101 | b = ctypes.create_string_buffer(length) 102 | 103 | s.pack_into(b, 0, string.encode("utf-8")) 104 | 105 | return U8(length) + b.raw 106 | 107 | 108 | def B0_32(_bytes): 109 | assert type(_bytes) is bytes, "B0_32: not bytes" 110 | length = _bytes.__len__() 111 | if length not in range(0, 2 ** 8): 112 | raise Exception("Overflow") 113 | 114 | return U8(length) + _bytes 115 | 116 | 117 | def B0_255(_bytes): 118 | assert type(_bytes) is bytes, "B0_255: not bytes" 119 | 120 | length = _bytes.__len__() 121 | 122 | if length not in range(0, 2 ** 8): 123 | raise Exception("Overflow") 124 | 125 | return U8(length) + _bytes 126 | 127 | 128 | def B0_64K(_bytes): 129 | assert type(_bytes) is bytes, "B0_64K: not bytes" 130 | 131 | length = _bytes.__len__() 132 | 133 | if length not in range(0, 2 ** 16): 134 | raise Exception("Overflow") 135 | 136 | return U16(length) + _bytes 137 | 138 | 139 | def B0_16M(_bytes): 140 | assert type(_bytes) is bytes, "B0_16M: not bytes" 141 | 142 | length = _bytes.__len__() 143 | 144 | if length not in range(0, 2 ** 24): 145 | raise Exception("Overflow") 146 | 147 | return U24(length) + _bytes 148 | 149 | 150 | def BYTES(_bytes): 151 | assert type(_bytes) is bytes, "BYTES: not bytes" 152 | 153 | return _bytes 154 | 155 | 156 | def PUBKEY(pubKey): 157 | return 158 | 159 | 160 | def SEQ0_255(): 161 | return 162 | 163 | 164 | def SEQ0_64K(): 165 | return 166 | 167 | 168 | """def msgTypesConverter(message_type,channel_msg_bit): 169 | #just to make the task easier (copy from spec) 170 | 171 | assert (channel_msg_bit==0 or channel_msg_bit==1) 172 | if channel_msg_bit == 1: 173 | channel_msg_bit = 0b10000000 174 | 175 | result = message_type | channel_msg_bit 176 | 177 | return result""" 178 | 179 | 180 | def FRAME(extension_type, msg_type_name, payload): 181 | 182 | msg_type_list = { 183 | "SetupConnection": [0x00, 0], 184 | "SetupConnectionSuccess": [0x01, 0], 185 | "SetupConnectionError": [0x02, 0], 186 | "ChannelEndpointChanged": [0x03, 1], 187 | "OpenStandardMiningChannel": [0x10, 0], 188 | "OpenStandardMiningChannelSuccess": [0x11, 0], 189 | "OpenStandardMiningChannelError": [0x12, 0], 190 | "OpenExtendedMiningChannel": [0x13, 0], 191 | "OpenExtendedMiningChannelSuccess": [0x14, 0], 192 | "OpenExtendedMiningChannelError": [0x15, 0], 193 | "UpdateChannel": [0x16, 1], 194 | "UpdateChannelError": [0x17, 1], 195 | "CloseChannel": [0x18, 1], 196 | "SetExtranoncePrefix": [0x19, 1], 197 | "SubmitSharesStandard": [0x1A, 1], 198 | "SubmitSharesExtended": [0x1B, 1], 199 | "SubmitSharesSuccess": [0x1C, 1], 200 | "SubmitSharesError": [0x1D, 1], 201 | "NewMiningJob": [0x1E, 1], 202 | "NewExtendedMiningJob": [0x1F, 1], 203 | "SetNewPrevHash": [0x20, 1], 204 | "SetTarget": [0x21, 1], 205 | "SetCustomMiningJob": [0x22, 0], 206 | "SetCustomMiningJobSuccess": [0x23, 0], 207 | "SetCustomMiningJobError": [0x24, 0], 208 | "Reconnect": [0x25, 0], 209 | "SetGroupChannel": [0x26, 0], 210 | "AllocateMiningJobToken": [0x50, 0], 211 | "AllocateMiningJobTokenSuccess": [0x51, 0], 212 | "AllocateMiningJobTokenError": [0x52, 0], 213 | "IdentifyTransactions": [0x53, 0], 214 | "IdentifyTransactionsSuccess": [0x54, 0], 215 | "ProvideMissingTransactions": [0x55, 0], 216 | "ProvideMissingTransactionsSuccess": [0x56, 0], 217 | "CoinbaseOutputDataSize": [0x70, 0], 218 | "NewTemplate": [0x71, 0], 219 | "SetNewPrevHashTDP": [0x72, 0], 220 | "RequestTransactionData": [0x73, 0], 221 | 
"RequestTransactionDataSuccess": [0x74, 0], 222 | "RequestTransactionDataError": [0x75, 0], 223 | "SubmitSolution": [0x76, 0], 224 | } 225 | msg_type_pair = msg_type_list[msg_type_name] 226 | 227 | msg_type = msg_type_pair[0] # msgTypesConverter(msg_type_pair[0],msg_type_pair[1]) 228 | 229 | extension_type = extension_type 230 | 231 | channel_msg_bit = msg_type_pair[1] 232 | 233 | assert channel_msg_bit == 0 or channel_msg_bit == 1 234 | if channel_msg_bit == 1: 235 | channel_msg_bit = 0b10000000 236 | 237 | extension_type = extension_type | channel_msg_bit 238 | 239 | msg_length = payload.__len__() 240 | 241 | return U16(extension_type) + U8(msg_type) + U24(msg_length) + BYTES(payload) 242 | 243 | 244 | def parse_bytes_to_int(frame, *args): 245 | 246 | # if just une argument, take the byte or bytes 247 | # if 2 arguments, first is the start and second is the end 248 | 249 | if len(args) == 1: 250 | end = args[0] 251 | start = args[0] 252 | elif len(args) == 2: 253 | start = args[0] 254 | end = args[1] 255 | else: 256 | start = 0 257 | end = frame.__len__() 258 | # raise Exception("Missing Arguments") 259 | 260 | data = int.from_bytes(frame[start:end], byteorder="little") 261 | return data 262 | -------------------------------------------------------------------------------- /primitives/messages.py: -------------------------------------------------------------------------------- 1 | # see https://github.com/stratumv2/stratumv2/blob/master/messages.py it has some parsing already 2 | 3 | import typing 4 | from abc import ABC, abstractmethod 5 | 6 | import stringcase 7 | 8 | from .message_types import * 9 | 10 | 11 | class Message: 12 | """Generic message that accepts visitors and dispatches their processing.""" 13 | 14 | class VisitorMethodNotImplemented(Exception): 15 | """Custom handling to report if visitor method is missing""" 16 | 17 | def __init__(self, method_name): 18 | self.method_name = method_name 19 | 20 | def __str__(self): 21 | return self.method_name 22 | 23 | def __init__(self, req_id=None): 24 | self.req_id = req_id 25 | 26 | def accept(self, visitor): 27 | """Call visitor method based on the actual message type.""" 28 | method_name = "visit_{}".format(stringcase.snakecase(type(self).__name__)) 29 | 30 | try: 31 | visit_method = getattr(visitor, method_name) 32 | except AttributeError: 33 | raise self.VisitorMethodNotImplemented(method_name) 34 | 35 | visit_method(self) 36 | 37 | def _format(self, content): 38 | return "{}({})".format(type(self).__name__, content) 39 | 40 | def to_frame(self): 41 | payload = self.to_bytes() 42 | # self.__class__.__name__ will return the derived class name 43 | frame = FRAME(0x0, self.__class__.__name__, payload) 44 | return frame 45 | 46 | # accepts an already decrypted message 47 | @staticmethod 48 | def from_frame(raw: bytes): 49 | extension_type = raw[0:1] 50 | msg_type = raw[2] # U8 51 | msg_length = raw[3:5] # U24 52 | raw = raw[6:] # remove the common bytes 53 | 54 | msg_class = msg_type_class_map[msg_type] 55 | decoded_msg = msg_class.from_bytes(raw) 56 | return decoded_msg 57 | 58 | @abstractmethod 59 | def to_bytes(self): 60 | pass 61 | 62 | @abstractmethod 63 | def from_bytes(self): 64 | pass 65 | 66 | 67 | class ChannelMessage(Message): 68 | """Message specific for a channel identified by its channel_id""" 69 | 70 | def __init__(self, channel_id: int, *args, **kwargs): 71 | self.channel_id = channel_id 72 | super().__init__(*args, **kwargs) 73 | 74 | 75 | # Initiates the connection. 
This MUST be the first message sent by the client on the newly 76 | # opened connection. Server MUST respond with either a SetupConnection.Success or 77 | # SetupConnection.Error message. Clients that are not configured to provide telemetry data 78 | # to the upstream node SHOULD set device_id to 0-length strings. However, they MUST always 79 | # set vendor to a string describing the manufacturer/developer and firmware version and 80 | # SHOULD always set hardware_version to a string describing, at least, the particular 81 | # hardware/software package in use. 82 | class SetupConnection(Message): 83 | def __init__( 84 | self, 85 | protocol: int, 86 | max_version: int, 87 | min_version: int, 88 | flags: int, 89 | endpoint_host: str, 90 | endpoint_port: int, 91 | vendor: str, 92 | hardware_version: str, 93 | firmware: str, 94 | device_id: str = "", 95 | ): 96 | # 0 = Mining Protocol 97 | # 1 = Job Negotiation Protocol 98 | # 2 = Template Distribution Protocol 99 | # 3 = Job Distribution Protocol 100 | self.protocol = protocol 101 | 102 | # The minimum protocol version the client supports (currently must be 2). 103 | self.max_version = max_version 104 | 105 | # The maximum protocol version the client supports (currently must be 2). 106 | self.min_version = min_version 107 | 108 | # Flags indicating optional protocol features the client supports. Each protocol 109 | # from protocol field has its own values/flags. 110 | self.flags = flags 111 | 112 | # ASCII text indicating the hostname or IP address. 113 | self.endpoint_host = endpoint_host 114 | self.endpoint_port = endpoint_port 115 | # Device information 116 | self.vendor = vendor 117 | self.hardware_version = hardware_version 118 | self.firmware = firmware 119 | self.device_id = device_id 120 | super().__init__() 121 | 122 | def __str__(self): 123 | return self._format( 124 | "protocol={}, max_version={}, min_version={}, flags={}, endpoint_host={}, endpoint_port={}, vendor={}, hardware_version={}, firmware={}, device_id={}".format( 125 | self.protocol, 126 | self.max_version, 127 | self.min_version, 128 | self.flags, 129 | self.endpoint_host, 130 | self.endpoint_port, 131 | self.vendor, 132 | self.hardware_version, 133 | self.firmware, 134 | self.device_id, 135 | ) 136 | ) 137 | 138 | def to_bytes(self): 139 | protocol = U8(self.protocol) 140 | min_version = U16(self.min_version) 141 | max_version = U16(self.max_version) 142 | flags = U32(self.flags) 143 | endpoint_host = STR0_255(self.endpoint_host) 144 | endpoint_port = U16(self.endpoint_port) 145 | vendor = STR0_255(self.vendor) 146 | hardware_version = STR0_255((self.hardware_version)) 147 | firmware = STR0_255(self.firmware) 148 | device_id = STR0_255(self.device_id) 149 | 150 | payload = ( 151 | protocol 152 | + min_version 153 | + max_version 154 | + flags 155 | + endpoint_host 156 | + endpoint_port 157 | + vendor 158 | + hardware_version 159 | + firmware 160 | + device_id 161 | ) 162 | return payload 163 | 164 | @staticmethod 165 | def from_bytes(bytes: bytearray): 166 | length_offset = 0 167 | 168 | protocol = bytes[0] # 1 byte 169 | min_version = int.from_bytes(bytes[1:3], byteorder="little") # 2 bytes 170 | max_version = int.from_bytes(bytes[3:5], byteorder="little") # 2 bytes 171 | flags = int.from_bytes(bytes[5:9], byteorder="little") # 4 bytes 172 | 173 | endpoint_length = bytes[9] 174 | endpoint_host = bytes[10 : 10 + endpoint_length].decode("utf-8") 175 | endpoint_port = int.from_bytes( 176 | bytes[10 + endpoint_length : 12 + endpoint_length], byteorder="little" 177 | ) 178 
| length_offset += endpoint_length 179 | 180 | vendor_length = bytes[12 + length_offset] 181 | vendor = bytes[13 + length_offset : 13 + length_offset + vendor_length].decode( 182 | "utf-8" 183 | ) 184 | length_offset += vendor_length 185 | 186 | hardware_version_length = bytes[13 + length_offset] 187 | hardware_version = bytes[ 188 | 14 + length_offset : 14 + length_offset + hardware_version_length 189 | ].decode("utf-8") 190 | length_offset += hardware_version_length 191 | 192 | firmware_length = bytes[14 + length_offset] 193 | firmware = bytes[ 194 | 15 + length_offset : 15 + length_offset + firmware_length 195 | ].decode("utf-8") 196 | length_offset += firmware_length 197 | 198 | device_id_length = bytes[15 + length_offset] 199 | device_id = bytes[ 200 | 16 + length_offset : 16 + length_offset + device_id_length 201 | ].decode("utf-8") 202 | 203 | msg = SetupConnection( 204 | protocol=protocol, 205 | min_version=min_version, 206 | max_version=max_version, 207 | flags=flags, 208 | endpoint_host=endpoint_host, 209 | endpoint_port=endpoint_port, 210 | vendor=vendor, 211 | hardware_version=hardware_version, 212 | firmware=firmware, 213 | device_id=device_id, 214 | ) 215 | return msg 216 | 217 | 218 | # Response to SetupConnection message if the server accepts the connection. The client is 219 | # required to verify the set of feature flags that the server supports and act accordingly. 220 | class SetupConnectionSuccess(Message): 221 | def __init__(self, used_version: int, flags: int): 222 | # Selected version proposed by the connecting node that the upstream node supports. 223 | # This version will be used on the connection for the rest of its life. 224 | self.used_version = used_version 225 | 226 | # Flags indicating optional protocol features the server supports. Each protocol from 227 | # protocol field has its own values/flags. 228 | self.flags = flags 229 | super().__init__() 230 | 231 | def __str__(self): 232 | return self._format( 233 | "used_version={}, flags={}".format( 234 | self.used_version, 235 | self.flags, 236 | ) 237 | ) 238 | 239 | def to_bytes(self): 240 | used_version = U16(self.used_version) 241 | flags = U32(self.flags) 242 | payload = used_version + flags 243 | 244 | return payload 245 | 246 | @staticmethod 247 | def from_bytes(bytes: bytearray): 248 | used_version = int.from_bytes(bytes[0:2], byteorder="little") # 2 bytes 249 | flags = int.from_bytes(bytes[2:6], byteorder="little") # bytes 250 | 251 | msg = SetupConnectionSuccess( 252 | used_version=used_version, 253 | flags=flags, 254 | ) 255 | return msg 256 | 257 | 258 | # When protocol version negotiation fails (or there is another reason why 259 | # the upstream node cannot setup the connection) the server sends this 260 | # message with a particular error code prior to closing the connection 261 | class SetupConnectionError(Message): 262 | def __init__(self, flags: list, error_code: str): 263 | # Flags indicating features causing an error. 264 | self.flags = flags 265 | 266 | # Human-readable error code(s). See Error Codes section, below. 
267 | self.error_code = error_code 268 | super().__init__() 269 | 270 | def __str__(self): 271 | return self._format( 272 | "flags={}, error_code={}".format(self.flags, self.error_code) 273 | ) 274 | 275 | def to_bytes(self): 276 | flags = U32(self.channel_id) 277 | error_code = STR0_255(self.error_code) 278 | 279 | payload = flags + error_code 280 | 281 | return payload 282 | 283 | @staticmethod 284 | def from_bytes(bytes: bytearray): 285 | flags = int.from_bytes(bytes[:4], byteorder="little") 286 | error_code_length = bytes[4] 287 | error_code = bytes[5 : 5 + error_code_length].decode("utf-8") 288 | 289 | msg = SetupConnectionError( 290 | flags=flags, 291 | error_code=error_code, 292 | ) 293 | return msg 294 | 295 | 296 | # This message requests to open a standard channel to the upstream node. 297 | # After receiving a SetupConnection.Success message, the client SHOULD respond by opening channels 298 | # on the connection. If no channels are opened within a reasonable period the server SHOULD close 299 | # the connection for inactivity. 300 | class OpenStandardMiningChannel(Message): 301 | def __init__( 302 | self, 303 | req_id: typing.Any, 304 | user_identity: str, 305 | nominal_hash_rate: int, 306 | max_target: int, 307 | ): 308 | # Unconstrained sequence of bytes. Whatever is needed by upstream node to identify/authenticate 309 | # the client, e.g. “braiinstest.worker1”. Additional restrictions can be imposed by the upstream 310 | # node (e.g. a pool). It is highly recommended that UTF-8 encoding is used. 311 | self.user_identity = user_identity 312 | 313 | # Expected hash rate of the device (or cumulative hashrate on the channel if multiple devices 314 | # are connected downstream) in h/s 315 | self.nominal_hash_rate = nominal_hash_rate 316 | 317 | # Maximum target which can be accepted by the connected device or devices. Server MUST accept 318 | # the target or respond by sending OpenMiningChannel.Error message. 319 | self.max_target = max_target 320 | self.new_job_class = NewMiningJob 321 | 322 | # req_id is Client-specified identifier for matching responses from upstream server. The value 323 | # MUST be connection-wide unique and is not interpreted by the server. 
324 |         super().__init__(req_id)
325 | 
326 |     def __str__(self):
327 |         return self._format(
328 |             "req_id={}, user_identity={}, nominal_hash_rate={}, max_target={}, new_job_class={}".format(
329 |                 self.req_id,
330 |                 self.user_identity,
331 |                 self.nominal_hash_rate,
332 |                 self.max_target,
333 |                 self.new_job_class,
334 |             )
335 |         )
336 | 
337 |     def to_bytes(self):
338 |         req_id = U32(self.req_id)
339 |         user_identity = STR0_255(self.user_identity)
340 |         nominal_hash_rate = U32(self.nominal_hash_rate)
341 |         max_target = U256(self.max_target)
342 | 
343 |         payload = req_id + user_identity + nominal_hash_rate + max_target
344 | 
345 |         return payload
346 | 
347 |     @staticmethod
348 |     def from_bytes(bytes: bytearray):
349 |         req_id = int.from_bytes(bytes[0:4], byteorder="little")
350 | 
351 |         l = bytes[4]
352 | 
353 |         user_identity = bytes[5 : 5 + l].decode("utf-8")
354 |         nominal_hash_rate = int.from_bytes(bytes[5 + l : 5 + l + 4], byteorder="little")
355 |         max_target = int.from_bytes(
356 |             bytes[5 + l + 4 : 5 + l + 4 + 32], byteorder="little"  # U256 is 32 bytes
357 |         )
358 | 
359 |         msg = OpenStandardMiningChannel(
360 |             req_id=req_id,
361 |             user_identity=user_identity,
362 |             nominal_hash_rate=nominal_hash_rate,
363 |             max_target=max_target,
364 |         )
365 |         return msg
366 | 
367 | 
368 | # Sent as a response for opening a standard channel
369 | class OpenStandardMiningChannelError(Message):
370 |     def __init__(self, req_id, error_code: str):
371 |         self.req_id = req_id
372 |         self.error_code = error_code
373 |         super().__init__(req_id)
374 | 
375 | 
376 | # Sent as a response for opening a standard channel, if successful.
377 | class OpenStandardMiningChannelSuccess(ChannelMessage):
378 |     def __init__(
379 |         self,
380 |         req_id: typing.Any,
381 |         channel_id: int,
382 |         target: int,
383 |         extranonce_prefix: bytes,
384 |         group_channel_id: int,
385 |     ):
386 |         # Client-specified request ID from OpenStandardMiningChannel message, so that the client
387 |         # can pair responses with open channel requests
388 |         self.req_id = req_id
389 | 
390 |         # Initial target for the mining channel
391 |         self.target = target
392 | 
393 |         # Newly assigned identifier of the channel, stable for the whole lifetime of the connection.
394 |         # E.g. it is used for broadcasting new jobs by NewExtendedMiningJob
395 |         self.channel_id = channel_id
396 | 
397 |         # Group channel into which the new channel belongs. See SetGroupChannel for details
398 |         self.group_channel_id = group_channel_id
399 | 
400 |         # Bytes used as implicit first part of extranonce for the scenario when extended job is
401 |         # served by the upstream node for a set of standard channels that belong to the same group.
402 |         self.extranonce_prefix = extranonce_prefix
403 |         super().__init__(channel_id=channel_id, req_id=req_id)
404 | 
405 |     def __str__(self):
406 |         return self._format(
407 |             "req_id={}, channel_id={}, target={}, extranonce_prefix={}, group_channel_id={}".format(
408 |                 self.req_id,
409 |                 self.channel_id,
410 |                 self.target,
411 |                 self.extranonce_prefix,
412 |                 self.group_channel_id,
413 |             )
414 |         )
415 | 
416 |     @staticmethod
417 |     def from_bytes(bytes: bytearray):
418 |         req_id = int.from_bytes(bytes[0:4], byteorder="little")
419 |         channel_id = int.from_bytes(bytes[4:8], byteorder="little")  # channel_id occupies bytes 4:8
420 | target = int.from_bytes(bytes[8 : 8 + 32], byteorder="little") 421 | 422 | l = bytes[40] 423 | 424 | extranonce_prefix = bytes[41 : 41 + l] 425 | group_channel_id = int.from_bytes(bytes[41 + l : 45 + l], byteorder="little") 426 | 427 | msg = OpenStandardMiningChannelSuccess( 428 | req_id=req_id, 429 | channel_id=channel_id, 430 | target=target, 431 | extranonce_prefix=extranonce_prefix, 432 | group_channel_id=group_channel_id, 433 | ) 434 | return msg 435 | 436 | def to_bytes(self): 437 | req_id = U32(self.req_id) 438 | channel_id = U32(self.channel_id) 439 | target = U256(self.target) 440 | extranonce_prefix = B0_32(self.extranonce_prefix) 441 | group_channel_id = U32(self.group_channel_id) 442 | 443 | payload = req_id + channel_id + target + extranonce_prefix + group_channel_id 444 | 445 | return payload 446 | 447 | 448 | # Changes downstream node’s extranonce prefix. It is applicable for all jobs sent 449 | # after this message on a given channel (both jobs provided by the upstream or jobs 450 | # introduced by SetCustomMiningJob message). This message is applicable only for 451 | # explicitly opened extended channels or standard channels (not group channels). 452 | class SetExtranoncePrefix(ChannelMessage): 453 | def __init__(self, channel_id: int, extranonce_prefix: bytes): 454 | # Bytes used as implicit first part of extranonce 455 | self.extranonce_prefix = extranonce_prefix 456 | 457 | # Extended or standard channel identifier 458 | super().__init__(channel_id=channel_id) 459 | 460 | 461 | # Client notifies the server about changes on the specified channel. 462 | # If a client performs device/connection aggregation (i.e. it is a proxy), 463 | # it MUST send this message when downstream channels change 464 | class UpdateChannel(ChannelMessage): 465 | def __init__(self, channel_id: int, nominal_hash_rate: float, maximum_target: int): 466 | self.nominal_hash_rate = nominal_hash_rate 467 | self.maximum_target = maximum_target 468 | super().__init__(channel_id=channel_id) 469 | 470 | 471 | # Sent only when UpdateChannel message is invalid. When it is accepted by the 472 | # server, no response is sent back. 473 | class UpdateChannelError(ChannelMessage): 474 | def __init__(self, channel_id: int, error_code: str): 475 | self.error_code = error_code 476 | super().__init__(channel_id=channel_id) 477 | 478 | 479 | # Client -> Server, Server -> Client 480 | # Client sends this message when it ends its operation. The server MUST stop sending 481 | # messages for the channel. A proxy MUST send this message on behalf of all opened 482 | # channels from a downstream connection in case of downstream connection closure. 483 | class CloseChannel(ChannelMessage): 484 | def __init__(self, channel_id: int, reason_code: str): 485 | self.reason_code = reason_code 486 | super().__init__(channel_id=channel_id) 487 | 488 | 489 | # Client -> Server 490 | # Client sends result of its hashing work to the server 491 | class SubmitSharesStandard(ChannelMessage): 492 | def __init__( 493 | self, 494 | channel_id: int, 495 | sequence_number: int, 496 | job_id: int, 497 | nonce: int, 498 | ntime: int, 499 | version: int, 500 | ): 501 | # Unique sequential identifier of the submit within the channel 502 | self.sequence_number = sequence_number 503 | 504 | # Identifier of the job as provided by NewMiningJob or NewExtendedMiningJob message 505 | self.job_id = job_id 506 | 507 | # Nonce leading to the hash being submitted 508 | self.nonce = nonce 509 | 510 | # The nTime field in the block header. 
This MUST be greater than or equal to the 511 | # header_timestamp field in the latest SetNewPrevHash message and lower than or 512 | # equal to that value plus the number of seconds since the receipt of that message. 513 | self.ntime = ntime 514 | 515 | # Full nVersion field 516 | self.version = version 517 | super().__init__(channel_id) 518 | 519 | def __str__(self): 520 | return self._format( 521 | "channel_id={}, job_id={}".format(self.channel_id, self.job_id) 522 | ) 523 | 524 | def to_bytes(self): 525 | channel_id = U32(self.channel_id) 526 | sequence_number = U32(self.sequence_number) 527 | job_id = U32(self.job_id) 528 | nonce = U32(self.nonce) 529 | ntime = U32(self.ntime) 530 | version = U32(self.version) 531 | 532 | payload = channel_id + sequence_number + job_id + nonce + ntime + version 533 | 534 | return payload 535 | 536 | @staticmethod 537 | def from_bytes(bytes: bytearray): 538 | channel_id = int.from_bytes(bytes[0:4], byteorder="little") 539 | sequence_number = int.from_bytes(bytes[4:8], byteorder="little") 540 | job_id = int.from_bytes(bytes[8:12], byteorder="little") 541 | nonce = int.from_bytes(bytes[12:16], byteorder="little") 542 | ntime = int.from_bytes(bytes[16:20], byteorder="little") 543 | version = int.from_bytes(bytes[20:24], byteorder="little") 544 | 545 | msg = SubmitSharesStandard( 546 | channel_id=channel_id, 547 | sequence_number=sequence_number, 548 | job_id=job_id, 549 | nonce=nonce, 550 | ntime=ntime, 551 | version=version, 552 | ) 553 | return msg 554 | 555 | 556 | # Response to SubmitShares or SubmitSharesExtended, accepting results from the miner. 557 | # Because it is a common case that shares submission is successful, this response can 558 | # be provided for multiple SubmitShare messages aggregated together. 
559 | class SubmitSharesSuccess(ChannelMessage): 560 | def __init__( 561 | self, 562 | channel_id: int, 563 | last_sequence_number: int, 564 | new_submits_accepted_count: int, 565 | new_shares_sum: int, 566 | ): 567 | # Most recent sequence number with a correct result 568 | self.last_sequence_number = last_sequence_number 569 | 570 | # Count of new submits acknowledged within this batch 571 | self.new_submits_accepted_count = new_submits_accepted_count 572 | 573 | # Sum of shares acknowledged within this batch 574 | self.new_shares_sum = new_shares_sum 575 | super().__init__(channel_id) 576 | 577 | def __str__(self): 578 | return self._format( 579 | "channel_id={}, last_seq_num={}, accepted_submits={}, accepted_shares={}".format( 580 | self.channel_id, 581 | self.last_sequence_number, 582 | self.new_submits_accepted_count, 583 | self.new_shares_sum, 584 | ) 585 | ) 586 | 587 | def to_bytes(self): 588 | channel_id = U32(self.channel_id) 589 | last_sequence_number = U32(self.last_sequence_number) 590 | new_submits_accepted_count = U32(self.new_submits_accepted_count) 591 | new_shares_sum = U32(self.new_shares_sum) 592 | 593 | payload = ( 594 | channel_id 595 | + last_sequence_number 596 | + new_submits_accepted_count 597 | + new_shares_sum 598 | ) 599 | 600 | return payload 601 | 602 | @staticmethod 603 | def from_bytes(bytes: bytearray): 604 | channel_id = int.from_bytes(bytes[0:4], byteorder="little") 605 | last_sequence_number = int.from_bytes(bytes[4:8], byteorder="little") 606 | new_submits_accepted_count = int.from_bytes(bytes[8:12], byteorder="little") 607 | new_shares_sum = int.from_bytes(bytes[12:16], byteorder="little") 608 | 609 | msg = SubmitSharesSuccess( 610 | channel_id=channel_id, 611 | last_sequence_number=last_sequence_number, 612 | new_submits_accepted_count=new_submits_accepted_count, 613 | new_shares_sum=new_shares_sum, 614 | ) 615 | return msg 616 | 617 | 618 | class SubmitSharesError(ChannelMessage): 619 | def __init__(self, channel_id: int, sequence_number: int, error_code: str): 620 | self.sequence_number = sequence_number 621 | self.error_code = error_code 622 | super().__init__(channel_id) 623 | 624 | def __str__(self): 625 | return self._format( 626 | "channel_id={}, sequence_number={}, error_code={}".format( 627 | self.channel_id, self.sequence_number, self.error_code 628 | ) 629 | ) 630 | 631 | def to_bytes(self): 632 | channel_id = U32(self.channel_id) 633 | sequence_number = U32(self.sequence_number) 634 | error_code = STR0_255(self.error_code) 635 | 636 | payload = channel_id + sequence_number + error_code 637 | 638 | return payload 639 | 640 | @staticmethod 641 | def from_bytes(bytes: bytearray): 642 | channel_id = int.from_bytes(bytes[:4], byteorder="little") 643 | sequence_number = int.from_bytes(bytes[4:8], byteorder="little") 644 | error_code_length = bytes[8] 645 | error_code = bytes[9 : 9 + error_code_length].decode("utf-8") 646 | 647 | msg = SubmitSharesError( 648 | channel_id=channel_id, 649 | sequence_number=sequence_number, 650 | error_code=error_code, 651 | ) 652 | return msg 653 | 654 | 655 | # The server provides an updated mining job to the client through a standard channel. 656 | # If the future_job field is set to False, the client MUST start to mine on the new job 657 | # as soon as possible after receiving this message. 
658 | class NewMiningJob(ChannelMessage):
659 |     def __init__(
660 |         self,
661 |         channel_id: int,
662 |         job_id: int,
663 |         future_job: bool,
664 |         version: int,
665 |         merkle_root: bytes,
666 |     ):
667 |         # Server’s identification of the mining job. This identifier must be provided to
668 |         # the server when shares are submitted later in the mining process.
669 |         self.job_id = job_id
670 | 
671 |         # True if the job is intended for a future SetNewPrevHash message sent on this
672 |         # channel. If False, the job relates to the last sent SetNewPrevHash message on
673 |         # the channel and the miner should start to work on the job immediately.
674 |         self.future_job = future_job
675 | 
676 |         # Valid version field that reflects the current network consensus. The general
677 |         # purpose bits (as specified in BIP320) can be freely manipulated by the downstream
678 |         # node. The downstream node MUST NOT rely on the upstream node to set the BIP320
679 |         # bits to any particular value.
680 |         self.version = version
681 | 
682 |         # Merkle root field as used in the bitcoin block header.
683 |         self.merkle_root = merkle_root
684 | 
685 |         super().__init__(channel_id=channel_id)
686 | 
687 |     def __str__(self):
688 |         return self._format(
689 |             "channel_id={}, job_id={}, future_job={}, version={}, merkle_root={}".format(
690 |                 self.channel_id,
691 |                 self.job_id,
692 |                 self.future_job,
693 |                 self.version,
694 |                 self.merkle_root,
695 |             )
696 |         )
697 | 
698 |     def to_bytes(self):
699 |         channel_id = U32(self.channel_id)
700 |         job_id = U32(self.job_id)
701 |         future_job = BOOL(self.future_job)
702 |         version = U32(self.version)
703 |         merkle_root = B0_32(self.merkle_root)
704 | 
705 |         payload = channel_id + job_id + future_job + version + merkle_root
706 | 
707 |         return payload
708 | 
709 |     @staticmethod
710 |     def from_bytes(bytes: bytearray):
711 |         channel_id = int.from_bytes(bytes[:4], byteorder="little")
712 |         job_id = int.from_bytes(bytes[4:8], byteorder="little")
713 |         future_job = bytes[8] == 1
714 |         version = int.from_bytes(bytes[9:13], byteorder="little")
715 |         merkle_root = bytes[14 : 14 + bytes[13]]  # B0_32-encoded: byte 13 is the length prefix
716 | 
717 |         msg = NewMiningJob(
718 |             channel_id=channel_id,
719 |             job_id=job_id,
720 |             future_job=future_job,
721 |             version=version,
722 |             merkle_root=merkle_root,
723 |         )
724 |         return msg
725 | 
726 | 
727 | # Server -> Client, broadcast
728 | # Prevhash is distributed whenever a new block is detected in the network by an upstream
729 | # node. This message MAY be shared by all downstream nodes (sent only once to each
730 | # channel group). Clients MUST immediately start to mine on the provided prevhash. When
731 | # a client receives this message, only the job referenced by Job ID is valid. The
732 | # remaining jobs already queued by the client have to be made invalid.
733 | class SetNewPrevHash(ChannelMessage):
734 |     def __init__(
735 |         self, channel_id: int, job_id: int, prev_hash: bytes, min_ntime: int, nbits: int
736 |     ):
737 |         # Group channel or channel that this prevhash is valid for
738 |         self.channel_id = channel_id
739 | 
740 |         # ID of a job that is to be used for mining with this prevhash. A pool may have
741 |         # provided multiple jobs for the next block height (e.g. an empty block or a block
742 |         # with transactions that are complementary to the set of transactions present in
743 |         # the current block template).
744 |         self.job_id = job_id
745 | 
746 |         # Previous block’s hash, block header field
747 |         self.prev_hash = prev_hash
748 | 
749 |         # Smallest nTime value available for hashing.
750 | self.min_ntime = min_ntime 751 | 752 | # Block header field 753 | self.nbits = nbits 754 | super().__init__(channel_id) 755 | 756 | def __str__(self): 757 | return self._format( 758 | "channel_id={}, job_id={}, prev_hash={}, min_ntime={}, nbits={}".format( 759 | self.channel_id, self.job_id, self.prev_hash, self.min_ntime, self.nbits 760 | ) 761 | ) 762 | 763 | def to_bytes(self): 764 | channel_id = U32(self.channel_id) 765 | job_id = U32(self.job_id) 766 | prev_hash = U256(self.prev_hash) 767 | min_ntime = U32(self.min_ntime) 768 | nbits = U32(self.nbits) 769 | 770 | payload = channel_id + job_id + prev_hash + min_ntime + nbits 771 | 772 | return payload 773 | 774 | @staticmethod 775 | def from_bytes(bytes: bytearray): 776 | channel_id = int.from_bytes(bytes[:4], byteorder="little") 777 | job_id = int.from_bytes(bytes[4:8], byteorder="little") 778 | prev_hash = bytes[8:40] 779 | min_ntime = int.from_bytes(bytes[40:44], byteorder="little") 780 | nbits = int.from_bytes(bytes[44:48], byteorder="little") 781 | 782 | msg = SetNewPrevHash( 783 | channel_id=channel_id, 784 | job_id=job_id, 785 | prev_hash=prev_hash, 786 | min_ntime=min_ntime, 787 | nbits=nbits, 788 | ) 789 | return msg 790 | 791 | 792 | # The server controls the submission rate by adjusting the difficulty target on a specified 793 | # channel. All submits leading to hashes higher than the specified target will be rejected 794 | # by the server. 795 | # Maximum target is valid until the next SetTarget message is sent and is applicable for all 796 | # jobs received on the channel in the future or already received with flag future_job=True. 797 | # The message is not applicable for already received jobs with future_job=False, as their 798 | # maximum target remains stable. 799 | class SetTarget(ChannelMessage): 800 | def __init__(self, channel_id: int, max_target: int): 801 | # Maximum value of produced hash that will be accepted by a server to accept shares; 802 | # numeric value that a hashed block header must be less than or equal to in order for 803 | # a new block to be awarded to a miner 804 | self.max_target = max_target 805 | super().__init__(channel_id=channel_id) 806 | 807 | def __str__(self): 808 | return self._format( 809 | "channel_id={}, max_target={}".format(self.channel_id, self.max_target) 810 | ) 811 | 812 | def to_bytes(self): 813 | channel_id = U32(self.channel_id) 814 | max_target = U256(self.max_target) 815 | 816 | payload = channel_id + max_target 817 | 818 | return payload 819 | 820 | @staticmethod 821 | def from_bytes(bytes: bytearray): 822 | channel_id = int.from_bytes(bytes[:4], byteorder="little") 823 | max_target = int.from_bytes(bytes[4 : 4 + 32], byteorder="little") 824 | 825 | msg = SetTarget( 826 | channel_id=channel_id, 827 | max_target=max_target, 828 | ) 829 | return msg 830 | 831 | 832 | class SetCustomMiningJob(ChannelMessage): 833 | def __init__( 834 | self, 835 | channel_id: int, 836 | request_id: int, 837 | mining_job_token: bytes, 838 | version: int, 839 | prev_hash: bytes, 840 | min_ntime: int, 841 | nbits: int, 842 | coinbase_tx_version: int, 843 | coinbase_prefix: bytes, 844 | coinbase_tx_input_nsequence: int, 845 | coinbase_tx_value_remaining: int, 846 | coinbase_tx_output: typing.Any, 847 | coinbase_tx_locktime: int, 848 | merkle_path: typing.Any, 849 | extranonce_size: int, 850 | future_job: bool, 851 | ): 852 | self.request_id = request_id 853 | self.mining_job_token = mining_job_token 854 | self.version = version 855 | self.prev_hash = prev_hash 856 | self.min_ntime = 
min_ntime 857 | self.nbits = nbits 858 | self.coinbase_tx_version = coinbase_tx_version 859 | self.coinbase_prefix = coinbase_prefix 860 | self.coinbase_tx_input_nsequence = coinbase_tx_input_nsequence 861 | self.coinbase_tx_value_remaining = coinbase_tx_value_remaining 862 | self.coinbase_tx_output = coinbase_tx_output 863 | self.coinbase_tx_locktime = coinbase_tx_locktime 864 | self.merkle_path = merkle_path 865 | self.extranonce_size = extranonce_size 866 | self.future_job = future_job 867 | super().__init__(channel_id=channel_id) 868 | 869 | 870 | class SetCustomMiningJobSuccess(ChannelMessage): 871 | def __init__( 872 | self, 873 | channel_id: int, 874 | request_id: int, 875 | job_id: int, 876 | coinbase_tx_prefix: bytes, 877 | coinbase_tx_suffix: bytes, 878 | ): 879 | self.request_id = request_id 880 | self.job_id = job_id 881 | self.coinbase_tx_prefix = coinbase_tx_prefix 882 | self.coinbase_tx_suffix = coinbase_tx_suffix 883 | super().__init__(channel_id=channel_id) 884 | 885 | 886 | class SetCustomMiningJobError(ChannelMessage): 887 | def __init__(self, channel_id: int, request_id: int, error_code: str): 888 | self.request_id = request_id 889 | self.error_code = error_code 890 | super().__init__(channel_id=channel_id) 891 | 892 | 893 | class Reconnect(Message): 894 | def __init__(self, new_host: str, new_port: int): 895 | self.new_host = new_host 896 | self.new_port = new_port 897 | super().__init__() 898 | 899 | 900 | class SetGroupChannel(Message): 901 | def __init__(self, group_channel_id: int, channel_ids: typing.List): 902 | self.group_channel_id = group_channel_id 903 | self.channel_ids = channel_ids 904 | super().__init__() 905 | 906 | 907 | # NOT USED 908 | # Extended and group channels only 909 | class NewExtendedMiningJob(ChannelMessage): 910 | def __init__( 911 | self, 912 | channel_id: int, 913 | job_id: int, 914 | future_job: bool, 915 | version: int, 916 | version_rolling_allowed: bool, 917 | merkle_path: bytes, # MerklePath, 918 | cb_prefix: bytes, # CoinBasePrefix, 919 | cb_suffix: bytes, # CoinBaseSuffix, 920 | ): 921 | self.job_id = job_id 922 | self.future_job = future_job 923 | self.version = version 924 | self.version_rolling_allowed = version_rolling_allowed 925 | self.merkle_path = merkle_path 926 | self.cb_prefix = cb_prefix 927 | self.cb_suffix = cb_suffix 928 | super().__init__(channel_id=channel_id) 929 | 930 | 931 | # NOT USED 932 | # Similar to OpenStandardMiningChannel but requests to open an extended channel instead 933 | # of standard channel 934 | class OpenExtendedMiningChannel(OpenStandardMiningChannel): 935 | def __init__(self, min_extranonce_size: int, *args, **kwargs): 936 | self.min_extranonce_size = min_extranonce_size 937 | self.new_job_class = NewExtendedMiningJob 938 | super().__init__(*args, **kwargs) 939 | 940 | 941 | # NOT USED 942 | # Sent as a response for opening an extended channel 943 | class OpenExtendedMiningChannelSuccess(ChannelMessage): 944 | def __init__( 945 | self, 946 | req_id, 947 | channel_id: int, 948 | target: int, 949 | extranonce_size: int, 950 | extranonce_prefix: bytes, 951 | ): 952 | self.target = target 953 | self.extranonce_prefix = extranonce_prefix 954 | self.extranonce_size = extranonce_size 955 | super().__init__(channel_id=channel_id, req_id=req_id) 956 | 957 | 958 | # NOT USED 959 | # Sent as a response for opening an extended channel 960 | class OpenMiningChannelError(Message): 961 | def __init__(self, req_id, error_code: str): 962 | self.req_id = req_id 963 | self.error_code = error_code 964 | 
964 |         super().__init__(req_id)
965 | 
966 | 
967 | # NOT USED
968 | # Only relevant for extended channels. The message is the same as SubmitShares,
969 | # with some additional fields
970 | class SubmitSharesExtended(SubmitSharesStandard):
971 |     def __init__(self, extranonce, *args, **kwargs):
972 |         self.extranonce = extranonce
973 |         super().__init__(*args, **kwargs)
974 | 
975 | 
976 | class ChannelEndpointChanged(Message):
977 |     pass
978 | 
979 | 
980 | msg_type_class_map = {
981 |     0x00: SetupConnection,
982 |     0x01: SetupConnectionSuccess,
983 |     0x02: SetupConnectionError,
984 |     0x03: ChannelEndpointChanged,
985 |     0x10: OpenStandardMiningChannel,
986 |     0x11: OpenStandardMiningChannelSuccess,
987 |     0x12: OpenStandardMiningChannelError,
988 |     0x1A: SubmitSharesStandard,
989 |     0x1C: SubmitSharesSuccess,
990 |     0x1D: SubmitSharesError,
991 |     0x1E: NewMiningJob,
992 |     0x20: SetNewPrevHash,
993 |     0x21: SetTarget,
994 | }
995 | 
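if __name__ == "__main__":
    # Minimal round-trip sketch of SetNewPrevHash's fixed layout
    # (u32 channel_id | u32 job_id | 32-byte prev_hash | u32 min_ntime | u32 nbits).
    # Illustrative only; it assumes the U256 helper accepts a raw 32-byte
    # prev_hash, as the pool-side usage suggests.
    msg = SetNewPrevHash(
        channel_id=0,
        job_id=7,
        prev_hash=bytes(32),
        min_ntime=1231006505,
        nbits=0x1D00FFFF,
    )
    decoded = SetNewPrevHash.from_bytes(bytearray(msg.to_bytes()))
    assert decoded.channel_id == 0 and decoded.job_id == 7
    assert decoded.min_ntime == 1231006505 and decoded.nbits == 0x1D00FFFF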
--------------------------------------------------------------------------------
/primitives/miner.py:
--------------------------------------------------------------------------------
1 | import asyncio  # new module
2 | import concurrent.futures
3 | import enum
4 | import math
5 | import time
6 | from hashlib import sha256
7 | 
8 | import numpy as np
9 | import simpy
10 | from colorama import Fore, Style
11 | from event_bus import EventBus
12 | 
13 | import primitives.coins as coins
14 | from primitives.connection import Connection
15 | from primitives.hashrate_meter import HashrateMeter
16 | from primitives.messages import (
17 |     NewMiningJob,
18 |     OpenMiningChannelError,
19 |     OpenStandardMiningChannel,
20 |     OpenStandardMiningChannelSuccess,
21 |     SetNewPrevHash,
22 |     SetTarget,
23 |     SetupConnection,
24 |     SetupConnectionError,
25 |     SetupConnectionSuccess,
26 |     SubmitSharesError,
27 |     SubmitSharesStandard,
28 |     SubmitSharesSuccess,
29 | )
30 | from primitives.protocol import ConnectionProcessor
31 | from primitives.session import MiningJob, MiningSession, PoolMiningChannel
32 | from primitives.types import DownstreamConnectionFlags, ProtocolType
33 | 
34 | 
35 | class Miner(ConnectionProcessor):
36 |     class States(enum.Enum):
37 |         INIT = 0
38 |         CONNECTION_SETUP = 1
39 | 
40 |     def __init__(
41 |         self,
42 |         name: str,
43 |         bus: EventBus,
44 |         diff_1_target: int,
45 |         device_information: dict,
46 |         connection: Connection,
47 |         *args,
48 |         **kwargs,
49 |     ):
50 |         self.name = name
51 |         self.bus = bus
52 |         self.diff_1_target = diff_1_target
53 |         self.device_information = device_information
54 |         self.work_meter = HashrateMeter()
55 |         self.mine_proc = None
56 |         self.job_uid = None
57 |         self.share_diff = None
58 |         self.recv_loop_process = None
59 | 
60 |         self.state = self.States.INIT
61 |         self.channel = None
62 |         self.connection_config = None
63 |         self.job = None
64 |         self.is_mining = False
65 | 
66 |         super().__init__(self.name, self.bus, connection)
67 | 
68 |     def get_actual_speed(self):
69 |         return self.device_information.get("speed_ghps") if self.is_mining else 0
70 | 
71 |     def _send_msg(self, msg):
72 |         self.connection.send_msg(msg)
73 | 
74 |     def int_to_reverse_bytes(self, num: int, byteno: int):
75 |         reverse_bytes = num.to_bytes(byteno, byteorder="little")
76 |         return reverse_bytes
77 | 
78 |     # assemble the header without the nonce, so we can just append it later
79 |     def assemble_header(
80 |         self,
81 |         version: int,
82 |         prev_hash: bytes,
83 |         merkle_root: bytes,
84 |         ntime: int,
85 |         nbits: int,
86 |     ):
87 |         header = (
88 |             self.int_to_reverse_bytes(version, 4)
89 |             + prev_hash  # 32 bytes
90 |             + merkle_root  # 32 bytes
91 |             + self.int_to_reverse_bytes(ntime, 4)
92 |             + self.int_to_reverse_bytes(nbits, 4)
93 |         )
94 |         return header
95 | 
96 |     def mine(self, job: MiningJob):
97 |         share_diff = job.diff_target.to_difficulty()
98 |         avg_time = share_diff * 4.294967296 / self.device_information.get("speed_ghps")
99 | 
100 |         # Report the current hashrate at the beginning of mining
101 |         self.__emit_hashrate_msg_on_bus(job, avg_time)
102 | 
103 |         nonce = 0
104 |         min_hash = 0xFFFF << 224  # tracks the best (lowest) hash seen, for progress printing
105 | 
106 |         # version: from NewMiningJob message
107 |         # prev_hash: from SetNewPrevHash message
108 |         # merkle_root: from NewMiningJob message
109 |         # ntime: from SetNewPrevHash message (min_ntime)
110 |         # nbits: from SetNewPrevHash message
111 |         # nonce: auto incremented value
112 |         header_without_nonce = self.assemble_header(
113 |             version=job.version,
114 |             prev_hash=self.channel.session.prev_hash,
115 |             merkle_root=job.merkle_root,
116 |             ntime=self.channel.session.min_ntime,
117 |             nbits=self.channel.session.nbits,
118 |         )
119 | 
120 |         job.started_at = int(time.time())
121 |         print("Max target:")
122 |         print((0xFFFF << 208).to_bytes(32, byteorder="big").hex())
123 |         print("Curr target:")
124 |         print(
125 |             self.channel.session.curr_target.target.to_bytes(32, byteorder="big").hex()
126 |         )
127 |         print("--------------")
128 |         while not job.is_cancelled:
129 |             # assemble the full header by appending the current nonce
130 |             full_header = header_without_nonce + self.int_to_reverse_bytes(nonce, 4)
131 | 
132 |             hash_bytes = sha256(sha256(full_header).digest()).digest()
133 |             hash_value = int.from_bytes(hash_bytes, byteorder="little")
134 | 
135 |             if hash_value < min_hash:
136 |                 print(hash_value.to_bytes(32, byteorder="big").hex())
137 |                 min_hash = hash_value
138 | 
139 |             if hash_value < self.channel.session.curr_target.target:
140 |                 self.__emit_aux_msg_on_bus("solution found for job {}".format(job.uid))
141 |                 self.work_meter.measure(share_diff)
142 |                 self.__emit_hashrate_msg_on_bus(job, avg_time)
143 |                 self.submit_mining_solution(job, nonce)  # pass the winning nonce along
144 | 
145 |             nonce += 1
146 | 
147 |         job.finished_at = int(time.time())
148 |         print(
149 |             "Job duration: %d sec, nonce is at %d"
150 |             % (job.finished_at - job.started_at, nonce)
151 |         )
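    # Standalone sanity check for the header layout above (illustration only,
    # not part of the class): Bitcoin's genesis block, serialized with the
    # same little-endian field order plus the nonce, double-SHA256's to a
    # value below the difficulty-1 target. The constants are the public
    # genesis values:
    #
    #     from hashlib import sha256
    #     version = (1).to_bytes(4, "little")
    #     prev_hash = bytes(32)  # genesis has no previous block
    #     merkle_root = bytes.fromhex(
    #         "4a5e1e4baab89f3a32518a88c31bc87f618f76673e2cc77ab2127b7afdeda33b"
    #     )[::-1]  # headers store hashes byte-reversed from display order
    #     ntime = (1231006505).to_bytes(4, "little")
    #     nbits = (0x1D00FFFF).to_bytes(4, "little")
    #     nonce = (2083236893).to_bytes(4, "little")
    #     header = version + prev_hash + merkle_root + ntime + nbits + nonce
    #     digest = sha256(sha256(header).digest()).digest()
    #     # the hash is compared as a little-endian integer, exactly like mine()
    #     assert int.from_bytes(digest, "little") < 0xFFFF << 208
    #     print(digest[::-1].hex())  # 000000000019d668...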
152 | 
153 |     async def connect_to_pool(self, connection: Connection):
154 |         self.__emit_aux_msg_on_bus(
155 |             "Connecting to pool {}:{}".format(
156 |                 connection.pool_host, connection.pool_port
157 |             )
158 |         )
159 | 
160 |         await connection.connect_to_pool()
161 | 
162 |         self.__emit_aux_msg_on_bus("Connected!")
163 | 
164 |         # Initializes the V2 session by sending the SetupConnection message
165 |         self.setup_connection()
166 | 
167 |     def disconnect(self):
168 |         self.__emit_aux_msg_on_bus("Disconnecting from pool")
169 |         if self.mine_proc:
170 |             self.job.is_cancelled = True  # the mining thread observes this flag and exits
171 |         # Mining is shut down, terminate any protocol message processing
172 |         self.terminate()
173 |         self.connection.disconnect()
174 |         self.mine_proc = None
175 | 
176 |     def new_mining_session(self, diff_target: coins.Target):
177 |         """Creates a new mining session"""
178 |         session = MiningSession(
179 |             name=self.name,
180 |             bus=self.bus,
181 |             # TODO remove once the backlinks are not needed
182 |             owner=None,
183 |             diff_target=diff_target,
184 |             enable_vardiff=False,
185 |         )
186 |         self.__emit_aux_msg_on_bus("NEW MINING SESSION ({})".format(session))
187 |         return session
188 | 
189 |     def mine_on_new_job(self, job: MiningJob, flush_any_pending_work=True):
190 |         """Start working on a new job
191 | 
192 |         TODO implement more advanced flush policy handling (e.g. wait for the current
193 |         job to finish if flush_any_pending_work is not required)
194 |         """
195 |         # Interrupt the mining process for now
196 |         if self.mine_proc is not None:
197 |             self.job.is_cancelled = True
198 |             self.mine_proc.cancel()
199 |         # Restart the process with a new job
200 |         self.job = job
201 |         self.set_is_mining(True)
202 | 
203 |         # create the mining task for this job
204 |         loop = asyncio.get_event_loop()
205 |         executor = concurrent.futures.ThreadPoolExecutor(max_workers=1)
206 | 
207 |         def m():
208 |             self.mine(job)
209 | 
210 |         task = loop.run_in_executor(executor, m)
211 |         self.mine_proc = task
212 | 
213 |     def set_is_mining(self, is_mining):
214 |         self.is_mining = is_mining
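    # The cancellation pattern from mine_on_new_job(), in isolation (sketch
    # with hypothetical demo names): the blocking mine() loop runs on a worker
    # thread via run_in_executor() and is stopped cooperatively through
    # job.is_cancelled, since an executor future cannot be interrupted once
    # its callable has started running:
    #
    #     async def demo(miner, job):
    #         loop = asyncio.get_event_loop()
    #         executor = concurrent.futures.ThreadPoolExecutor(max_workers=1)
    #         task = loop.run_in_executor(executor, lambda: miner.mine(job))
    #         await asyncio.sleep(1)
    #         job.is_cancelled = True  # the thread observes the flag and returns
    #         await task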
215 | 
216 |     def __emit_aux_msg_on_bus(self, msg: str):
217 |         print(
218 |             f"{Fore.BLUE}{Style.BRIGHT}%s: {Style.NORMAL}%s{Style.RESET_ALL}"
219 |             % (self.name, msg)
220 |         )
221 | 
222 |     def __emit_hashrate_msg_on_bus(self, job: MiningJob, avg_share_time):
223 |         """Reports hashrate statistics on the message bus
224 | 
225 |         :param job: current job that is being mined
226 |         :return:
227 |         """
228 |         self.__emit_aux_msg_on_bus(
229 |             "mining with diff. {} | speed {} Gh/s | avg share time {} | job uid {}".format(
230 |                 job.diff_target.to_difficulty(),
231 |                 self.work_meter.get_speed(),
232 |                 avg_share_time,
233 |                 job.uid,
234 |             )
235 |         )
236 | 
237 |     def setup_connection(self):
238 |         self.connection.send_msg(
239 |             SetupConnection(
240 |                 protocol=ProtocolType.MINING_PROTOCOL,
241 |                 max_version=2,
242 |                 min_version=2,
243 |                 flags=0,  # TODO:
244 |                 endpoint_host=self.connection.pool_host,
245 |                 endpoint_port=self.connection.pool_port,
246 |                 vendor=self.device_information.get("vendor", "unknown"),
247 |                 hardware_version=self.device_information.get(
248 |                     "hardware_version", "unknown"
249 |                 ),
250 |                 firmware=self.device_information.get("firmware", "unknown"),
251 |                 device_id=self.device_information.get("device_id", ""),
252 |             )
253 |         )
254 | 
255 |     class ConnectionConfig:
256 |         """Stratum V2 connection configuration.
257 | 
258 |         For now, it is sufficient to record the SetupConnectionSuccess to have full
259 |         connection configuration available.
260 |         """
261 | 
262 |         def __init__(self, msg: SetupConnectionSuccess):
263 |             self.setup_msg = msg
264 | 
265 |     def _recv_msg(self):
266 |         return self.connection.incoming.get()
267 | 
268 | 
269 | 
270 | 
271 | 
272 | 
273 | 
274 | 
275 |     def visit_setup_connection_success(self, msg: SetupConnectionSuccess):
276 |         self.connection_config = self.ConnectionConfig(msg)
277 |         self.state = self.States.CONNECTION_SETUP
278 | 
279 |         req = OpenStandardMiningChannel(
280 |             req_id=1,
281 |             user_identity=self.name,
282 |             nominal_hash_rate=math.floor(
283 |                 self.device_information.get("speed_ghps") * 1e9
284 |             ),
285 |             # TODO: figure this out
286 |             max_target=self.diff_1_target,
287 |         )
288 |         # We expect a paired response to our open channel request
289 |         self.send_request(req)
290 | 
291 |     def visit_setup_connection_error(self, msg: SetupConnectionError):
292 |         """Setup connection has failed.
293 | 
294 |         TODO: consider implementing a reconnection attempt with exponential backoff or
295 |         something similar
296 |         """
297 |         self._emit_protocol_msg_on_bus("Connection setup failed", msg)
298 | 
299 |     def visit_open_standard_mining_channel_success(
300 |         self, msg: OpenStandardMiningChannelSuccess
301 |     ):
302 |         req = self.request_registry.pop(msg.req_id)
303 | 
304 |         if req is not None:
305 |             session = self.new_mining_session(
306 |                 coins.Target(msg.target, self.diff_1_target)
307 |             )
308 |             # TODO find some reasonable extraction of the channel configuration; for now,
309 |             # we just retain the OpenMiningChannel and OpenMiningChannelSuccess message
310 |             # pair that provides complete information
311 |             self.channel = PoolMiningChannel(
312 |                 session=session,
313 |                 # cfg=(req, msg),
314 |                 cfg=msg,
315 |                 conn_uid=self.connection.uid,
316 |                 channel_id=msg.channel_id,
317 |             )
318 |             session.run()
319 |         else:
320 |             self._emit_protocol_msg_on_bus(
321 |                 "Cannot find matching OpenMiningChannel request", msg
322 |             )
323 | 
324 |     def visit_open_extended_mining_channel_success(
325 |         self, msg: OpenStandardMiningChannelSuccess
326 |     ):
327 |         pass
328 | 
329 |     def visit_open_mining_channel_error(self, msg: OpenMiningChannelError):
330 |         req = self.request_registry.pop(msg.req_id)
331 |         self._emit_protocol_msg_on_bus(
332 |             "Open mining channel failed (orig request: {})".format(req), msg
333 |         )
334 | 
335 |     def visit_set_target(self, msg: SetTarget):
336 |         if self.__is_channel_valid(msg):
337 |             self.channel.session.set_target(
338 |                 coins.Target(
339 |                     msg.max_target, self.channel.session.curr_diff_target.diff_1_target
340 |                 )
341 |             )
342 | 
343 |     def visit_set_new_prev_hash(self, msg: SetNewPrevHash):
344 |         if self.__is_channel_valid(msg):
345 |             if self.channel.session.job_registry.contains(msg.job_id):
346 |                 job = self.channel.session.job_registry.get_job(msg.job_id)
347 |                 # retire all other jobs, as only the referenced job is valid
348 |                 self.channel.session.job_registry.retire_all_jobs()
349 |                 self.channel.session.set_prev_hash(msg)
350 | 
351 |                 self.mine_on_new_job(
352 |                     job=job,
353 |                     flush_any_pending_work=True,
354 |                 )
355 | 
356 |     def visit_new_mining_job(self, msg: NewMiningJob):
357 |         if self.__is_channel_valid(msg):
358 |             # Prepare a new job with the current session difficulty target
359 |             job = self.channel.session.new_mining_job(
360 |                 version=msg.version, merkle_root=msg.merkle_root, job_uid=msg.job_id
361 |             )
362 |             # Schedule the job for mining
363 |             if not msg.future_job:
364 |                 self.mine_on_new_job(job)
365 | 
366 |     def visit_submit_shares_success(self, msg: SubmitSharesSuccess):
367 |         if self.__is_channel_valid(msg):
368 |             self.channel.session.account_diff_shares(msg.new_shares_sum)
369 | 
370 |     def visit_submit_shares_error(self, msg: SubmitSharesError):
371 |         if self.__is_channel_valid(msg):
372 |             # TODO implement accounting for rejected shares
373 |             pass
374 |             # self.channel.session.account_rejected_shares(msg.new_shares_sum)
375 | 
376 |     def submit_mining_solution(self, job: MiningJob, nonce: int):
377 |         """Callback from the physical miner that successfully simulated mining some shares
378 | 
379 |         :param job: Job that the miner found a solution for; nonce: the winning nonce
380 |         """
381 |         # TODO: seq_num is currently unused, we should use it for tracking
382 |         # accepted/rejected shares
383 |         self._send_msg(
384 |             SubmitSharesStandard(
385 |                 channel_id=self.channel.id,
386 |                 sequence_number=0,  # unique sequential identifier within the channel
387 |                 job_id=job.uid,
388 |                 nonce=nonce,  # the nonce that actually met the target
389 |                 ntime=self.channel.session.min_ntime,  # header ntime used by mine()
390 |                 version=job.version,  # full nVersion field used in the header
391 |             )
392 |         )
393 | 
394 |     def _on_invalid_message(self, msg):
395 |         self._emit_protocol_msg_on_bus("Received invalid message", msg)
396 | 
397 |     def __is_channel_valid(self, msg):
398 |         """Validates that the channel referenced in the message is the miner's open channel"""
399 |         if self.channel is None:
400 |             bus_error_msg = (
401 |                 "Mining Channel not established yet, received channel "
402 |                 "message with channel ID({})".format(msg.channel_id)
403 |             )
404 |             is_valid = False
405 |             self._emit_protocol_msg_on_bus(bus_error_msg, msg)
406 |         elif self.channel.channel_id != msg.channel_id:
407 |             bus_error_msg = "Unknown channel (expected: {}, actual: {})".format(
408 |                 self.channel.id, msg.channel_id
409 |             )
410 |             is_valid = False
411 |             self._emit_protocol_msg_on_bus(bus_error_msg, msg)
412 |         else:
413 |             is_valid = True
414 | 
415 |         return is_valid
416 | 
--------------------------------------------------------------------------------
/primitives/mining_params.py:
--------------------------------------------------------------------------------
1 | """This module gathers mining parameters"""
2 | # this is a bitcoin constant, the maximum possible target, 00000000ffff0000000000000000000000000000000000000000000000000000
3 | # (0xFFFF << 208).to_bytes(32, byteorder="big").hex()
4 | # a larger target means lower difficulty, so this is the lowest possible difficulty!
5 | diff_1_target = 0xFFFF << 208
6 | 
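if __name__ == "__main__":
    # Worked example (illustration): difficulty is conventionally the ratio of
    # diff_1_target to the current target, and the expected time to find one
    # share is difficulty * 2**32 / hashrate. This matches the constant used
    # by the miner: 4.294967296 / speed_ghps == 2**32 / (speed_ghps * 1e9).
    target = diff_1_target >> 4             # hypothetical target, 16x harder
    difficulty = diff_1_target / target     # -> 16.0
    hashrate_hps = 250_000                  # the simulated miner's 250 kH/s
    print(difficulty, difficulty * 2**32 / hashrate_hps)  # 16.0, ~274878 s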
--------------------------------------------------------------------------------
/primitives/pool.py:
--------------------------------------------------------------------------------
1 | """Generic pool module"""
2 | import base64
3 | import hashlib
4 | import socket
5 | import time
6 | 
7 | import numpy as np
8 | import simpy
9 | from colorama import Fore, Style
10 | from cryptography.hazmat.primitives.asymmetric import x25519
11 | from dissononce.cipher.chachapoly import ChaChaPolyCipher
12 | from dissononce.dh.x25519.x25519 import X25519DH
13 | from dissononce.hash.blake2s import Blake2sHash
14 | from dissononce.processing.handshakepatterns.interactive.NX import NXHandshakePattern
15 | from dissononce.processing.impl.cipherstate import CipherState
16 | from dissononce.processing.impl.handshakestate import HandshakeState
17 | from dissononce.processing.impl.symmetricstate import SymmetricState
18 | from event_bus import EventBus
19 | 
20 | import primitives.coins as coins
21 | from primitives.connection import Connection
22 | from primitives.hashrate_meter import HashrateMeter
23 | from primitives.protocol import ConnectionProcessor
24 | 
25 | """Stratum V2 pool implementation
26 | 
27 | """
28 | import asyncio
29 | import random
30 | from asyncio import StreamReader, StreamWriter
31 | 
32 | 
33 | from primitives.messages import *
34 | 
35 | from primitives.session import (
36 |     ChannelRegistry,
37 |     MiningChannel,
38 |     MiningJob,
39 |     MiningJobRegistry,
40 |     MiningSession,
41 |     PoolMiningChannel,
42 | )
43 | from primitives.types import DownstreamConnectionFlags, UpstreamConnectionFlags
44 | 
45 | 
46 | class Pool(ConnectionProcessor):
47 |     """Represents a generic mining pool.
48 | 
49 |     It handles connections and delegates work to the actual protocol-specific object
50 | 
51 |     The pool keeps statistics about:
52 | 
53 |     - accepted submits and shares: submit count and difficulty sum (shares) for valid
54 |       solutions
55 |     - stale submits and shares: submit count and difficulty sum (shares) for solutions
56 |       that have been sent after a new block is found
57 |     - rejected submits: submit count of invalid submit attempts that don't refer to any
58 |       particular job
59 |     """
60 | 
61 |     meter_period = 60
62 | 
63 |     def __init__(
64 |         self,
65 |         name: str,
66 |         bus: EventBus,
67 |         default_target: coins.Target,
68 |         extranonce2_size: int = 8,
69 |         avg_pool_block_time: float = 60,
70 |         enable_vardiff: bool = False,
71 |         desired_submits_per_sec: float = 0.3,
72 |     ):
73 |         """Initializes pool statistics and an initial prev hash.
74 | 
75 |         :param default_target: initial difficulty target handed to new sessions
76 |         """
77 |         self.name = name
78 |         self.bus = bus
79 |         self.default_target = default_target
80 |         self.extranonce2_size = extranonce2_size
81 |         self.avg_pool_block_time = avg_pool_block_time
82 | 
83 |         # Prepare the initial prevhash for the very first block
84 |         self.__generate_new_prev_hash()
85 |         # Per connection message processors
86 |         self.connection_processors = dict()
87 | 
88 |         self.meter_accepted = HashrateMeter()
89 |         self.meter_rejected_stale = HashrateMeter()
90 |         # self.meter_process = env.process(self.__pool_speed_meter())
91 |         self.meter_process = None
92 |         self.enable_vardiff = enable_vardiff
93 |         self.desired_submits_per_sec = desired_submits_per_sec
94 | 
95 |         self.extra_meters = []
96 | 
97 |         self.accepted_submits = 0
98 |         self.stale_submits = 0
99 |         self.rejected_submits = 0
100 | 
101 |         self.accepted_shares = 0
102 |         self.stale_shares = 0
103 |         self._mining_channel_registry = None
104 |         self.server = None
105 | 
106 |     async def client_connected_cb(
107 |         self, client_reader: StreamReader, client_writer: StreamWriter
108 |     ):
109 |         print("Accepted client connection")
110 | 
111 |         # our_private = base64.b64decode('WAmgVYXkbT2bCtdcDwolI88/iVi/aV3/PHcUBTQSYmo=')
112 |         # private = x25519.X25519PrivateKey.from_private_bytes(our_private)
113 | 
114 |         # prepare the handshake state object for the responder side
115 |         our_handshakestate = HandshakeState(
116 |             SymmetricState(
117 |                 CipherState(
118 |                     # AESGCMCipher()
119 |                     ChaChaPolyCipher()  # chacha20poly1305
120 |                 ),
121 |                 Blake2sHash(),
122 |             ),
123 |             X25519DH(),
124 |         )
125 | 
126 |         pool_s = X25519DH().generate_keypair()
127 |         our_handshakestate.initialize(NXHandshakePattern(), False, b"", s=pool_s)
128 | 
129 |         # wait for the initiator's first (empty-payload) handshake message
130 |         ciphertext = await client_reader.read(4096)
131 |         frame, _ = Connection.unwrap(ciphertext)
132 |         message_buffer = bytearray()
133 |         our_handshakestate.read_message(frame, message_buffer)
134 | 
135 |         # when we do, respond
136 |         # the buffer should carry a SignatureNoiseMessage, but we don't
137 |         # construct one here, so we skip it for this localhost setup
138 |         message_buffer = bytearray()
139 |         self.connection.cipherstates = our_handshakestate.write_message(
140 |             b"", message_buffer
141 |         )
142 |         self.connection.cipher_state = self.connection.cipherstates[1]
143 |         self.connection.decrypt_cipher_state = self.connection.cipherstates[0]
144 | 
145 |         message_buffer = Connection.wrap(bytes(message_buffer))
146 |         client_writer.write(message_buffer)  # StreamWriter.write buffers and returns None
147 | 
148 |         self.connection.sock = (client_reader, client_writer)
149 |         print("Handshake done!")
150 | 
151 |         # # create the POW task only after the client is connected
152 |         # loop = asyncio.get_event_loop()
153 |         # print("Before create")
154 |         # task = loop.create_task(self.pow_update())
155 |         # print("After create")
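    # The initiator (miner) side of this NX handshake is the mirror image of
    # the responder logic above; a sketch assuming the same dissononce
    # primitives (the real client side lives in primitives/connection.py,
    # which is not shown in this excerpt):
    #
    #     initiator = HandshakeState(
    #         SymmetricState(CipherState(ChaChaPolyCipher()), Blake2sHash()),
    #         X25519DH(),
    #     )
    #     initiator.initialize(NXHandshakePattern(), True, b"")  # True = initiator
    #     buf = bytearray()
    #     initiator.write_message(b"", buf)   # -> e; wrap and send to the pool
    #     # ...receive and unwrap the pool's reply frame, then:
    #     # cipherstates = initiator.read_message(reply_frame, bytearray())
    #     # -> e, ee, s, es; yields the two CipherStates for the transport phase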
print("Beofre create") 154 | # task = loop.create_task(self.pow_update()) 155 | # print("After create") 156 | 157 | async def start_server(self): 158 | self.server = await asyncio.start_server( 159 | self.client_connected_cb, host="localhost", port=2000 160 | ) 161 | await self.server.serve_forever() 162 | 163 | async def make_handshake(self, connection: Connection): 164 | self.connection = connection 165 | self._mining_channel_registry = ChannelRegistry(connection.uid) 166 | 167 | def reset_stats(self): 168 | self.accepted_submits = 0 169 | self.stale_submits = 0 170 | self.rejected_submits = 0 171 | self.accepted_shares = 0 172 | self.stale_shares = 0 173 | 174 | def disconnect(self, connection: Connection): 175 | if connection.uid not in self.connection_processors: 176 | return 177 | self.connection_processors[connection.uid].terminate() 178 | del self.connection_processors[connection.uid] 179 | 180 | def new_mining_session(self, owner, on_vardiff_change, clz=MiningSession): 181 | """Creates a new mining session""" 182 | session = clz( 183 | name=self.name, 184 | bus=self.bus, 185 | owner=owner, 186 | diff_target=self.default_target, 187 | enable_vardiff=self.enable_vardiff, 188 | vardiff_time_window=self.meter_accepted.window_size, 189 | vardiff_desired_submits_per_sec=self.desired_submits_per_sec, 190 | on_vardiff_change=on_vardiff_change, 191 | ) 192 | self.__emit_aux_msg_on_bus("NEW MINING SESSION ()".format(session)) 193 | 194 | return session 195 | 196 | def add_extra_meter(self, meter: HashrateMeter): 197 | self.extra_meters.append(meter) 198 | 199 | def account_accepted_shares(self, diff_target: coins.Target): 200 | self.accepted_submits += 1 201 | self.accepted_shares += diff_target.to_difficulty() 202 | self.meter_accepted.measure(diff_target.to_difficulty()) 203 | 204 | def account_stale_shares(self, diff_target: coins.Target): 205 | self.stale_submits += 1 206 | self.stale_shares += diff_target.to_difficulty() 207 | self.meter_rejected_stale.measure(diff_target.to_difficulty()) 208 | 209 | def account_rejected_submits(self): 210 | self.rejected_submits += 1 211 | 212 | def process_submit( 213 | self, submit_job_uid, session: MiningSession, on_accept, on_reject 214 | ): 215 | if session.job_registry.contains(submit_job_uid): 216 | diff_target = session.job_registry.get_job_diff_target(submit_job_uid) 217 | # Global accounting 218 | self.account_accepted_shares(diff_target) 219 | # Per session accounting 220 | session.account_diff_shares(diff_target.to_difficulty()) 221 | on_accept(diff_target) 222 | elif session.job_registry.contains_invalid(submit_job_uid): 223 | diff_target = session.job_registry.get_invalid_job_diff_target( 224 | submit_job_uid 225 | ) 226 | self.account_stale_shares(diff_target) 227 | on_reject(diff_target) 228 | else: 229 | self.account_rejected_submits() 230 | on_reject(None) 231 | 232 | async def pow_update(self): 233 | """This process simulates finding new blocks based on pool's hashrate""" 234 | while True: 235 | if not self.connection.sock: 236 | await asyncio.sleep(5) 237 | continue 238 | 239 | self.__generate_new_prev_hash() 240 | 241 | self.__emit_aux_msg_on_bus("NEW_BLOCK: {}".format(self.prev_hash.hex())) 242 | 243 | for connection_processor in self.connection_processors.values(): 244 | connection_processor.on_new_block() 245 | 246 | await asyncio.sleep(5) 247 | 248 | def __generate_new_prev_hash(self): 249 | """Generates a new prevhash based on current time.""" 250 | # TODO: this is not very precise as to events that would trigger this method 
232 |     async def pow_update(self):
233 |         """This process simulates finding new blocks based on the pool's hashrate"""
234 |         while True:
235 |             if not self.connection.sock:
236 |                 await asyncio.sleep(5)
237 |                 continue
238 | 
239 |             self.__generate_new_prev_hash()
240 | 
241 |             self.__emit_aux_msg_on_bus("NEW_BLOCK: {}".format(self.prev_hash.hex()))
242 | 
243 |             for connection_processor in self.connection_processors.values():
244 |                 connection_processor.on_new_block()
245 | 
246 |             await asyncio.sleep(5)
247 | 
248 |     def __generate_new_prev_hash(self):
249 |         """Generates a new random prevhash."""
250 |         # TODO: random values may collide, in which case two different blocks
251 |         # would yield the same prev hash value; we should consider specifying
252 |         # the prev hash as a simple sequence number instead
253 |         self.prev_hash = hashlib.sha256(random.randint(0, 16777216).to_bytes(4, "little")).digest()  # hash the number's bytes; bytes(n) would create n zero bytes
254 | 
255 |     def __pool_speed_meter(self):
256 |         while True:
257 |             # yield self.env.timeout(self.meter_period)
258 |             speed = self.meter_accepted.get_speed()
259 |             submit_speed = self.meter_accepted.get_submit_per_secs()
260 |             if speed is None or submit_speed is None:
261 |                 self.__emit_aux_msg_on_bus("SPEED: N/A Gh/s, N/A submits/s")
262 |             else:
263 |                 self.__emit_aux_msg_on_bus(
264 |                     "SPEED: {0:.2f} Gh/s, {1:.4f} submits/s".format(speed, submit_speed)
265 |                 )
266 | 
267 |     def __emit_aux_msg_on_bus(self, msg: str):
268 |         print(
269 |             f"{Fore.BLUE}{Style.BRIGHT}%s: {Style.NORMAL}%s{Style.RESET_ALL}"
270 |             % (self.name, msg)
271 |         )
272 | 
273 |     def _send_msg(self, msg):
274 |         self.connection.send_msg(msg)
275 | 
276 |     def _recv_msg(self):
277 |         return self.connection.outgoing.get()
278 | 
279 |     def terminate(self):
280 |         super().terminate()
281 |         for channel in self._mining_channel_registry.channels:
282 |             channel.terminate()
283 | 
284 |     def _on_invalid_message(self, msg):
285 |         """Ignore any unrecognized messages.
286 | 
287 |         TODO-DOC: define protocol handling of unrecognized messages
288 |         """
289 |         pass
290 | 
291 |     def visit_setup_connection(self, msg: SetupConnection):
292 |         # response_flags = set()
293 | 
294 |         # arbitrary for now
295 |         # if DownstreamConnectionFlags.REQUIRES_VERSION_ROLLING not in msg.flags:
296 |         #     response_flags.add(UpstreamConnectionFlags.REQUIRES_FIXED_VERSION)
297 |         self._send_msg(
298 |             SetupConnectionSuccess(
299 |                 used_version=min(msg.min_version, msg.max_version),
300 |                 flags=0,
301 |             )
302 |         )
303 | 
304 |     def visit_open_standard_mining_channel(self, msg: OpenStandardMiningChannel):
305 |         # Open only channels compatible with this node's configuration
306 |         if msg.max_target <= self.default_target.diff_1_target:
307 |             # Create the channel and build back-links from session to channel and from
308 |             # channel to connection
309 |             mining_channel = PoolMiningChannel(
310 |                 cfg=msg, conn_uid=self.connection.uid, channel_id=None, session=None
311 |             )
312 |             # Appending assigns the channel a unique ID within this connection
313 |             self._mining_channel_registry.append(mining_channel)
314 | 
315 |             # TODO use partial to bind the mining channel to the _on_vardiff_change and eliminate the need for the
316 |             # backlink
317 |             session = self.new_mining_session(
318 |                 owner=mining_channel, on_vardiff_change=self._on_vardiff_change
319 |             )
320 |             mining_channel.set_session(session)
321 | 
322 |             self._send_msg(
323 |                 OpenStandardMiningChannelSuccess(
324 |                     req_id=msg.req_id,
325 |                     channel_id=mining_channel.id,
326 |                     target=session.curr_target.target,
327 |                     extranonce_prefix=b"",
328 |                     group_channel_id=0,  # pool currently doesn't support grouping
329 |                 )
330 |             )
331 | 
332 |             # TODO-DOC: explain the (mandatory?) setting 'future_job=True' in
333 |             # the message since the downstream has no prev hash
334 |             # immediately after the OpenStandardMiningChannelSuccess
335 |             # Update the flow diagram in the spec including specifying the
336 |             # future_job attribute
337 |             new_job_msg = self.__build_new_job_msg(mining_channel, is_future_job=True)
338 |             # Take the future job from the channel so that we have space for producing a new one right away
339 |             future_job = mining_channel.take_future_job()
340 |             assert (
341 |                 future_job.uid == new_job_msg.job_id
342 |             ), "BUG: future job on channel {} doesn't match the produced message job ID {}".format(
343 |                 future_job.uid, new_job_msg.job_id
344 |             )
345 |             self._send_msg(new_job_msg)
346 |             self._send_msg(
347 |                 self.__build_set_new_prev_hash_msg(
348 |                     channel_id=mining_channel.id, future_job_id=new_job_msg.job_id
349 |                 )
350 |             )
351 |             # Send out another future job right away
352 |             future_job_msg = self.__build_new_job_msg(
353 |                 mining_channel, is_future_job=True
354 |             )
355 |             self._send_msg(future_job_msg)
356 | 
357 |             # All messages sent, start the session
358 |             session.run()
359 | 
360 |         else:
361 |             self._send_msg(
362 |                 OpenMiningChannelError(
363 |                     msg.req_id, "Cannot open mining channel: {}".format(msg)
364 |                 )
365 |             )
366 | 
367 |     def visit_submit_shares_standard(self, msg: SubmitSharesStandard):
368 |         """
369 |         TODO: implement aggregation of sending SubmitSharesSuccess for a batch of successful submits
370 |         """
371 |         channel = self._mining_channel_registry.get_channel(msg.channel_id)
372 | 
373 | 
374 |         assert channel, "Channel {} is not defined".format(msg.channel_id)
375 | 
376 |         assert (
377 |             channel.conn_uid == self.connection.uid
378 |         ), "Channel conn UID({}) doesn't match current conn UID({})".format(
379 |             channel.conn_uid, self.connection.uid
380 |         )
381 |         self.__emit_channel_msg_on_bus(msg)
382 | 
383 |         def on_accept(diff_target: coins.Target):
384 |             resp_msg = SubmitSharesSuccess(
385 |                 channel.id,
386 |                 last_sequence_number=msg.sequence_number,
387 |                 new_submits_accepted_count=1,
388 |                 new_shares_sum=diff_target.to_difficulty(),
389 |             )
390 |             self._send_msg(resp_msg)
391 |             self.__emit_channel_msg_on_bus(resp_msg)
392 | 
393 |         def on_reject(_diff_target: coins.Target):
394 |             resp_msg = SubmitSharesError(
395 |                 channel.id,
396 |                 sequence_number=msg.sequence_number,
397 |                 error_code="Share rejected",
398 |             )
399 |             self._send_msg(resp_msg)
400 |             self.__emit_channel_msg_on_bus(resp_msg)
401 | 
402 |         self.process_submit(
403 |             msg.job_id, channel.session, on_accept=on_accept, on_reject=on_reject
404 |         )
405 | 
406 |     def visit_submit_shares_extended(self, msg: SubmitSharesStandard):
407 |         pass
408 | 
409 |     def _on_vardiff_change(self, session: MiningSession):
410 |         """Handle difficulty change for the current session.
411 | 
412 |         Note that to enforce the difficulty change as soon as possible,
413 |         the message is accompanied by a new mining job
414 |         """
415 |         channel = session.owner
416 |         self._send_msg(SetTarget(channel.id, session.curr_target.target))  # max_target is an integer
417 | 
418 |         new_job_msg = self.__build_new_job_msg(channel, is_future_job=False)
419 |         self._send_msg(new_job_msg)
420 | 
421 |     def on_new_block(self):
422 |         """Sends an individual SetNewPrevHash message to all channels
423 | 
424 |         TODO: it is not quite clear how to handle the case where downstream has
425 |         opened multiple channels with the pool. 
The following needs to be 426 | answered: 427 | - Is any downstream node that opens more than 1 mining channel considered a 428 | proxy = it understands grouping? MAYBE/YES but see next questions 429 | - Can we send only 1 SetNewPrevHash message even if the channels are 430 | standard? NO - see below 431 | - if only 1 group SetNewPrevHash message is sent what 'future' job should 432 | it reference? The problem is that we have no defined way where a future 433 | job is being shared by multiple channels. 434 | """ 435 | # Pool currently doesn't support grouping channels, all channels belong to 436 | # group 0. We set the prev hash for all channels at once 437 | # Retire current jobs in the registries of all channels 438 | for channel in self._mining_channel_registry.channels: 439 | future_job = channel.take_future_job() 440 | prev_hash_msg = self.__build_set_new_prev_hash_msg( 441 | channel.id, future_job.uid 442 | ) 443 | channel.session.job_registry.retire_all_jobs() 444 | channel.session.job_registry.add_job(future_job) 445 | # Now, we can send out the new prev hash, since all jobs are 446 | # invalidated. Any further submits for the invalidated jobs will be 447 | # rejected 448 | self._send_msg(prev_hash_msg) 449 | 450 | # We can now broadcast future jobs to all channels for the upcoming block 451 | for channel in self._mining_channel_registry.channels: 452 | future_new_job_msg = self.__build_new_job_msg(channel, is_future_job=True) 453 | self._send_msg(future_new_job_msg) 454 | 455 | def __build_set_new_prev_hash_msg(self, channel_id, future_job_id): 456 | return SetNewPrevHash( 457 | channel_id=channel_id, 458 | job_id=future_job_id, 459 | prev_hash=self.prev_hash if self.prev_hash else 0, 460 | min_ntime=int(time.time()), # self.env.now, 461 | nbits=0, # TODO: None? 462 | ) 463 | 464 | @staticmethod 465 | def __build_new_job_msg(mining_channel: PoolMiningChannel, is_future_job: bool): 466 | """Builds NewMiningJob or NewExtendedMiningJob depending on channel type. 467 | 468 | The method also builds the actual job and registers it as 'future' job within 469 | the channel if requested. 
470 | 
471 |         :param mining_channel: determines the channel and thus the message type
472 |         :param is_future_job: when true, the job won't be considered for the current prev
473 |          hash known to the downstream node but for any future prev hash that explicitly
474 |          selects it
475 |         :return: New{Extended}MiningJob
476 |         """
477 |         version = 1
478 |         merkle_root = bytes(random.getrandbits(8) for _ in range(32))
479 | 
480 |         new_job = mining_channel.session.new_mining_job(version, merkle_root)
481 |         if is_future_job:
482 |             mining_channel.add_future_job(new_job)
483 | 
484 |         # Compose the protocol message based on actual channel type
485 |         if isinstance(mining_channel.cfg, OpenStandardMiningChannel):
486 |             msg = NewMiningJob(
487 |                 channel_id=mining_channel.id,
488 |                 job_id=new_job.uid,
489 |                 future_job=is_future_job,
490 |                 version=version,
491 |                 merkle_root=merkle_root,
492 |             )
493 |         elif isinstance(mining_channel.cfg, OpenExtendedMiningChannel):
494 |             msg = NewExtendedMiningJob(
495 |                 channel_id=mining_channel.id,
496 |                 job_id=new_job.uid,
497 |                 future_job=is_future_job,
498 |                 version=version,
499 |                 version_rolling_allowed=True,  # TODO
500 |                 merkle_path=MerklePath(),
501 |                 cb_prefix=CoinBasePrefix(),
502 |                 cb_suffix=CoinBaseSuffix(),
503 |             )
504 |         else:
505 |             assert False, "Unsupported channel type: {}".format(
506 |                 mining_channel.cfg.channel_type
507 |             )
508 | 
509 |         return msg
510 | 
511 |     def __emit_channel_msg_on_bus(self, msg: ChannelMessage):
512 |         """Helper method for reporting a channel oriented message on the debugging bus."""
513 |         self._emit_protocol_msg_on_bus("Channel ID: {}".format(msg.channel_id), msg)
514 | 
--------------------------------------------------------------------------------
/primitives/protocol.py:
--------------------------------------------------------------------------------
1 | """Generic protocol primitives"""
2 | import asyncio  # new module
3 | import socket
4 | from abc import abstractmethod
5 | 
6 | import simpy
7 | import stringcase
8 | from colorama import Back, Cursor, Fore, Style
9 | from event_bus import EventBus
10 | 
11 | from primitives.connection import Connection
12 | from primitives.messages import (
13 |     Message,
14 |     NewMiningJob,
15 |     OpenStandardMiningChannel,
16 |     OpenStandardMiningChannelSuccess,
17 |     SetNewPrevHash,
18 |     SetTarget,
19 |     SetupConnection,
20 |     SetupConnectionError,
21 |     SetupConnectionSuccess,
22 |     SubmitSharesStandard,
23 |     msg_type_class_map,
24 | )
25 | 
26 | 
27 | class RequestRegistry:
28 |     """Generates unique request ID for messages and provides simple registry"""
29 | 
30 |     def __init__(self):
31 |         self.next_req_id = 0
32 |         self.requests = dict()
33 | 
34 |     def push(self, req: Message):
35 |         """Assigns a unique request ID to a message and registers it"""
36 |         req.req_id = self.__next_req_id()
37 |         assert (
38 |             self.requests.get(req.req_id) is None
39 |         ), "BUG: request ID already present {}".format(req.req_id)
40 |         self.requests[req.req_id] = req
41 | 
42 |     def pop(self, req_id):
43 |         return self.requests.pop(req_id, None)
44 | 
45 |     def __next_req_id(self):
46 |         curr_req_id = self.next_req_id
47 |         self.next_req_id += 1
48 |         return curr_req_id
49 | 
50 | 
51 | class ConnectionProcessor:
52 |     """Receives and dispatches a message on a single connection."""
53 | 
54 |     def __init__(self, name: str, bus: EventBus, connection: Connection):
55 |         self.name = name
56 |         self.bus = bus
57 |         self.connection = connection
58 |         self.request_registry = RequestRegistry()
59 |         self.receive_loop_process = None
60 | 
61 |     def terminate(self):
62 |         if self.receive_loop_process is not None:  # the loop may never have been started
63 |             self.receive_loop_process.interrupt()
64 |     def send_request(self, req):
65 |         self.request_registry.push(req)
66 |         self.connection.send_msg(req)
67 | 
68 |     @abstractmethod
69 |     def _send_msg(self, msg):
70 |         pass
71 | 
72 |     @abstractmethod
73 |     def _recv_msg(self):
74 |         pass
75 | 
76 |     @abstractmethod
77 |     def _on_invalid_message(self, msg):
78 |         pass
79 | 
80 |     def _emit_aux_msg_on_bus(self, log_msg: str):
81 |         print(("{}: {}").format(self.name, log_msg))
82 | 
83 |     def _emit_protocol_msg_on_bus(self, log_msg: str, msg: Message):
84 |         self._emit_aux_msg_on_bus("{}: {}".format(log_msg, msg))
85 | 
86 |     async def receive_one(self):
87 |         messages = await self.connection.receive()
88 | 
89 |         try:
90 |             for msg in messages:
91 |                 msg.accept(self)
92 |         except Message.VisitorMethodNotImplemented as e:
93 |             print(
94 |                 "{} doesn't implement: {}() for".format(type(self).__name__, e),
95 |                 msg,
96 |             )
97 |         await asyncio.sleep(0)
98 | 
99 |     async def receive_loop(self):
100 |         """Receive loop for a particular connection; dispatches each received message"""
101 |         while True:
102 |             try:
103 |                 await self.receive_one()
104 |             except socket.timeout as e:
105 |                 print(e)
106 |                 await asyncio.sleep(0)
107 |                 continue
108 | 
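# How msg.accept(self) dispatches (sketch): each message resolves to a
# visit_<snake_case_class_name> method on the processor. Message.accept is
# defined in primitives/messages.py (outside this excerpt); a typical
# implementation on top of the stringcase dependency looks roughly like:
#
#     def accept(self, visitor):
#         method_name = "visit_{}".format(stringcase.snakecase(type(self).__name__))
#         try:
#             visit = getattr(visitor, method_name)
#         except AttributeError:
#             raise Message.VisitorMethodNotImplemented(method_name)
#         visit(self)
#
# e.g. SetNewPrevHash routes to visit_set_new_prev_hash, matching the
# handlers defined on Miner and Pool.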
--------------------------------------------------------------------------------
/primitives/session.py:
--------------------------------------------------------------------------------
1 | """Generic session module"""
2 | import base64
3 | import hashlib
4 | import socket
5 | 
6 | import numpy as np
7 | import simpy
8 | from cryptography.hazmat.primitives.asymmetric import x25519
9 | from dissononce.cipher.chachapoly import ChaChaPolyCipher
10 | from dissononce.dh.x25519.x25519 import X25519DH
11 | from dissononce.hash.blake2s import Blake2sHash
12 | from dissononce.processing.handshakepatterns.interactive.NX import NXHandshakePattern
13 | from dissononce.processing.impl.cipherstate import CipherState
14 | from dissononce.processing.impl.handshakestate import HandshakeState
15 | from dissononce.processing.impl.symmetricstate import SymmetricState
16 | from event_bus import EventBus
17 | 
18 | import primitives.coins as coins
19 | from primitives.connection import Connection
20 | from primitives.hashrate_meter import HashrateMeter
21 | from primitives.protocol import ConnectionProcessor
22 | 
23 | """Stratum V2 pool implementation
24 | 
25 | """
26 | import random
27 | 
28 | 
29 | from primitives.messages import *
30 | 
31 | from primitives.types import DownstreamConnectionFlags, UpstreamConnectionFlags
32 | 
33 | 
34 | class MiningJob:
35 |     """This class allows the simulation to track per job difficulty target for
36 |     correct accounting"""
37 | 
38 |     def __init__(
39 |         self, uid: int, diff_target: coins.Target, version: int, merkle_root: bytes
40 |     ):
41 |         """
42 |         :param uid:
43 |         :param diff_target: difficulty target
44 |         """
45 |         self.uid = uid
46 |         self.diff_target = diff_target
47 |         self.version = version
48 |         self.merkle_root = merkle_root
49 |         self.is_cancelled = False
50 |         # mining start as unix timestamp
51 |         self.started_at = None
52 |         # mining end as unix timestamp
53 |         self.finished_at = None
54 | 
55 |     def _format(self, content):
56 |         return "{}({})".format(type(self).__name__, content)
57 | 
58 |     def __str__(self):
59 |         return self._format(
60 |             "uid={}, diff_target={}".format(
61 |                 self.uid,
62 |                 self.diff_target,
63 |             )
64 |         )
65 | 
66 | 
67 | class MiningJobRegistry:
68 |     """Registry of jobs that have been assigned for mining.
69 | 
70 |     The registry intentionally doesn't remove any jobs from the simulation so that we
71 |     can explicitly account for 'stale' hashrate. When this requirement is not needed,
72 |     the retire_all_jobs() can be adjusted accordingly"""
73 | 
74 |     def __init__(self):
75 |         # Tracking minimum valid job ID
76 |         self.next_job_uid = 0
77 |         # Registered jobs based on their uid
78 |         self.jobs = dict()
79 |         # Invalidated jobs just for accounting reasons
80 |         self.invalid_jobs = dict()
81 | 
82 |     def new_mining_job(
83 |         self, diff_target: coins.Target, version: int, merkle_root: bytes, job_id=None
84 |     ):
85 |         """Prepares new mining job and registers it internally.
86 | 
87 |         :param diff_target: difficulty target of the job to be constructed
88 |         :param job_id: optional identifier of a job. If not specified, the registry
89 |          chooses its own identifier.
90 |         :return: new mining job or None if a job with the specified ID already exists
91 |         """
92 |         if job_id is None:
93 |             job_id = self.__next_job_uid()
94 |         if job_id not in self.jobs:
95 |             new_job = MiningJob(
96 |                 uid=job_id,
97 |                 diff_target=diff_target,
98 |                 version=version,
99 |                 merkle_root=merkle_root,
100 |             )
101 |             self.jobs[new_job.uid] = new_job
102 |         else:
103 |             new_job = None
104 |         return new_job
105 | 
106 |     def get_job(self, job_uid):
107 |         """
108 |         :param job_uid: job_uid to look for
109 |         :return: Returns the job or None
110 |         """
111 |         return self.jobs.get(job_uid)
112 | 
113 |     def get_job_diff_target(self, job_uid):
114 |         return self.jobs[job_uid].diff_target
115 | 
116 |     def get_invalid_job_diff_target(self, job_uid):
117 |         return self.invalid_jobs[job_uid].diff_target
118 | 
119 |     def contains(self, job_uid):
120 |         """Job ID presence check
121 |         :return: True when such Job ID exists in the registry (it may still not
122 |          be valid)"""
123 |         return job_uid in self.jobs
124 | 
125 |     def contains_invalid(self, job_uid):
126 |         """Check the invalidated job registry
127 |         :return: True when such Job ID exists in the registry (it may still not
128 |          be valid)"""
129 |         return job_uid in self.invalid_jobs
130 | 
131 |     def retire_all_jobs(self):
132 |         """Make all jobs invalid, while storing their copy for accounting reasons"""
133 |         self.invalid_jobs.update(self.jobs)
134 |         self.jobs = dict()
135 | 
136 |     def add_job(self, job: MiningJob):
137 |         """
138 |         Appends a job with an assigned ID into the registry
139 |         :param job:
140 |         :return:
141 |         """
142 |         assert (
143 |             self.get_job(job.uid) is None
144 |         ), "Job {} already exists in the registry".format(job)
145 |         self.jobs[job.uid] = job
146 | 
147 |     def __next_job_uid(self):
148 |         """Allocates a new job ID for this session."""
149 |         curr_job_uid = self.next_job_uid
150 |         self.next_job_uid += 1
151 | 
152 |         return curr_job_uid
153 | 
154 | 
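# Registry semantics in brief (illustrative sketch): retired jobs move to
# invalid_jobs instead of disappearing, so a late submit can still be
# accounted as stale rather than rejected outright:
#
#     registry = MiningJobRegistry()
#     job = registry.new_mining_job(diff_target=None, version=1, merkle_root=bytes(32))
#     assert registry.contains(job.uid)
#     registry.retire_all_jobs()
#     assert not registry.contains(job.uid)
#     assert registry.contains_invalid(job.uid)  # stale, but still accounted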
155 | class MiningSession:
156 |     """Represents a mining session that can adjust its difficulty target"""
157 | 
158 |     min_factor = 0.25
159 |     max_factor = 4
160 | 
161 |     def __init__(
162 |         self,
163 |         name: str,
164 |         bus: EventBus,
165 |         owner,
166 |         diff_target: coins.Target,
167 |         enable_vardiff,
168 |         vardiff_time_window=None,
169 |         vardiff_desired_submits_per_sec=None,
170 |         on_vardiff_change=None,
171 |     ):
172 |         """ """
173 |         self.name = name
174 |         self.bus = bus
175 |         self.owner = owner
176 |         self.curr_diff_target = diff_target
177 |         self.enable_vardiff = enable_vardiff
178 |         self.meter = None
179 |         self.vardiff_process = None
180 |         self.vardiff_time_window_size = vardiff_time_window
181 |         self.vardiff_desired_submits_per_sec = vardiff_desired_submits_per_sec
182 |         self.on_vardiff_change = on_vardiff_change
183 | 
184 |         self.job_registry = MiningJobRegistry()
185 |         self.prev_hash = None
186 |         self.min_ntime = None
187 |         self.nbits = None
188 | 
189 |     @property
190 |     def curr_target(self):
191 |         """Derives target from current difficulty on the session"""
192 |         return self.curr_diff_target
193 | 
194 |     def set_target(self, target: coins.Target):
195 |         self.curr_diff_target = target
196 | 
197 |     def set_prev_hash(self, msg: SetNewPrevHash):
198 |         self.prev_hash = msg.prev_hash
199 |         self.min_ntime = msg.min_ntime
200 |         self.nbits = msg.nbits
201 | 
202 |     def new_mining_job(self, version: int, merkle_root: bytes, job_uid=None):
203 |         """Generates a new job using current session's target"""
204 |         return self.job_registry.new_mining_job(
205 |             self.curr_diff_target, version, merkle_root, job_uid
206 |         )
207 | 
208 |     def run(self):
209 |         """Explicit activation starts any simulation processes associated with the session"""
210 |         self.meter = HashrateMeter()
211 |         # if self.enable_vardiff:
212 |         #     self.vardiff_process = self.env.process(self.__vardiff_loop())
213 | 
214 |     def account_diff_shares(self, diff: int):
215 |         assert (
216 |             self.meter is not None
217 |         ), "BUG: session not running yet, cannot account shares"
218 |         self.meter.measure(diff)
219 | 
220 |     def terminate(self):
221 |         """Complete shutdown of the session"""
222 |         self.meter.terminate()
223 |         if self.enable_vardiff:
224 |             self.vardiff_process.interrupt()
225 | 
226 |     def __vardiff_loop(self):
227 |         while True:
228 |             try:
229 |                 submits_per_sec = self.meter.get_submit_per_secs()
230 |                 if submits_per_sec is None:
231 |                     # no accepted shares, we will halve the diff
232 |                     factor = 0.5
233 |                 else:
234 |                     factor = submits_per_sec / self.vardiff_desired_submits_per_sec
235 |                 if factor < self.min_factor:
236 |                     factor = self.min_factor
237 |                 elif factor > self.max_factor:
238 |                     factor = self.max_factor
239 |                 self.curr_diff_target.div_by_factor(factor)
240 |                 self.__emit_aux_msg_on_bus(
241 |                     "DIFF_UPDATE(target={})".format(self.curr_diff_target)
242 |                 )
243 |                 self.on_vardiff_change(self)
244 |                 # yield self.env.timeout(self.vardiff_time_window_size)
245 |             except simpy.Interrupt:
246 |                 break
247 | 
248 |     def __emit_aux_msg_on_bus(self, msg):
249 |         self.bus.emit(self.name, None, self.owner, msg)
250 | 
251 | 
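# Vardiff arithmetic, worked through (illustration): with the default 0.3
# desired submits/s and an observed 0.6 submits/s, factor = 0.6 / 0.3 = 2.0,
# inside the [0.25, 4] clamp, so the target is divided by 2: halving the
# target doubles the difficulty and should roughly halve the submit rate:
#
#     desired, observed = 0.3, 0.6
#     factor = max(MiningSession.min_factor,
#                  min(MiningSession.max_factor, observed / desired))
#     assert factor == 2.0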
252 | class MiningChannel:
253 |     def __init__(self, cfg, conn_uid, channel_id):
254 |         """
255 |         :param cfg: configuration is represented by the full OpenStandardMiningChannel or
256 |          OpenStandardMiningChannelSuccess message depending on which end of the channel we are on
257 |         :param conn_uid: backlink to the connection this channel is on
258 |         :param channel_id: unique identifier for the channel
259 |         """
260 |         self.cfg = cfg
261 |         self.conn_uid = conn_uid
262 |         self.id = channel_id
263 |         self.channel_id = channel_id
264 | 
265 |     def set_id(self, channel_id):
266 |         self.id = channel_id
267 | 
268 | 
269 | class PoolMiningChannel(MiningChannel):
270 |     """This mining channel contains mining session and future job.
271 | 
272 |     Currently, the channel holds only 1 future job.
273 |     """
274 | 
275 |     def __init__(self, session, *args, **kwargs):
276 |         """
277 |         :param session: optional mining session process (TODO: review if this is the right place)
278 |         """
279 |         self.future_job = None
280 |         self.session = session
281 |         super().__init__(*args, **kwargs)
282 | 
283 |     def terminate(self):
284 |         self.session.terminate()
285 | 
286 |     def set_session(self, session):
287 |         self.session = session
288 | 
289 |     def take_future_job(self):
290 |         """Takes future job from the channel."""
291 |         assert (
292 |             self.future_job is not None
293 |         ), "BUG: Attempt to take a future job from channel: {}".format(self.id)
294 |         future_job = self.future_job
295 |         self.future_job = None
296 |         return future_job
297 | 
298 |     def add_future_job(self, job):
299 |         """Stores future job ready for mining should a new block be found"""
300 |         assert (
301 |             self.future_job is None
302 |         ), "BUG: Attempt to overwrite an existing future job: {}".format(self.id)
303 |         self.future_job = job
304 | 
305 | 
306 | class ConnectionConfig:
307 |     """Stratum V2 connection configuration.
308 | 
309 |     For now, it is sufficient to record the SetupConnection to have full connection configuration available.
310 |     """
311 | 
312 |     def __init__(self, msg: SetupConnection):
313 |         self.setup_msg = msg
314 | 
315 |     @property
316 |     def requires_version_rolling(self):
317 |         return (
318 |             DownstreamConnectionFlags.REQUIRES_VERSION_ROLLING in self.setup_msg.flags
319 |         )
320 | 
321 | 
322 | class ChannelRegistry:
323 |     """Keeps track of channels on an individual connection"""
324 | 
325 |     def __init__(self, conn_uid):
326 |         self.conn_uid = conn_uid
327 |         self.channels = []
328 | 
329 |     def append(self, channel):
330 |         """Simplify registering new channels"""
331 |         new_channel_id = len(self.channels)
332 |         channel.set_id(new_channel_id)
333 |         self.channels.append(channel)
334 | 
335 |     def get_channel(self, channel_id):
336 |         if channel_id < len(self.channels):
337 |             return self.channels[channel_id]
338 |         else:
339 |             return None
340 | 
--------------------------------------------------------------------------------
/primitives/types.py:
--------------------------------------------------------------------------------
1 | """Protocol specific types"""
2 | import enum
3 | 
4 | 
5 | class DeviceInfo:
6 |     pass
7 | 
8 | 
9 | class ProtocolType:
10 |     MINING_PROTOCOL = 0
11 |     JOB_NEGOTIATION_PROTOCOL = 1
12 |     TEMPLATE_DISTRIBUTION_PROTOCOL = 2
13 |     JOB_DISTRIBUTION_PROTOCOL = 3
14 | 
15 | 
16 | class MiningChannelType(enum.Enum):
17 |     """Type of the mining channel (standard or extended)."""
18 | 
19 |     # Header only mining/standard
20 |     STANDARD = 0
21 |     EXTENDED = 1
22 | 
23 | 
24 | class DownstreamConnectionFlags(enum.Enum):
25 |     """Flags provided by downstream node"""
26 | 
27 |     #: The downstream node requires standard jobs. It doesn't understand group channels - it is unable to process
28 |     #: extended jobs sent to standard channels through a group channel.
29 |     REQUIRES_STANDARD_JOBS = 0
30 | 
31 |     #: If set, the client notifies the server that it will send SetCustomMiningJob on this connection
32 |     REQUIRES_WORK_SELECTION = 1
33 | 
34 |     #: The client requires version rolling for efficiency or correct operation and the server MUST NOT send jobs
35 |     #: which do not allow version rolling.
36 |     REQUIRES_VERSION_ROLLING = 2
37 | 
38 | 
39 | class UpstreamConnectionFlags(enum.Enum):
40 |     """Flags provided by upstream node"""
41 | 
42 |     #: Upstream node will not accept any changes to the version field. Note that if REQUIRES_VERSION_ROLLING was set
43 |     #: in the SetupConnection::flags field, this bit MUST NOT be set. Further, if this bit is set, extended jobs MUST
44 |     #: NOT indicate support for version rolling.
45 |     REQUIRES_FIXED_VERSION = 0
46 | 
47 |     #: Upstream node will not accept opening of a standard channel.
48 |     REQUIRES_EXTENDED_CHANNELS = 1
49 | 
50 | 
51 | class Hash:
52 |     """Hash value doesn't need specific representation within the simulation"""
53 | 
54 |     pass
55 | 
56 | 
57 | class MerklePath:
58 |     """Merkle path doesn't need specific representation within the simulation"""
59 | 
60 |     pass
61 | 
62 | 
63 | class CoinBasePrefix:
64 |     pass
65 | 
66 | 
67 | class CoinBaseSuffix:
68 |     pass
69 | 
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | numpy
2 | simpy
3 | matplotlib
4 | hashids
5 | event_bus
6 | colorama
7 | stringcase
8 | noiseprotocol==0.3.1
9 | ed25519==1.5
10 | base58==2.1.1
11 | dissononce==0.34.3
12 | # asyncio is part of the standard library since Python 3.4; no pip install needed
13 | 
--------------------------------------------------------------------------------
/simulate-pool.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import asyncio  # new module
3 | import base64
4 | import socket
5 | from itertools import cycle
6 | 
7 | import numpy as np
8 | import simpy
9 | from colorama import Fore, init
10 | from cryptography.hazmat.primitives.asymmetric import x25519
11 | from dissononce.cipher.chachapoly import ChaChaPolyCipher
12 | from dissononce.dh.x25519.x25519 import X25519DH
13 | from dissononce.hash.blake2s import Blake2sHash
14 | from dissononce.processing.handshakepatterns.interactive.NX import NXHandshakePattern
15 | from dissononce.processing.impl.cipherstate import CipherState
16 | from dissononce.processing.impl.handshakestate import HandshakeState
17 | from dissononce.processing.impl.symmetricstate import SymmetricState
18 | from event_bus import EventBus
19 | 
20 | import primitives.coins as coins
21 | import primitives.mining_params as mining_params
22 | from primitives.connection import Connection
23 | from primitives.messages import SetupConnection, SetupConnectionSuccess
24 | from primitives.miner import Miner
25 | from primitives.pool import Pool
26 | 
27 | init()
28 | bus = EventBus()
29 | 
30 | 
31 | async def connect():
32 |     np.random.seed(123)
33 |     parser = argparse.ArgumentParser(
34 |         prog="simulate-pool.py",
35 |         description="Simulates a Stratum V2 mining pool",
36 |     )
37 |     parser.add_argument(
38 |         "--limit",
39 |         type=int,
40 |         help="simulation time limit in seconds, default = 50",
41 |         default=50,
42 |     )
43 |     parser.add_argument(
44 |         "--verbose",
45 |         help="display all events (warning: a lot of text is generated)",
46 |         action="store_const",
47 |         const=True,
48 |     )
49 | 
50 |     parser.add_argument(
51 |         "--plain-output",
52 |         help="Print just values to terminal: accepted shares, accepted submits,"
53 |         " stale shares, stale submits, rejected submits",
54 |         action="store_true",
55 |     )
56 | 
57 |     args = parser.parse_args()
58 | 
59 |     if args.verbose:
60 | 
61 |         @bus.on("pool1")
62 |         def subscribe_pool1(ts, conn_uid, message, aux=None):
63 |             print(
64 |                 Fore.LIGHTCYAN_EX,
65 |                 "T+{0:.3f}:".format(ts),
66 |                 "(pool1)",
67 |                 conn_uid if conn_uid is not None else "",
68 |                 message,
69 |                 aux,
70 |                 Fore.RESET,
71 |             )
72 | 
73 |     conn1 = Connection("pool", "stratum", pool_host="localhost", pool_port=2000)
74 | 
75 |     pool = 
Pool( 76 | "pool1", 77 | bus, 78 | default_target=coins.Target.from_difficulty( 79 | 100000, mining_params.diff_1_target 80 | ), 81 | enable_vardiff=True, 82 | ) 83 | 84 | await pool.make_handshake(conn1) 85 | 86 | if args.plain_output: 87 | print( 88 | pool.accepted_shares, 89 | pool.accepted_submits, 90 | pool.stale_shares, 91 | pool.stale_submits, 92 | pool.rejected_submits, 93 | sep=",", 94 | ) 95 | else: 96 | print( 97 | "accepted shares:", 98 | pool.accepted_shares, 99 | "accepted submits:", 100 | pool.accepted_submits, 101 | ) 102 | print( 103 | "stale shares:", 104 | pool.stale_shares, 105 | "stale submits:", 106 | pool.stale_submits, 107 | "rejected submits:", 108 | pool.rejected_submits, 109 | ) 110 | return pool 111 | 112 | 113 | async def loop(): 114 | pool = await connect() 115 | 116 | await asyncio.gather(pool.start_server(), pool.receive_loop(), pool.pow_update()) 117 | 118 | 119 | if __name__ == "__main__": 120 | asyncio.run(loop()) 121 | --------------------------------------------------------------------------------