├── src
│   ├── __init__.py
│   ├── bitcoin
│   │   ├── __init__.py
│   │   ├── descriptors.py
│   │   ├── musig.py
│   │   ├── address.py
│   │   ├── coverage.py
│   │   ├── segwit_addr.py
│   │   ├── authproxy.py
│   │   ├── util.py
│   │   ├── mininode.py
│   │   ├── key.py
│   │   ├── test_node.py
│   │   └── test_framework.py
│   ├── signer
│   │   ├── __init__.py
│   │   ├── nsec.txt
│   │   ├── coordinator_pk.txt
│   │   ├── wallet.py
│   │   └── signer.py
│   ├── utils
│   │   ├── __init__.py
│   │   ├── payload.py
│   │   └── nostr_utils.py
│   └── coordinator
│       ├── __init__.py
│       ├── nsec.txt
│       ├── signer_pks.txt
│       ├── db.template.json
│       ├── mempool_space_client.py
│       ├── db.py
│       ├── coordinator.py
│       └── wallet.py
├── requirements.txt
├── assets
│   ├── images
│   │   ├── flow.png
│   │   ├── munstr-logo.png
│   │   ├── on_chain_tx.png
│   │   ├── frankenstein.png
│   │   └── xcf
│   │       └── munstr-logo.xcf
│   ├── font
│   │   └── 1313MockingbirdLane.ttf
│   └── presentation
│       └── MIT2023-Munstr.pdf
├── start_coordinator.py
├── start_signer.py
├── .gitignore
└── README.md
/src/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/src/bitcoin/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/src/signer/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/src/utils/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/src/coordinator/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | bip32
2 | nostr
3 | colorama
4 | python-bitcoinlib
5 |
--------------------------------------------------------------------------------
/src/signer/nsec.txt:
--------------------------------------------------------------------------------
1 | nsec13kwzykv6tvranz0hj4ckc7mt8jh7x7s8us5j4m0xv2zd02esrz3qy6xq40
--------------------------------------------------------------------------------
/src/coordinator/nsec.txt:
--------------------------------------------------------------------------------
1 | nsec1fwfc64nlq4sefw3myz7sft7npvdh9666x330rtlhzh5uxl88l9gs59th9h
--------------------------------------------------------------------------------
/src/signer/coordinator_pk.txt:
--------------------------------------------------------------------------------
1 | 5b08cc2b5d5771243ad6fe108972411338ea04ed7fc4a499a904e1cf6895f471
--------------------------------------------------------------------------------
/src/coordinator/signer_pks.txt:
--------------------------------------------------------------------------------
1 | 939222991dc2c54918551ead7082bc0ef9fc52e3b705f5d9d48727b57e70b845
--------------------------------------------------------------------------------
/assets/images/flow.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/arminsabouri/munstr/HEAD/assets/images/flow.png
--------------------------------------------------------------------------------
/src/coordinator/db.template.json:
--------------------------------------------------------------------------------
1 | {"xpubs": [], "nonces": [], "wallets": [], "signatures": [], "spends": []}
--------------------------------------------------------------------------------
/assets/images/munstr-logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/arminsabouri/munstr/HEAD/assets/images/munstr-logo.png
--------------------------------------------------------------------------------
/assets/images/on_chain_tx.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/arminsabouri/munstr/HEAD/assets/images/on_chain_tx.png
--------------------------------------------------------------------------------
/assets/images/frankenstein.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/arminsabouri/munstr/HEAD/assets/images/frankenstein.png
--------------------------------------------------------------------------------
/assets/images/xcf/munstr-logo.xcf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/arminsabouri/munstr/HEAD/assets/images/xcf/munstr-logo.xcf
--------------------------------------------------------------------------------
/assets/font/1313MockingbirdLane.ttf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/arminsabouri/munstr/HEAD/assets/font/1313MockingbirdLane.ttf
--------------------------------------------------------------------------------
/assets/presentation/MIT2023-Munstr.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/arminsabouri/munstr/HEAD/assets/presentation/MIT2023-Munstr.pdf
--------------------------------------------------------------------------------
/start_coordinator.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | from src.coordinator.coordinator import run
3 |
4 | def main():
5 | run()
6 |
7 | if __name__ == "__main__":
8 | main()
9 |
--------------------------------------------------------------------------------
/start_signer.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | from src.signer.signer import run_signer
3 |
4 | import sys
5 | import argparse
6 |
7 |
8 | def main(wallet_id=None, key_pair_seed=None, nonce_seed=None):
9 | run_signer(wallet_id=wallet_id, key_pair_seed=key_pair_seed, nonce_seed=nonce_seed)
10 |
11 | if __name__ == "__main__":
12 | parser = argparse.ArgumentParser()
13 | parser.add_argument("--wallet_id", help="Wallet ID", default=None)
14 | parser.add_argument("--key_seed", help="Key seed")
15 | parser.add_argument("--nonce_seed", help="Nonce seed")
16 | args = parser.parse_args()
17 |
18 | # Call the main function with the command line arguments
19 | main(args.wallet_id, args.key_seed, args.nonce_seed)
20 |
21 |
--------------------------------------------------------------------------------
/src/utils/payload.py:
--------------------------------------------------------------------------------
1 | import json
2 | import logging
3 |
4 | from enum import Enum
5 |
6 | # ref_id is also valid but not required
7 | class PayloadKeys(Enum):
8 | REQUEST_ID = 'req_id' # the request ID
9 | COMMAND = 'command' # command to pass into the application
10 | PAYLOAD = 'payload' # payload for the command
11 | TIMESTAMP = 'ts' # timestamp
12 |
13 | def is_valid_json(json_str: str):
14 | try:
15 | json.loads(json_str)
16 | except ValueError as e:
17 | logging.error("Invalid JSON!")
 18 |         logging.error(e)
19 | return False
20 | return True
21 |
22 | def is_valid_payload(payload: dict):
23 | required_keys = [payload_key.value for payload_key in PayloadKeys]
24 | for key in required_keys:
 25 |         if key not in payload:
26 | # logging.error("Key is missing from JSON payload: %s", key)
27 | return False
28 |
29 | return True
--------------------------------------------------------------------------------
/src/coordinator/mempool_space_client.py:
--------------------------------------------------------------------------------
1 | #
2 | # Use mempool.space's API for interfacing with the network. In the future we
3 | # would like users to be able to connect Munstr to their own node
4 | #
5 | import requests
6 | import logging
7 |
8 | API_ENDPOINT = "https://mempool.space/testnet/api"
9 |
10 | def broadcast_transaction(tx_hex):
 11 |     broadcast_transaction_path = "/tx"
12 |
 13 |     # mempool.space's POST /tx endpoint expects the raw transaction hex
 14 |     # as the request body, not a JSON object
 15 |     response = requests.post(API_ENDPOINT + broadcast_transaction_path,
 16 |                              data=tx_hex)
17 |
18 | if (response.status_code == 200):
19 | logging.info('[mempool.space client] Transaction broadcast success!')
20 | else:
 21 |         logging.error('[mempool.space client] Transaction broadcast failed with error code %d', response.status_code)
22 | logging.error(response.content)
23 |
24 | def get_transaction(txid):
25 | get_transaction_path = f"/tx/{txid}"
26 |
27 | try:
28 | response = requests.get(API_ENDPOINT + get_transaction_path)
29 | response.raise_for_status()
30 | transaction = response.json()
31 |
32 | return transaction
33 | except requests.exceptions.RequestException as e:
34 | logging.error("[mempool.space client] Error retrieving transaction %s", txid)
 35 |         logging.error(e)
36 | return None
--------------------------------------------------------------------------------
/src/bitcoin/descriptors.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # Copyright (c) 2019 Pieter Wuille
3 | # Distributed under the MIT software license, see the accompanying
4 | # file COPYING or http://www.opensource.org/licenses/mit-license.php.
5 | """Utility functions related to output descriptors"""
6 |
7 | INPUT_CHARSET = "0123456789()[],'/*abcdefgh@:$%{}IJKLMNOPQRSTUVWXYZ&+-.;<=>?!^_|~ijklmnopqrstuvwxyzABCDEFGH`#\"\\ "
8 | CHECKSUM_CHARSET = "qpzry9x8gf2tvdw0s3jn54khce6mua7l"
9 | GENERATOR = [0xf5dee51989, 0xa9fdca3312, 0x1bab10e32d, 0x3706b1677a, 0x644d626ffd]
10 |
11 | def descsum_polymod(symbols):
12 | """Internal function that computes the descriptor checksum."""
13 | chk = 1
14 | for value in symbols:
15 | top = chk >> 35
16 | chk = (chk & 0x7ffffffff) << 5 ^ value
17 | for i in range(5):
18 | chk ^= GENERATOR[i] if ((top >> i) & 1) else 0
19 | return chk
20 |
21 | def descsum_expand(s):
22 | """Internal function that does the character to symbol expansion"""
23 | groups = []
24 | symbols = []
25 | for c in s:
26 | if not c in INPUT_CHARSET:
27 | return None
28 | v = INPUT_CHARSET.find(c)
29 | symbols.append(v & 31)
30 | groups.append(v >> 5)
31 | if len(groups) == 3:
32 | symbols.append(groups[0] * 9 + groups[1] * 3 + groups[2])
33 | groups = []
34 | if len(groups) == 1:
35 | symbols.append(groups[0])
36 | elif len(groups) == 2:
37 | symbols.append(groups[0] * 3 + groups[1])
38 | return symbols
39 |
40 | def descsum_create(s):
 41 |     """Add a checksum to a descriptor without one."""
42 | symbols = descsum_expand(s) + [0, 0, 0, 0, 0, 0, 0, 0]
43 | checksum = descsum_polymod(symbols) ^ 1
44 | return s + '#' + ''.join(CHECKSUM_CHARSET[(checksum >> (5 * (7 - i))) & 31] for i in range(8))
45 |
46 | def descsum_check(s, require=True):
47 | """Verify that the checksum is correct in a descriptor"""
48 | if not '#' in s:
49 | return not require
50 | if s[-9] != '#':
51 | return False
52 | if not all(x in CHECKSUM_CHARSET for x in s[-8:]):
53 | return False
54 | symbols = descsum_expand(s[:-9]) + [CHECKSUM_CHARSET.find(x) for x in s[-8:]]
55 | return descsum_polymod(symbols) == 1
56 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | ## Python
2 |
3 | # Byte-compiled / optimized / DLL files
4 | __pycache__/
5 | *.py[cod]
6 | *$py.class
7 |
8 | # C extensions
9 | *.so
10 |
11 | # Distribution / packaging
12 | .Python
13 | build/
14 | develop-eggs/
15 | dist/
16 | downloads/
17 | eggs/
18 | .eggs/
19 | lib/
20 | lib64/
21 | parts/
22 | sdist/
23 | var/
24 | wheels/
25 | pip-wheel-metadata/
26 | share/python-wheels/
27 | *.egg-info/
28 | .installed.cfg
29 | *.egg
30 | MANIFEST
31 |
32 | # PyInstaller
33 | # Usually these files are written by a python script from a template
34 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
35 | *.manifest
36 | *.spec
37 |
38 | # Installer logs
39 | pip-log.txt
40 | pip-delete-this-directory.txt
41 |
42 | # Unit test / coverage reports
43 | htmlcov/
44 | .tox/
45 | .nox/
46 | .coverage
47 | .coverage.*
48 | .cache
49 | nosetests.xml
50 | coverage.xml
51 | *.cover
52 | .hypothesis/
53 | .pytest_cache/
54 |
55 | # Translations
56 | *.mo
57 | *.pot
58 |
59 | # Django stuff:
60 | *.log
61 | local_settings.py
62 | db.sqlite3
63 | db.sqlite3-journal
64 |
65 | # Flask stuff:
66 | instance/
67 | .webassets-cache
68 |
69 | # Scrapy stuff:
70 | .scrapy
71 |
72 | # Sphinx documentation
73 | docs/_build/
74 |
75 | # PyBuilder
76 | target/
77 |
78 | # Jupyter Notebook
79 | .ipynb_checkpoints
80 |
81 | # IPython
82 | profile_default/
83 | ipython_config.py
84 |
85 | # pyenv
86 | .python-version
87 |
88 | # pipenv
89 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
90 | # However, in case of collaboration, if having platform-specific dependencies or dependencies
91 | # having no cross-platform support, pipenv may install dependencies that don't work, or not
92 | # install all needed dependencies.
93 | #Pipfile.lock
94 |
95 | # celery beat schedule file
96 | celerybeat-schedule
97 |
98 | # SageMath parsed files
99 | *.sage.py
100 |
101 | # Environments
102 | .env
103 | .venv
104 | env/
105 | venv/
106 | ENV/
107 | env.bak/
108 | venv.bak/
109 |
110 | # Spyder project settings
111 | .spyderproject
112 | .spyproject
113 |
114 | # Rope project settings
115 | .ropeproject
116 |
117 | # mkdocs documentation
118 | /site
119 |
120 | # mypy
121 | .mypy_cache/
122 | .dmypy.json
123 | dmypy.json
124 |
125 | # Pyre type checker
126 | .pyre/
127 |
128 | ## VIM
129 |
130 | # Swap
131 | [._]*.s[a-v][a-z]
132 | [._]*.sw[a-p]
133 | [._]s[a-rt-v][a-z]
134 | [._]ss[a-gi-z]
135 | [._]sw[a-p]
136 |
137 | # Session
138 | Session.vim
139 | Sessionx.vim
140 |
141 | # Temporary
142 | .netrwhist
143 | *~
144 | # Auto-generated tag files
145 | tags
146 | # Persistent undo
147 | [._]*.un~
148 |
149 | # Custom stuff
150 | db.json
151 |
152 | # Lil' Stevie Jobs
153 | .DS_Store
154 |
--------------------------------------------------------------------------------
/src/bitcoin/musig.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2019 The Bitcoin Core developers
2 | # Distributed under the MIT software license, see the accompanying
3 | # file COPYING or http://www.opensource.org/licenses/mit-license.php.
4 | """Preliminary MuSig implementation.
5 |
6 | WARNING: This code is slow, uses bad randomness, does not properly protect
7 | keys, and is trivially vulnerable to side channel attacks. Do not use for
8 | anything but tests.
9 |
10 | See https://eprint.iacr.org/2018/068.pdf for the MuSig signature scheme implemented here.
11 | """
12 |
13 | import hashlib
14 |
15 | from .key import (
16 | SECP256K1,
17 | SECP256K1_ORDER,
18 | TaggedHash,
19 | )
20 |
21 | def generate_musig_key(pubkey_list):
22 | """Aggregate individually generated public keys.
23 |
24 | Returns a MuSig public key as defined in the MuSig paper."""
25 | pubkey_list_sorted = sorted([int.from_bytes(key.get_bytes(), 'big') for key in pubkey_list])
26 |
27 | # Concatenate all the public keys together
28 | L = b''
29 | for px in pubkey_list_sorted:
30 | L += px.to_bytes(32, 'big')
31 |
32 | # hash of all the public keys concatenated
33 | Lh = hashlib.sha256(L).digest()
34 |
35 | # the MuSig coefficients. This prevents a key cancellation attack.
36 | musig_c = {}
37 |
38 | aggregate_key = 0
39 | for key in pubkey_list:
40 | musig_c[key] = hashlib.sha256(Lh + key.get_bytes()).digest()
41 | aggregate_key += key.mul(musig_c[key])
42 | return musig_c, aggregate_key
43 |
44 | def aggregate_schnorr_nonces(nonce_point_list):
45 | """Construct aggregated musig nonce from individually generated nonces."""
46 | R_agg = sum(nonce_point_list)
47 | R_agg_affine = SECP256K1.affine(R_agg.p)
48 | negated = False
49 | if R_agg_affine[1] % 2 != 0:
50 | negated = True
51 | R_agg_negated = SECP256K1.mul([(R_agg.p, SECP256K1_ORDER - 1)])
52 | R_agg.p = R_agg_negated
53 | return R_agg, negated
54 |
55 | def sign_musig(priv_key, k_key, R_musig, P_musig, msg):
56 | """Construct a MuSig partial signature and return the s value."""
57 | assert priv_key.valid
58 | assert priv_key.compressed
59 | assert P_musig.compressed
60 | assert len(msg) == 32
61 | assert k_key is not None and k_key.secret != 0
62 | assert R_musig.get_y() % 2 == 0
63 | e = musig_digest(R_musig, P_musig, msg)
64 | return (k_key.secret + e * priv_key.secret) % SECP256K1_ORDER
65 |
66 | def musig_digest(R_musig, P_musig, msg):
67 | """Get the digest to sign for musig"""
68 | return int.from_bytes(TaggedHash("BIP0340/challenge", R_musig.get_bytes() + P_musig.get_bytes() + msg), 'big') % SECP256K1_ORDER
69 |
70 | def aggregate_musig_signatures(s_list, R_musig):
71 | """Construct valid Schnorr signature from a list of partial MuSig signatures."""
72 | assert s_list is not None and all(isinstance(s, int) for s in s_list)
73 | s_agg = sum(s_list) % SECP256K1_ORDER
74 | return R_musig.get_x().to_bytes(32, 'big') + s_agg.to_bytes(32, 'big')
75 |
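 76 | # Example round-trip (a sketch only, not exercised by this module; assumes
 77 | # two signers whose tweaked keys and nonces were prepared as in
 78 | # src/signer/wallet.py):
 79 | #   cmap, P_agg = generate_musig_key([pub1, pub2])
 80 | #   R_agg, negated = aggregate_schnorr_nonces([R1, R2])  # negate k_i if negated
 81 | #   s_i = sign_musig(tweaked_priv_i, k_i, R_agg, P_agg, msg)  # one per signer
 82 | #   sig = aggregate_musig_signatures([s1, s2], R_agg)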
--------------------------------------------------------------------------------
/src/utils/nostr_utils.py:
--------------------------------------------------------------------------------
1 | import json
2 | import logging
3 | import ssl
4 | import time
5 | import uuid
6 |
7 | from nostr.event import Event, EventKind
8 | from nostr.filter import Filter, Filters
9 | from nostr.key import PrivateKey
10 | from nostr.message_type import ClientMessageType
11 | from nostr.relay_manager import RelayManager
12 |
13 | from src.utils.payload import PayloadKeys
14 |
15 | NOSTR_RELAYS = ["wss://nostr-pub.wellorder.net", "wss://relay.damus.io"]
16 |
17 | def add_relays():
18 | relay_manager = RelayManager()
19 | [relay_manager.add_relay(relay) for relay in NOSTR_RELAYS]
20 |
21 | logging.info("[nostr] Added the following relay(s): %s", NOSTR_RELAYS)
22 | return relay_manager
23 |
24 | def construct_and_publish_event(payload: dict, private_key: PrivateKey, relay_manager: RelayManager):
25 | public_key = private_key.public_key
26 | event = Event(content=json.dumps(payload), public_key=public_key.hex())
27 |
28 | private_key.sign_event(event)
29 | relay_manager.publish_event(event)
30 |
31 | logging.info('[nostr] Published event for the %s command', payload[PayloadKeys.COMMAND.value])
32 |
33 |
34 | # Used to generate both requests and responses
 35 | def generate_nostr_message(command: str, req_id=None, ref_id=None, payload=None):
 36 |     message = {
 37 |         PayloadKeys.COMMAND.value: command,
 38 |         PayloadKeys.REQUEST_ID.value: req_id if req_id is not None else str(uuid.uuid4()),
 39 |         PayloadKeys.PAYLOAD.value: payload if payload is not None else {},
 40 |         PayloadKeys.TIMESTAMP.value: int(time.time())
 41 |     }
42 |
 43 |     if ref_id is not None:
44 | message['ref_id'] = ref_id
45 |
46 | return message
47 |
48 | def init_relay_manager(relay_manager: RelayManager, author_pks: list[str]):
49 | # set up relay subscription
50 | subscription_id = "str"
51 | filters = Filters([Filter(authors=author_pks, kinds=[EventKind.TEXT_NOTE])])
52 | relay_manager.add_subscription(subscription_id, filters)
53 |
54 | # NOTE: This disables ssl certificate verification
55 | relay_manager.open_connections({"cert_reqs": ssl.CERT_NONE})
56 |
57 | # wait a moment for a connection to open to each relay
58 | time.sleep(1.5)
59 |
60 | request = [ClientMessageType.REQUEST, subscription_id]
61 | request.extend(filters.to_json_array())
62 | message = json.dumps(request)
63 |
 64 |     relay_manager.publish_message(message)
65 |
66 | # give the message a moment to send
67 | time.sleep(1)
68 |
69 | logging.info("[nostr] Relay manager started!")
70 |
71 | # read an nsec from a file, return both private and public keys
72 | def read_nsec(nsec_file_name):
73 | with open(nsec_file_name, 'r') as f:
 74 |         try:
 75 |             nsec = f.read().strip()
 76 |             private_key = PrivateKey().from_nsec(nsec)
 77 |             public_key = private_key.public_key.hex()
 78 |             logging.info("[nostr] My public key: %s", public_key)
 79 |             return private_key, public_key
 80 |         except Exception as e:
 81 |             logging.error("[nostr] Unexpected error reading nsec from %s: %s", nsec_file_name, e)
 82 |             raise SystemExit(1)
83 |
84 | # read public keys from a file
85 | def read_public_keys(file_name):
86 | with open(file_name, 'r') as f:
87 | try:
88 | lines = f.readlines()
89 | return [line.strip() for line in lines]
 90 |         except Exception as e:
 91 |             logging.error("[nostr] Unexpected error reading public keys from %s: %s", file_name, e)
 92 |             raise SystemExit(1)
93 |
94 |
--------------------------------------------------------------------------------
/src/bitcoin/address.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # Copyright (c) 2016-2019 The Bitcoin Core developers
3 | # Distributed under the MIT software license, see the accompanying
4 | # file COPYING or http://www.opensource.org/licenses/mit-license.php.
5 | """Encode and decode BASE58, P2PKH and P2SH addresses."""
6 |
7 | import enum
8 |
9 | from .script import hash256, hash160, sha256, CScript, OP_0
10 | from .util import hex_str_to_bytes
11 |
12 | from . import segwit_addr
13 |
14 | ADDRESS_BCRT1_UNSPENDABLE = 'bcrt1qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqq3xueyj'
15 |
16 |
17 | class AddressType(enum.Enum):
18 | bech32 = 'bech32'
19 | p2sh_segwit = 'p2sh-segwit'
20 | legacy = 'legacy' # P2PKH
21 |
22 |
23 | chars = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'
24 |
25 |
26 | def byte_to_base58(b, version):
27 | result = ''
28 | str = b.hex()
29 | str = chr(version).encode('latin-1').hex() + str
30 | checksum = hash256(hex_str_to_bytes(str)).hex()
31 | str += checksum[:8]
32 | value = int('0x'+str,0)
33 | while value > 0:
34 | result = chars[value % 58] + result
35 | value //= 58
36 | while (str[:2] == '00'):
37 | result = chars[0] + result
38 | str = str[2:]
39 | return result
40 |
41 | # TODO: def base58_decode
42 |
43 | def keyhash_to_p2pkh(hash, main = False):
44 | assert len(hash) == 20
45 | version = 0 if main else 111
46 | return byte_to_base58(hash, version)
47 |
48 | def scripthash_to_p2sh(hash, main = False):
49 | assert len(hash) == 20
50 | version = 5 if main else 196
51 | return byte_to_base58(hash, version)
52 |
53 | def key_to_p2pkh(key, main = False):
54 | key = check_key(key)
55 | return keyhash_to_p2pkh(hash160(key), main)
56 |
57 | def script_to_p2sh(script, main = False):
58 | script = check_script(script)
59 | return scripthash_to_p2sh(hash160(script), main)
60 |
61 | def key_to_p2sh_p2wpkh(key, main = False):
62 | key = check_key(key)
63 | p2shscript = CScript([OP_0, hash160(key)])
64 | return script_to_p2sh(p2shscript, main)
65 |
66 | ## TODO
67 | # def witness_v1_program_to_scriptpubkey(witness_program):
68 | # return CScript([OP_1, ])
69 |
70 | def program_to_witness(version, program, main=False):
71 | if (type(program) is str):
72 | program = hex_str_to_bytes(program)
73 | assert 0 <= version <= 16
74 | assert 2 <= len(program) <= 40
75 | assert version > 0 or len(program) in [20, 32]
76 | return segwit_addr.encode_segwit_address("bc" if main else "tb", version, program)
77 |
78 | def script_to_p2wsh(script, main = False):
79 | script = check_script(script)
80 | return program_to_witness(0, sha256(script), main)
81 |
82 | def key_to_p2wpkh(key, main = False):
83 | key = check_key(key)
84 | return program_to_witness(0, hash160(key), main)
85 |
86 | def script_to_p2sh_p2wsh(script, main = False):
87 | script = check_script(script)
88 | p2shscript = CScript([OP_0, sha256(script)])
89 | return script_to_p2sh(p2shscript, main)
90 |
91 | def check_key(key):
92 | if (type(key) is str):
93 | key = hex_str_to_bytes(key) # Assuming this is hex string
94 | if (type(key) is bytes and (len(key) == 33 or len(key) == 65)):
95 | return key
96 | assert False
97 |
98 | def check_script(script):
99 | if (type(script) is str):
100 | script = hex_str_to_bytes(script) # Assuming this is hex string
101 | if (type(script) is bytes or type(script) is CScript):
102 | return script
103 | assert False
104 |
--------------------------------------------------------------------------------
/src/bitcoin/coverage.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # Copyright (c) 2015-2018 The Bitcoin Core developers
3 | # Distributed under the MIT software license, see the accompanying
4 | # file COPYING or http://www.opensource.org/licenses/mit-license.php.
5 | """Utilities for doing coverage analysis on the RPC interface.
6 |
7 | Provides a way to track which RPC commands are exercised during
8 | testing.
9 | """
10 |
11 | import os
12 |
13 |
14 | REFERENCE_FILENAME = 'rpc_interface.txt'
15 |
16 |
17 | class AuthServiceProxyWrapper():
18 | """
19 | An object that wraps AuthServiceProxy to record specific RPC calls.
20 |
21 | """
22 | def __init__(self, auth_service_proxy_instance, coverage_logfile=None):
23 | """
24 | Kwargs:
25 | auth_service_proxy_instance (AuthServiceProxy): the instance
26 | being wrapped.
27 | coverage_logfile (str): if specified, write each service_name
28 | out to a file when called.
29 |
30 | """
31 | self.auth_service_proxy_instance = auth_service_proxy_instance
32 | self.coverage_logfile = coverage_logfile
33 |
34 | def __getattr__(self, name):
35 | return_val = getattr(self.auth_service_proxy_instance, name)
36 | if not isinstance(return_val, type(self.auth_service_proxy_instance)):
37 | # If proxy getattr returned an unwrapped value, do the same here.
38 | return return_val
39 | return AuthServiceProxyWrapper(return_val, self.coverage_logfile)
40 |
41 | def __call__(self, *args, **kwargs):
42 | """
43 | Delegates to AuthServiceProxy, then writes the particular RPC method
44 | called to a file.
45 |
46 | """
47 | return_val = self.auth_service_proxy_instance.__call__(*args, **kwargs)
48 | self._log_call()
49 | return return_val
50 |
51 | def _log_call(self):
52 | rpc_method = self.auth_service_proxy_instance._service_name
53 |
54 | if self.coverage_logfile:
55 | with open(self.coverage_logfile, 'a+', encoding='utf8') as f:
56 | f.write("%s\n" % rpc_method)
57 |
58 | def __truediv__(self, relative_uri):
59 | return AuthServiceProxyWrapper(self.auth_service_proxy_instance / relative_uri,
60 | self.coverage_logfile)
61 |
62 | def get_request(self, *args, **kwargs):
63 | self._log_call()
64 | return self.auth_service_proxy_instance.get_request(*args, **kwargs)
65 |
66 | def get_filename(dirname, n_node):
67 | """
68 | Get a filename unique to the test process ID and node.
69 |
70 | This file will contain a list of RPC commands covered.
71 | """
72 | pid = str(os.getpid())
73 | return os.path.join(
74 | dirname, "coverage.pid%s.node%s.txt" % (pid, str(n_node)))
75 |
76 |
77 | def write_all_rpc_commands(dirname, node):
78 | """
79 | Write out a list of all RPC functions available in `bitcoin-cli` for
80 | coverage comparison. This will only happen once per coverage
81 | directory.
82 |
83 | Args:
84 | dirname (str): temporary test dir
85 | node (AuthServiceProxy): client
86 |
87 | Returns:
88 | bool. if the RPC interface file was written.
89 |
90 | """
91 | filename = os.path.join(dirname, REFERENCE_FILENAME)
92 |
93 | if os.path.isfile(filename):
94 | return False
95 |
96 | help_output = node.help().split('\n')
97 | commands = set()
98 |
99 | for line in help_output:
100 | line = line.strip()
101 |
102 | # Ignore blanks and headers
103 | if line and not line.startswith('='):
104 | commands.add("%s\n" % line.split()[0])
105 |
106 | with open(filename, 'w', encoding='utf8') as f:
107 | f.writelines(list(commands))
108 |
109 | return True
110 |
--------------------------------------------------------------------------------
/src/signer/wallet.py:
--------------------------------------------------------------------------------
1 | from bip32 import BIP32
2 |
3 | from src.bitcoin.key import ECKey, ECPubKey, generate_key_pair
4 | from src.bitcoin.messages import sha256
5 | from src.bitcoin.musig import sign_musig
6 |
7 |
8 | class Wallet:
9 | def __init__(self, wallet_id, key_pair_seed, nonce_seed):
10 | self.wallet_id = wallet_id
11 | self.nonce_seed = int(nonce_seed)
12 |
13 | self.private_key = None
14 | self.public_key = None
15 | self.cmap = None
16 | self.pubkey_agg = None
17 | self.r_agg = None
18 | self.should_negate_nonce = None
19 |
20 | self.current_spend_request_id = None
21 | self.sig_hash = None
22 |
23 | # TODO should use ascii or utf-8?
24 | prv, pk = generate_key_pair(sha256(bytes(key_pair_seed, 'ascii')))
25 |
26 | self.private_key = prv
27 | self.public_key = pk
28 |
29 | def get_root_xpub(self):
30 | return self.get_root_hd_node().get_xpub()
31 |
32 | def get_root_hd_node(self):
33 | key_bytes = self.private_key.get_bytes()
34 | return BIP32.from_seed(key_bytes)
35 |
36 | def get_pubkey_at_index(self, index: int):
37 | root_xpub = self.get_root_xpub()
38 | bip32_node = BIP32.from_xpub(root_xpub)
39 | pk = bip32_node.get_pubkey_from_path(f"m/{index}")
40 |
41 | key = ECPubKey().set(pk)
42 | return key.get_bytes()
43 |
44 | def get_new_nonce(self):
45 | k = ECKey().set(self.nonce_seed)
46 | return k.get_pubkey()
47 |
48 | def get_wallet_id(self):
49 | return self.wallet_id
50 |
51 | def set_cmap(self, cmap):
52 | modified_cmap = {}
53 | # Both key and value should be hex encoded strings
54 | # Key is public key
55 | # value is the challenge
56 | for key, value in cmap.items():
57 | modified_cmap[key] = bytes.fromhex(value)
58 |
59 | self.cmap = modified_cmap
60 |
61 | def get_pubkey(self):
62 | return self.public_key.get_bytes().hex()
63 |
64 | def set_pubkey_agg(self, pubkey_agg):
65 | self.pubkey_agg = ECPubKey().set(bytes.fromhex(pubkey_agg))
66 |
67 | def set_r_agg(self, r_agg):
68 | self.r_agg = ECPubKey().set(bytes.fromhex(r_agg))
69 |
70 | def set_should_negate_nonce(self, value):
71 | self.should_negate_nonce = value
72 |
73 | def set_sig_hash(self, sig_hash):
74 | self.sig_hash = bytes.fromhex(sig_hash)
75 |
76 | def set_current_spend_request_id(self, current_spend_request_id):
77 | self.current_spend_request_id = current_spend_request_id
78 |
79 | def get_private_key_tweaked(self):
 80 |         if self.cmap is not None:
81 | # TODO this is all bip32 stuff
82 | # TODO index is hardcoded at 1
83 | # index = 1
84 | # pk = self.get_pubkey_at_index(index).hex()
85 | # TODO hardcoded pk, get from class variable
86 | # prv = self.get_root_hd_node().get_privkey_from_path(f"m/{index}")
87 | # print("prv", prv)
88 | # private_key = ECKey().set(prv)
89 |
90 | pk = self.public_key.get_bytes().hex()
91 | private_key = self.private_key
92 | tweaked_key = private_key * self.cmap[pk]
93 |
94 | if self.pubkey_agg.get_y() % 2 != 0:
95 | tweaked_key.negate()
96 | self.pubkey_agg.negate()
97 |
98 | return tweaked_key
99 | return None
100 |
101 | def sign_with_current_context(self, nonce: str):
102 |         if self.sig_hash is None or self.cmap is None or self.r_agg is None or self.pubkey_agg is None:
103 | # TODO should throw
104 | return None
105 |
106 | k1 = ECKey().set(self.nonce_seed)
107 | # negate here
108 | if self.should_negate_nonce:
109 | k1.negate()
110 | tweaked_private_key = self.get_private_key_tweaked()
111 | return sign_musig(tweaked_private_key, k1, self.r_agg, self.pubkey_agg, self.sig_hash)
112 |
--------------------------------------------------------------------------------
/src/bitcoin/segwit_addr.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # Copyright (c) 2017 Pieter Wuille
3 | # Distributed under the MIT software license, see the accompanying
4 | # file COPYING or http://www.opensource.org/licenses/mit-license.php.
5 | """Reference implementation for Bech32/Bech32m and segwit addresses."""
6 | from enum import Enum
7 |
8 | CHARSET = "qpzry9x8gf2tvdw0s3jn54khce6mua7l"
9 | BECH32_CONST = 1
10 | BECH32M_CONST = 0x2bc830a3
11 |
12 | class Encoding(Enum):
13 | """Enumeration type to list the various supported encodings."""
14 | BECH32 = 1
15 | BECH32M = 2
16 |
17 |
18 | def bech32_polymod(values):
19 | """Internal function that computes the Bech32 checksum."""
20 | generator = [0x3b6a57b2, 0x26508e6d, 0x1ea119fa, 0x3d4233dd, 0x2a1462b3]
21 | chk = 1
22 | for value in values:
23 | top = chk >> 25
24 | chk = (chk & 0x1ffffff) << 5 ^ value
25 | for i in range(5):
26 | chk ^= generator[i] if ((top >> i) & 1) else 0
27 | return chk
28 |
29 |
30 | def bech32_hrp_expand(hrp):
31 | """Expand the HRP into values for checksum computation."""
32 | return [ord(x) >> 5 for x in hrp] + [0] + [ord(x) & 31 for x in hrp]
33 |
34 |
35 | def bech32_verify_checksum(hrp, data):
36 | """Verify a checksum given HRP and converted data characters."""
37 | check = bech32_polymod(bech32_hrp_expand(hrp) + data)
38 | if check == BECH32_CONST:
39 | return Encoding.BECH32
40 | elif check == BECH32M_CONST:
41 | return Encoding.BECH32M
42 | else:
43 | return None
44 |
45 | def bech32_create_checksum(encoding, hrp, data):
46 | """Compute the checksum values given HRP and data."""
47 | values = bech32_hrp_expand(hrp) + data
48 | const = BECH32M_CONST if encoding == Encoding.BECH32M else BECH32_CONST
49 | polymod = bech32_polymod(values + [0, 0, 0, 0, 0, 0]) ^ const
50 | return [(polymod >> 5 * (5 - i)) & 31 for i in range(6)]
51 |
52 |
53 | def bech32_encode(encoding, hrp, data):
54 | """Compute a Bech32 or Bech32m string given HRP and data values."""
55 | combined = data + bech32_create_checksum(encoding, hrp, data)
56 | return hrp + '1' + ''.join([CHARSET[d] for d in combined])
57 |
58 |
59 | def bech32_decode(bech):
60 | """Validate a Bech32/Bech32m string, and determine HRP and data."""
61 | if ((any(ord(x) < 33 or ord(x) > 126 for x in bech)) or
62 | (bech.lower() != bech and bech.upper() != bech)):
63 | return (None, None, None)
64 | bech = bech.lower()
65 | pos = bech.rfind('1')
66 | if pos < 1 or pos + 7 > len(bech) or len(bech) > 90:
67 | return (None, None, None)
68 | if not all(x in CHARSET for x in bech[pos+1:]):
69 | return (None, None, None)
70 | hrp = bech[:pos]
71 | data = [CHARSET.find(x) for x in bech[pos+1:]]
72 | encoding = bech32_verify_checksum(hrp, data)
73 | if encoding is None:
74 | return (None, None, None)
75 | return (encoding, hrp, data[:-6])
76 |
77 |
78 | def convertbits(data, frombits, tobits, pad=True):
79 | """General power-of-2 base conversion."""
80 | acc = 0
81 | bits = 0
82 | ret = []
83 | maxv = (1 << tobits) - 1
84 | max_acc = (1 << (frombits + tobits - 1)) - 1
85 | for value in data:
86 | if value < 0 or (value >> frombits):
87 | return None
88 | acc = ((acc << frombits) | value) & max_acc
89 | bits += frombits
90 | while bits >= tobits:
91 | bits -= tobits
92 | ret.append((acc >> bits) & maxv)
93 | if pad:
94 | if bits:
95 | ret.append((acc << (tobits - bits)) & maxv)
96 | elif bits >= frombits or ((acc << (tobits - bits)) & maxv):
97 | return None
98 | return ret
99 |
100 |
101 | def decode_segwit_address(hrp, addr):
102 | """Decode a segwit address."""
103 | encoding, hrpgot, data = bech32_decode(addr)
104 | if hrpgot != hrp:
105 | return (None, None)
106 | decoded = convertbits(data[1:], 5, 8, False)
107 | if decoded is None or len(decoded) < 2 or len(decoded) > 40:
108 | return (None, None)
109 | if data[0] > 16:
110 | return (None, None)
111 | if data[0] == 0 and len(decoded) != 20 and len(decoded) != 32:
112 | return (None, None)
113 | if (data[0] == 0 and encoding != Encoding.BECH32) or (data[0] != 0 and encoding != Encoding.BECH32M):
114 | return (None, None)
115 | return (data[0], decoded)
116 |
117 |
118 | def encode_segwit_address(hrp, witver, witprog):
119 | """Encode a segwit address."""
120 | encoding = Encoding.BECH32 if witver == 0 else Encoding.BECH32M
121 | ret = bech32_encode(encoding, hrp, [witver] + convertbits(witprog, 8, 5))
122 | if decode_segwit_address(hrp, ret) == (None, None):
123 | return None
124 | return ret
125 |
--------------------------------------------------------------------------------
/src/coordinator/db.py:
--------------------------------------------------------------------------------
  1 | import json
  2 | import logging
  3 | import os
  4 | import uuid
5 | class UUIDEncoder(json.JSONEncoder):
6 | def default(self, obj):
7 | if isinstance(obj, uuid.UUID):
8 | # If obj is a UUID, serialize it to a string
9 | return str(obj)
10 | # For all other objects, use the default serialization method
11 | return super().default(obj)
12 |
13 | class DB:
14 | def __init__(self, json_file):
15 | if not os.path.isfile(json_file):
16 | raise FileNotFoundError(f"JSON file '{json_file}' not found")
17 | self.json_file = json_file
18 |
19 | def get_data(self):
20 | with open(self.json_file, "r") as f:
21 | return json.load(f)
22 |
23 | def set_data(self, new_data):
24 | with open(self.json_file, "w") as f:
25 | json.dump(new_data, f)
26 |
27 | def get_value(self, key):
28 | with open(self.json_file, "r") as f:
29 | data = json.load(f)
30 | return data.get(key)
31 |
32 | def set_value(self, key, value):
33 | with open(self.json_file, "r") as f:
34 | data = json.load(f)
35 | data[key] = value
36 | with open(self.json_file, "w") as f:
37 | json.dump(data, f)
38 |
39 | ## TODO these utility functions should reside in their own file
40 |
41 | def add_wallet(self, wallet_id, quorum):
42 | with open(self.json_file, "r") as f:
43 | data = json.load(f)
44 |
45 | filtered = [wallet for wallet in data['wallets'] if wallet['wallet_id'] == wallet_id]
46 | if len(filtered) > 0:
47 | return False
48 | data['wallets'].append({'wallet_id': wallet_id, 'quorum': quorum})
49 | with open(self.json_file, "w") as f:
50 | json.dump(data, f)
51 |
52 | return True
53 |
54 | def get_wallet(self, wallet_id):
55 | with open(self.json_file, "r") as f:
56 | data = json.load(f)
57 | filtered = [wallet for wallet in data['wallets'] if wallet['wallet_id'] == wallet_id]
58 | if len(filtered) > 0:
59 | return filtered[0]
60 | return None
61 |
62 | def get_xpubs(self, wallet_id):
63 | with open(self.json_file, "r") as f:
64 | data = json.load(f)
65 | filtered = [xpub for xpub in data['xpubs'] if xpub['wallet_id'] == wallet_id]
66 | if len(filtered) > 0:
67 | return filtered
68 | return None
69 |
70 | def add_xpub(self, wallet_id, xpub):
71 | with open(self.json_file, "r") as f:
72 | data = json.load(f)
73 | # TODO check wallet id exists
 74 |         filtered_xpubs = [entry for entry in data['xpubs'] if entry['wallet_id'] == wallet_id and entry['xpub'] == xpub]
75 | if len(filtered_xpubs) > 0:
76 | return False
77 |
78 | data['xpubs'].append({'wallet_id': wallet_id, 'xpub': xpub})
79 |
80 | with open(self.json_file, "w") as f:
81 | json.dump(data, f)
82 |
83 | return True
84 |
85 | def add_spend_request(self, txid, output_index, prev_script_pubkey, prev_value_sats, spend_request_id, new_address,
86 | value, wallet_id):
87 | with open(self.json_file, "r") as f:
88 | data = json.load(f)
89 | filtered = [spend for spend in data['spends'] if spend['spend_request_id'] == spend_request_id]
90 | if len(filtered) > 0:
91 | return False
92 | data['spends'].append({'spend_request_id': spend_request_id, 'txid': txid, 'output_index': output_index, 'prev_script_pubkey': prev_script_pubkey,
93 | 'prev_value_sats': prev_value_sats, 'new_address': new_address, 'value': value, 'wallet_id': wallet_id})
94 | with open(self.json_file, "w") as f:
95 | json.dump(data, f)
96 |
97 | return True
98 |
99 | def get_spend_request(self, spend_request_id):
100 | with open(self.json_file, "r") as f:
101 | data = json.load(f)
102 |
103 | filtered = [spend for spend in data['spends'] if spend['spend_request_id'] == spend_request_id]
104 |
105 |         if len(filtered) == 0:
106 |             logging.error('[db] Unable to locate spend request %s', spend_request_id)
107 |             return None
108 |         # There should only be one
109 |         return filtered[0]
110 |
111 | def add_nonce(self, nonce, spend_request_id):
112 | with open(self.json_file, "r") as f:
113 | data = json.load(f)
114 | # TODO should protect the same nonce being provided again
115 | data['nonces'].append({'spend_request_id': spend_request_id, 'nonce': nonce})
116 | with open(self.json_file, "w") as f:
117 | json.dump(data, f)
118 |
119 | return True
120 |
121 | def get_all_nonces(self, spend_request_id):
122 | with open(self.json_file, "r") as f:
123 | data = json.load(f)
124 |
125 | # TODO check that the wallet ID exists
126 | nonces = [nonce for nonce in data['nonces'] if nonce['spend_request_id'] == spend_request_id]
127 |
128 | return nonces
129 |
130 | def add_partial_signature(self, signature, spend_request_id):
131 | with open(self.json_file, "r") as f:
132 | data = json.load(f)
133 |
134 | # TODO should guard against providing the same signature again
135 | data['signatures'].append({'spend_request_id': spend_request_id, 'signature': signature})
136 |
137 | with open(self.json_file, "w") as f:
138 | json.dump(data, f)
139 |
140 | return True
141 |
142 | def get_all_signatures(self, spend_request_id):
143 | with open(self.json_file, "r") as f:
144 | data = json.load(f)
145 |
146 | # TODO check that the wallet ID exists
147 | signatures = [signature for signature in data['signatures'] if signature['spend_request_id'] == spend_request_id]
148 | return signatures
--------------------------------------------------------------------------------
/src/coordinator/coordinator.py:
--------------------------------------------------------------------------------
1 | import json
2 | import logging
3 | import time
4 |
5 | from colorama import Fore
6 |
7 | from src.utils.nostr_utils import add_relays, construct_and_publish_event, generate_nostr_message, init_relay_manager, read_nsec, read_public_keys
8 | from src.utils.payload import is_valid_json, is_valid_payload, PayloadKeys
9 | from src.coordinator.wallet import add_xpub, create_wallet, is_valid_command, get_address, save_nonce, start_spend, save_signature
10 | from src.coordinator.db import DB
11 |
12 | header = """
13 | ▄████▄ ▒█████ ▒█████ ██▀███ ▓█████▄ ██▓ ███▄ █ ▄▄▄ ▄▄▄█████▓ ▒█████ ██▀███
14 | ▒██▀ ▀█ ▒██▒ ██▒▒██▒ ██▒▓██ ▒ ██▒▒██▀ ██▌▓██▒ ██ ▀█ █ ▒████▄ ▓ ██▒ ▓▒▒██▒ ██▒▓██ ▒ ██▒
15 | ▒▓█ ▄ ▒██░ ██▒▒██░ ██▒▓██ ░▄█ ▒░██ █▌▒██▒▓██ ▀█ ██▒▒██ ▀█▄ ▒ ▓██░ ▒░▒██░ ██▒▓██ ░▄█ ▒
16 | ▒▓▓▄ ▄██▒▒██ ██░▒██ ██░▒██▀▀█▄ ░▓█▄ ▌░██░▓██▒ ▐▌██▒░██▄▄▄▄██░ ▓██▓ ░ ▒██ ██░▒██▀▀█▄
17 | ▒ ▓███▀ ░░ ████▓▒░░ ████▓▒░░██▓ ▒██▒░▒████▓ ░██░▒██░ ▓██░ ▓█ ▓██▒ ▒██▒ ░ ░ ████▓▒░░██▓ ▒██▒
18 | ░ ░▒ ▒ ░░ ▒░▒░▒░ ░ ▒░▒░▒░ ░ ▒▓ ░▒▓░ ▒▒▓ ▒ ░▓ ░ ▒░ ▒ ▒ ▒▒ ▓▒█░ ▒ ░░ ░ ▒░▒░▒░ ░ ▒▓ ░▒▓░
19 | ░ ▒ ░ ▒ ▒░ ░ ▒ ▒░ ░▒ ░ ▒░ ░ ▒ ▒ ▒ ░░ ░░ ░ ▒░ ▒ ▒▒ ░ ░ ░ ▒ ▒░ ░▒ ░ ▒░
20 | ░ ░ ░ ░ ▒ ░ ░ ░ ▒ ░░ ░ ░ ░ ░ ▒ ░ ░ ░ ░ ░ ▒ ░ ░ ░ ░ ▒ ░░ ░
21 | ░ ░ ░ ░ ░ ░ ░ ░ ░ ░ ░ ░ ░ ░ ░
22 | ░ ░
23 | """
24 |
25 | # Map application commands to the corresponding methods
26 | COMMAND_MAP = {
27 | 'address': get_address,
28 | 'nonce': save_nonce,
29 | 'spend': start_spend,
30 | 'sign': save_signature,
31 | 'wallet': create_wallet,
32 | 'xpub': add_xpub
33 | }
34 |
35 | def setup_logging():
36 | logging.basicConfig()
37 | logging.getLogger().setLevel(logging.INFO)
38 | logging.info(Fore.GREEN + header)
39 |
40 | def run():
41 | setup_logging()
42 |
43 | # start up the db
44 | db = DB('src/coordinator/db.json')
45 |
46 | relay_manager = add_relays()
47 | nostr_private_key, nostr_public_key = read_nsec('src/coordinator/nsec.txt')
48 |
49 | # get the public keys for the signers so we can subscribe to messages from them
50 | signer_pks = read_public_keys('src/coordinator/signer_pks.txt')
51 |
52 | init_relay_manager(relay_manager, signer_pks)
53 |
54 | # initialize a timestamp filter
55 | # this will be used to keep track of messages that we have already seen
56 | timestamp_filter = int(time.time())
57 |
 58 |     while True:
59 | if (not relay_manager.message_pool.has_events()):
60 | logging.info('No messages! Sleeping ZzZzZzzz...')
61 | time.sleep(1)
62 | continue
63 |
64 | new_event = relay_manager.message_pool.get_event()
65 | event_content = new_event.event.content
66 | # print(f"Message content: {event_content}")
67 | # print(f"From Public key {new_event.event.public_key}")
68 |
69 | #
70 | # Event validation
71 | #
72 | if (not is_valid_json(event_content)):
73 | logging.info('Error with new event! Invalid JSON')
74 | continue
75 |
76 | json_payload = json.loads(event_content)
77 | if (not is_valid_payload(json_payload)):
78 | logging.info('Error with new event! Payload does not have the required keys')
79 | continue
80 |
81 | command = json_payload['command']
 82 |         if not is_valid_command(command):
83 | logging.info('%s is not a valid command!', command)
84 | continue
85 |
86 | # skip if this event is old
87 | event_timestamp = json_payload[PayloadKeys.TIMESTAMP.value]
88 | if (event_timestamp < timestamp_filter):
89 | continue
90 |
91 | #
92 | # Handle the command that's in the event
93 | #
94 | try:
95 | logging.info('[coordinator] Handling command of type: %s', command)
96 | result = COMMAND_MAP[command](json_payload['payload'], db)
97 |
98 | # package the result into a response
99 | ref_id = json_payload[PayloadKeys.REQUEST_ID.value]
100 | response_payload = {}
101 |
102 | if command == "wallet":
103 | response_payload = {
104 | 'wallet_id': result
105 | }
106 | elif command == "address":
107 | response_payload = {
108 | 'address': result[0],
109 | 'cmap': result[1],
110 | 'pubkey_agg': result[2]
111 | }
112 | elif command == "spend":
113 | response_payload = {
114 | 'spend_request_id': result
115 | }
116 | elif command == "nonce":
117 |                 if result is not None:
118 | response_payload = {
119 | 'r_agg': result[0],
120 | 'sig_hash': result[1],
121 | 'negated': result[2],
122 | 'spend_request_id': json_payload['payload']['spend_request_id']
123 | }
124 | elif command == "sign":
125 |                 if result is not None:
126 | response_payload = {
127 | 'raw_tx': result,
128 | 'spend_request_id': json_payload['payload']['spend_request_id']
129 | }
130 |             if result is not None:
131 | nostr_response = generate_nostr_message(command=command, ref_id=ref_id, payload=response_payload)
132 | construct_and_publish_event(nostr_response, nostr_private_key, relay_manager)
133 |
134 | except Exception as e:
135 |             logging.error('Something went wrong handling the %s command!', command)
136 |             logging.error(e)
137 |             # fall through so the timestamp filter below still advances
138 |             # TODO better error handling
139 |
140 | # update the timestamp filter to keep track of messages we have already seen
141 | timestamp_filter = int(time.time())
142 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |

5 |
6 |
7 |
8 |
9 |
10 | Secure your Bitcoin with the Munstrous power of decentralized multi-signature technology
🕸🕯 An open source MuSig-based privacy wallet 🕯🕸
11 |
12 |
13 |
14 |
15 |
16 |
17 |
18 | [](https://bitcoin.org) [](https://www.python.org/)
19 |
20 | [](https://github.com/0xBEEFCAF3/munstr/releases/) [](#license) [](https://github.com/0xBEEFCAF3/munstr)
21 |
22 |
23 |
24 |
25 |
26 | ## What is Munstr?
 27 | **Munstr** (MuSig + Nostr) combines Schnorr-signature-based **MuSig** (multisignature) keys in a terminal-based wallet with the decentralized **Nostr** network as a communication layer. Nostr provides a secure, encrypted way to transport and digitally sign bitcoin transactions, so chain analysis cannot identify the nature and setup of the transaction data. To anyone observing the blockchain, Munstr transactions look like single-key **Pay-to-Taproot** (P2TR) spends.
28 |
 29 | This is facilitated through an interactive, multisignature (n-of-n) Bitcoin wallet that enables a group of signers to coordinate a signing session for taproot outputs belonging to an aggregated public key.
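
To make the aggregated key concrete, here is a minimal sketch of the aggregation step using the vendored `src.bitcoin` helpers (the fixed seeds are hypothetical and only make the example reproducible; never derive real keys from guessable strings):

```python
from src.bitcoin.key import generate_key_pair
from src.bitcoin.messages import sha256
from src.bitcoin.musig import generate_musig_key

# Two signers, each with an independently generated key pair
_, pub0 = generate_key_pair(sha256(b'signer 0 demo seed'))
_, pub1 = generate_key_pair(sha256(b'signer 1 demo seed'))

# cmap holds the per-signer MuSig coefficients (these prevent key
# cancellation attacks); pubkey_agg is the single aggregate key that
# appears on chain in the P2TR output.
cmap, pubkey_agg = generate_musig_key([pub0, pub1])
print(pubkey_agg.get_bytes().hex())
```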
30 |
31 |
32 |
33 |
34 |

35 |
36 |
37 |
38 |
39 |
40 | ## Disclaimer
41 | This software is beta and should not be used for any real funds. Code and authors are subject to change. The maintainers take no responsibility for any lost funds or damages incurred.
42 |
43 |
44 | ## Key Features
45 |
46 | 🌐 **Open source** for anyone to use or to contribute to
47 |
48 | 🔐 **Multisignature** keysets to reduce single key risk
49 |
50 | 🔀 **Encrypted Communications** with Nostr decentralized events
51 |
52 | 💪 **Taproot** supported outputs
53 |
54 |
55 | ## Architecture
56 | There are three major components to Munstr.
57 |
58 | ### Signer
59 | The signer is responsible for using private keys in a multisignature keyset to digitally sign a **partially signed bitcoin transaction** (PSBT).
60 |
61 | ### Nostr
62 | The Nostr decentralized network acts as a transport and communications layer for PSBT data.
63 |
64 | ### Coordinator
 65 | The coordinator acts as a mediator between digital signers and wallets. It collects a digital signature from each of the required (n-of-n) signers and assists in broadcasting the fully signed transaction.
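
All three components exchange the same small JSON envelope as the content of Nostr text notes (see `src/utils/payload.py`). As a sketch, a request is built like this, where the payload body shown is a hypothetical example:

```python
from src.utils.nostr_utils import generate_nostr_message

# The envelope always carries command, req_id, payload and ts; an
# optional ref_id is added when a message answers an earlier request.
message = generate_nostr_message(command='address',
                                 payload={'wallet_id': 'example-wallet-id'})
print(message)
```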
66 |
67 |
68 |

69 |
70 |
71 |
72 |
73 | ## Getting started
74 |
 75 | 1. Create and activate a virtualenv: `python3 -m venv .venv && source .venv/bin/activate`
 76 | 2. Install the dependencies: `pip3 install -r requirements.txt`
77 |
78 | ### Running the coordinator
79 |
80 | 1. Initialize the persistent storage: Create a `src/coordinator/db.json` file from the provided `db.template.json`.
81 | ```
82 | cp src/coordinator/db.template.json src/coordinator/db.json
83 | ```
84 | 2. Start the coordinator
85 | ```
86 | ./start_coordinator.py
87 | ```
88 |
89 | ### Running a signer
90 |
91 | `./start_signer.py`
92 |
93 |
94 | Possible arguments:
95 |
 96 | - `--wallet_id`: The ID of an existing wallet to join; the coordinator persists wallets by ID along with their associated information. Default is none.
97 | - `--key_seed`: An optional seed to use when initializing the signer's keys. Not recommended for anything other than testing.
98 | - `--nonce_seed`: An optional seed to use when creating nonces. Not recommended for anything other than testing.
99 |
100 | ### Completing an end-to-end test
101 |
102 | 1. Start the coordinator
103 | 2. In a separate terminal window, start a signer (Signer1) and use the `--key_seed` option to set a seed for the key, and the `--nonce_seed` option to set the nonce seed (must be an integer). Example: `./start_signer.py --key_seed=key0 --nonce_seed=256`
104 | 3. Signer1: Execute the "new wallet" command. When prompted to specify a quorum, enter "2". Take note of the wallet ID that is returned.
105 | 4. In a separate terminal window, start a second signer (Signer2) with the `--wallet_id` flag set to the wallet ID that was returned in the previous step. Example: `./start_signer.py --key_seed=key1 --nonce_seed=256 --wallet_id=527f0dee-8b2a-45a1-87c6-98e9b6f642f7`
106 | 5. Have each signer send keys to the coordinator (`send pk` command)
107 | 6. Have each signer get an address from the coordinator (`address`). Confirm that the addresses are the same. This address corresponds to an aggregate pubkey that combines the keys of each signer.
108 | 7. Outside of Munstr, fund the address.
109 | 8. Have one signer initiate a spend by using the `spend` command.
110 | 9. Execute the `sign` command from each of the signers. After all signers have provided nonces, the coordinator will return the aggregate nonce, and the signers will be prompted to provide a partial signature. The coordinator will then aggregate the signatures and provide a raw hex transaction that is ready for broadcast! (A sketch of the cryptography behind this step follows below.)
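
Steps 8 and 9 compress the whole MuSig ceremony. For the curious, this is a minimal sketch of the same rounds driven directly with the vendored `src.bitcoin` helpers; the seeds and the signed message are hypothetical stand-ins (a real spend signs the transaction's taproot sighash):

```python
from src.bitcoin.key import generate_key_pair
from src.bitcoin.messages import sha256
from src.bitcoin.musig import (aggregate_musig_signatures,
                               aggregate_schnorr_nonces,
                               generate_musig_key, sign_musig)

msg = sha256(b'hypothetical sighash')

# Key round: aggregate the signers' public keys
priv0, pub0 = generate_key_pair(sha256(b'signer 0 demo seed'))
priv1, pub1 = generate_key_pair(sha256(b'signer 1 demo seed'))
cmap, P_agg = generate_musig_key([pub0, pub1])

# Each private key is tweaked by its MuSig coefficient (as in
# src/signer/wallet.py), negating if the aggregate key has an odd y
t0, t1 = priv0 * cmap[pub0], priv1 * cmap[pub1]
if P_agg.get_y() % 2 != 0:
    t0.negate(); t1.negate(); P_agg.negate()

# Nonce round: aggregate the signers' nonce points
k0, R0 = generate_key_pair(sha256(b'nonce 0'))
k1, R1 = generate_key_pair(sha256(b'nonce 1'))
R_agg, negated = aggregate_schnorr_nonces([R0, R1])
if negated:
    k0.negate(); k1.negate()

# Signing round: partial signatures, then one 64-byte BIP340 signature
s0 = sign_musig(t0, k0, R_agg, P_agg, msg)
s1 = sign_musig(t1, k1, R_agg, P_agg, msg)
sig = aggregate_musig_signatures([s0, s1], R_agg)
assert len(sig) == 64  # R.x || s, ready for the taproot witness
```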
111 |
112 | ## Demo
113 | [](https://www.youtube.com/watch?v=9AhzEatrZbg)
114 |
115 | ## Presentation
116 | [Presentation Deck](https://docs.google.com/presentation/d/1UlT6VwL7sNL3wtElnNe2ITDrrGDHLcnl6U_Gsl02dtY/edit?usp=sharing)
117 |
118 | ## Future goals
119 |
120 | - MuSig 2 enhancements
121 | - More accurate transaction fee estimation
122 | - Better nostr encrypted DM support
123 | - Custom nostr relay servers
124 | - Custom nostr PSBT event types
125 | - Node connectivity
126 | - Sovereign TX lookup & broadcast
127 | - Seed Phrases & xpubs
128 | - Hardware Wallet support
129 | - SeedSigner (Taproot incoming)
130 | - Blockstream Jade
131 |
132 |
133 | ## Standing on the shoulders of giants
134 |
135 | In addition to the libraries listed in `requirements.txt`, this project also uses:
136 |
137 | - Code from the Bitcoin Optech Taproot & Schnorr workshop, specifically the Python test framework. The `test_framework` folder has been brought into our project and renamed to `src/bitcoin`.
138 | - [Nostr](https://github.com/nostr-protocol/nostr) by fiatjaf
139 | - [1313 Mockingbird Lane](https://www.dafont.com/1313-mockingbird-lane.font) font by Jeff Bensch
140 | - [Frankenstein Emoji](https://www.pngwing.com/en/free-png-yziyw)
141 | - [Markdown Badges](https://github.com/Ileriayo/markdown-badges) by Ileriayo
142 | - [ANSI FIGlet font: Bloody](https://patorjk.com/software/taag/#p=display&f=Bloody&t=munstr)
143 |
144 |
145 |
146 | ## Resources
147 |
148 | **MuSig**
149 |
150 | - \[blog\] [Taproot and MuSig2 Recap by Elle Mouton](https://ellemouton.com/posts/taproot-prelims/)
151 | - \[video\] [Tim Ruffing | MuSig 2: Simple Two-Round Schnorr Multi-Signatures](https://youtu.be/DRzDDFetS3E)
152 |
153 | ## TeamMunstr
154 |
155 |
156 |
157 |
158 |
159 |
160 |
161 |
162 |
163 |
164 |
165 |
166 | ## License
167 |
168 | Licensed under the MIT License, Copyright © 2023-present TeamMunstr
169 |
--------------------------------------------------------------------------------
/src/bitcoin/authproxy.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2011 Jeff Garzik
2 | #
3 | # Previous copyright, from python-jsonrpc/jsonrpc/proxy.py:
4 | #
5 | # Copyright (c) 2007 Jan-Klaas Kollhof
6 | #
7 | # This file is part of jsonrpc.
8 | #
9 | # jsonrpc is free software; you can redistribute it and/or modify
10 | # it under the terms of the GNU Lesser General Public License as published by
11 | # the Free Software Foundation; either version 2.1 of the License, or
12 | # (at your option) any later version.
13 | #
14 | # This software is distributed in the hope that it will be useful,
15 | # but WITHOUT ANY WARRANTY; without even the implied warranty of
16 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 | # GNU Lesser General Public License for more details.
18 | #
19 | # You should have received a copy of the GNU Lesser General Public License
20 | # along with this software; if not, write to the Free Software
21 | # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
22 | """HTTP proxy for opening RPC connection to bitcoind.
23 |
24 | AuthServiceProxy has the following improvements over python-jsonrpc's
25 | ServiceProxy class:
26 |
27 | - HTTP connections persist for the life of the AuthServiceProxy object
28 | (if server supports HTTP/1.1)
29 | - sends protocol 'version', per JSON-RPC 1.1
30 | - sends proper, incrementing 'id'
31 | - sends Basic HTTP authentication headers
32 | - parses all JSON numbers that look like floats as Decimal
33 | - uses standard Python json lib
34 | """
35 |
36 | import base64
37 | import decimal
38 | from http import HTTPStatus
39 | import http.client
40 | import json
41 | import logging
42 | import os
43 | import socket
44 | import time
45 | import urllib.parse
46 |
47 | HTTP_TIMEOUT = 30
48 | USER_AGENT = "AuthServiceProxy/0.1"
49 |
50 | log = logging.getLogger("BitcoinRPC")
51 |
52 | class JSONRPCException(Exception):
53 | def __init__(self, rpc_error, http_status=None):
54 | try:
55 | errmsg = '%(message)s (%(code)i)' % rpc_error
56 | except (KeyError, TypeError):
57 | errmsg = ''
58 | super().__init__(errmsg)
59 | self.error = rpc_error
60 | self.http_status = http_status
61 |
62 |
63 | def EncodeDecimal(o):
64 | if isinstance(o, decimal.Decimal):
65 | return str(o)
66 | raise TypeError(repr(o) + " is not JSON serializable")
67 |
68 | class AuthServiceProxy():
69 | __id_count = 0
70 |
71 | # ensure_ascii: escape unicode as \uXXXX, passed to json.dumps
72 | def __init__(self, service_url, service_name=None, timeout=HTTP_TIMEOUT, connection=None, ensure_ascii=True):
73 | self.__service_url = service_url
74 | self._service_name = service_name
75 | self.ensure_ascii = ensure_ascii # can be toggled on the fly by tests
76 | self.__url = urllib.parse.urlparse(service_url)
77 | user = None if self.__url.username is None else self.__url.username.encode('utf8')
78 | passwd = None if self.__url.password is None else self.__url.password.encode('utf8')
79 | authpair = user + b':' + passwd
80 | self.__auth_header = b'Basic ' + base64.b64encode(authpair)
81 | self.timeout = timeout
82 | self._set_conn(connection)
83 |
84 | def __getattr__(self, name):
85 | if name.startswith('__') and name.endswith('__'):
86 | # Python internal stuff
87 | raise AttributeError
88 | if self._service_name is not None:
89 | name = "%s.%s" % (self._service_name, name)
90 | return AuthServiceProxy(self.__service_url, name, connection=self.__conn)
91 |
92 | def _request(self, method, path, postdata):
93 | '''
94 | Do an HTTP request, with retry if we get disconnected (e.g. due to a timeout).
95 | This is a workaround for https://bugs.python.org/issue3566 which is fixed in Python 3.5.
96 | '''
97 | headers = {'Host': self.__url.hostname,
98 | 'User-Agent': USER_AGENT,
99 | 'Authorization': self.__auth_header,
100 | 'Content-type': 'application/json'}
101 | if os.name == 'nt':
102 | # Windows somehow does not like to re-use connections
103 | # TODO: Find out why the connection would disconnect occasionally and make it reusable on Windows
104 | self._set_conn()
105 | try:
106 | self.__conn.request(method, path, postdata, headers)
107 | return self._get_response()
108 | except http.client.BadStatusLine as e:
109 | if e.line == "''": # if connection was closed, try again
110 | self.__conn.close()
111 | self.__conn.request(method, path, postdata, headers)
112 | return self._get_response()
113 | else:
114 | raise
115 | except (BrokenPipeError, ConnectionResetError):
116 | # Python 3.5+ raises BrokenPipeError instead of BadStatusLine when the connection was reset
117 | # ConnectionResetError happens on FreeBSD with Python 3.4
118 | self.__conn.close()
119 | self.__conn.request(method, path, postdata, headers)
120 | return self._get_response()
121 |
122 | def get_request(self, *args, **argsn):
123 | AuthServiceProxy.__id_count += 1
124 |
125 | log.debug("-{}-> {} {}".format(
126 | AuthServiceProxy.__id_count,
127 | self._service_name,
128 | json.dumps(args or argsn, default=EncodeDecimal, ensure_ascii=self.ensure_ascii),
129 | ))
130 | if args and argsn:
131 | raise ValueError('Cannot handle both named and positional arguments')
132 | return {'version': '1.1',
133 | 'method': self._service_name,
134 | 'params': args or argsn,
135 | 'id': AuthServiceProxy.__id_count}
136 |
137 | def __call__(self, *args, **argsn):
138 | postdata = json.dumps(self.get_request(*args, **argsn), default=EncodeDecimal, ensure_ascii=self.ensure_ascii)
139 | response, status = self._request('POST', self.__url.path, postdata.encode('utf-8'))
140 | if response['error'] is not None:
141 | raise JSONRPCException(response['error'], status)
142 | elif 'result' not in response:
143 | raise JSONRPCException({
144 | 'code': -343, 'message': 'missing JSON-RPC result'}, status)
145 | elif status != HTTPStatus.OK:
146 | raise JSONRPCException({
147 | 'code': -342, 'message': 'non-200 HTTP status code but no JSON-RPC error'}, status)
148 | else:
149 | return response['result']
150 |
151 | def batch(self, rpc_call_list):
152 | postdata = json.dumps(list(rpc_call_list), default=EncodeDecimal, ensure_ascii=self.ensure_ascii)
153 | log.debug("--> " + postdata)
154 | response, status = self._request('POST', self.__url.path, postdata.encode('utf-8'))
155 | if status != HTTPStatus.OK:
156 | raise JSONRPCException({
157 | 'code': -342, 'message': 'non-200 HTTP status code but no JSON-RPC error'}, status)
158 | return response
159 |
160 | def _get_response(self):
161 | req_start_time = time.time()
162 | try:
163 | http_response = self.__conn.getresponse()
164 | except socket.timeout:
165 | raise JSONRPCException({
166 | 'code': -344,
167 | 'message': '%r RPC took longer than %f seconds. Consider '
168 | 'using larger timeout for calls that take '
169 | 'longer to return.' % (self._service_name,
170 | self.__conn.timeout)})
171 | if http_response is None:
172 | raise JSONRPCException({
173 | 'code': -342, 'message': 'missing HTTP response from server'})
174 |
175 | content_type = http_response.getheader('Content-Type')
176 | if content_type != 'application/json':
177 | raise JSONRPCException(
178 | {'code': -342, 'message': 'non-JSON HTTP response with \'%i %s\' from server' % (http_response.status, http_response.reason)},
179 | http_response.status)
180 |
181 | responsedata = http_response.read().decode('utf8')
182 | response = json.loads(responsedata, parse_float=decimal.Decimal)
183 | elapsed = time.time() - req_start_time
184 | if "error" in response and response["error"] is None:
185 | log.debug("<-%s- [%.6f] %s" % (response["id"], elapsed, json.dumps(response["result"], default=EncodeDecimal, ensure_ascii=self.ensure_ascii)))
186 | else:
187 | log.debug("<-- [%.6f] %s" % (elapsed, responsedata))
188 | return response, http_response.status
189 |
190 | def __truediv__(self, relative_uri):
191 | return AuthServiceProxy("{}/{}".format(self.__service_url, relative_uri), self._service_name, connection=self.__conn)
192 |
193 | def _set_conn(self, connection=None):
194 | port = 80 if self.__url.port is None else self.__url.port
195 | if connection:
196 | self.__conn = connection
197 | self.timeout = connection.timeout
198 | elif self.__url.scheme == 'https':
199 | self.__conn = http.client.HTTPSConnection(self.__url.hostname, port, timeout=self.timeout)
200 | else:
201 | self.__conn = http.client.HTTPConnection(self.__url.hostname, port, timeout=self.timeout)
202 |
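203 | # A minimal usage sketch; the URL, credentials and port below are
204 | # placeholders for a local bitcoind RPC endpoint, not values shipped
205 | # with this project:
206 | #
207 | # rpc = AuthServiceProxy("http://user:pass@127.0.0.1:18332")
208 | # best_hash = rpc.getbestblockhash() # attribute access names the RPC method
209 | # results = rpc.batch([rpc.getblockcount.get_request(),
210 | # rpc.getbestblockhash.get_request()])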
--------------------------------------------------------------------------------
/src/coordinator/wallet.py:
--------------------------------------------------------------------------------
1 | import logging
2 | import uuid
3 | from bip32 import BIP32
4 |
5 | from src.bitcoin.musig import generate_musig_key, aggregate_schnorr_nonces, aggregate_musig_signatures
6 | from src.bitcoin.address import program_to_witness
7 | from src.bitcoin.key import ECPubKey
8 | from src.bitcoin.messages import CTransaction, CTxIn, COutPoint, CTxOut, CScriptWitness, CTxInWitness
9 | from src.bitcoin.script import TaprootSignatureHash, SIGHASH_ALL_TAPROOT
10 | from src.coordinator.mempool_space_client import broadcast_transaction, get_transaction
11 |
12 | # Using Peter Todd's python-bitcoinlib (https://github.com/petertodd/python-bitcoinlib)
13 | from bitcoin.wallet import CBitcoinAddress
14 | import bitcoin
15 |
16 | # can abstract this network selection bit out to a properties/config file
17 | bitcoin.SelectParams('testnet')
18 |
19 | COMMANDS = ['address', 'nonce', 'spend', 'wallet', 'xpub']
20 |
21 | # in memory cache of transactions that are in the process of being spent
22 | # keys are aggregate nonces and values are spending transactions
23 | spending_txs = {}
24 |
25 | def is_valid_command(command: str):
26 | return command in COMMANDS
27 |
28 | def create_spending_transaction(txid, outputIndex, destination_addr, amount_sat, version=1, nSequence=0):
29 | """Construct a CTransaction object that spends the first ouput from txid."""
30 | # Construct transaction
31 | spending_tx = CTransaction()
32 | # Populate the transaction version
33 | spending_tx.nVersion = version
34 | # Populate the locktime
35 | spending_tx.nLockTime = 0
36 |
37 | # Populate the transaction inputs
38 | outpoint = COutPoint(int(txid, 16), outputIndex)
39 | spending_tx_in = CTxIn(outpoint=outpoint, nSequence=nSequence)
40 | spending_tx.vin = [spending_tx_in]
41 |
42 | script_pubkey = CBitcoinAddress(destination_addr).to_scriptPubKey()
43 | dest_output = CTxOut(nValue=amount_sat, scriptPubKey=script_pubkey)
44 | spending_tx.vout = [dest_output]
45 |
46 | return (spending_tx, script_pubkey)
47 |
48 |
49 | def create_wallet(payload: dict, db):
50 | if (not 'quorum' in payload):
51 | raise Exception("[wallet] Cannot create a wallet without the 'quorum' property")
52 |
53 | quorum = payload['quorum']
54 | if (quorum < 1):
55 | raise Exception("[wallet] Quorum must be greater than 1")
56 |
57 | new_wallet_id = str(uuid.uuid4())
58 |
59 | if (db.add_wallet(new_wallet_id, quorum)):
60 | logging.info("[wallet] Saved new wallet ID %s to the database", new_wallet_id)
61 | return new_wallet_id
62 |
63 | # Get a wallet ID from the provided payload. Throw an error if it is missing.
64 | def get_wallet_id(payload: dict):
65 | if (not 'wallet_id' in payload):
66 | raise Exception("[wallet] 'wallet_id' property is missing")
67 |
68 | wallet_id = payload['wallet_id']
69 | return wallet_id
70 |
71 | # Get the spend request ID from the provided payload. Throw an error if it is missing
72 | def get_spend_request_id(payload: dict):
73 | if (not 'spend_request_id' in payload):
74 | raise Exception("[wallet] 'spend_request_id' property is missing")
75 |
76 | spend_request_id = payload['spend_request_id']
77 | return spend_request_id
78 |
79 | def add_xpub(payload: dict, db):
80 | if (not 'xpub' in payload):
81 | raise Exception("[wallet] Cannot add an xpub without the 'xpub' property")
82 |
83 | xpub = payload['xpub']
84 | wallet_id = get_wallet_id(payload)
85 |
86 | if (db.add_xpub(wallet_id, xpub)):
87 | logging.info('[wallet] Added xpub to wallet %s', wallet_id)
88 |
89 | def get_address(payload: dict, db):
90 | index = payload['index']
91 | wallet_id = get_wallet_id(payload)
92 | ec_public_keys = []
93 |
94 | # wallet = db.get_wallet(wallet_id)
95 | wallet_xpubs = db.get_xpubs(wallet_id)
96 |
97 | if (wallet_xpubs == []):
98 | raise Exception('[wallet] No xpubs to create an address from!')
99 |
100 | for xpub in wallet_xpubs:
101 | # The method to generate and aggregate MuSig key expects ECPubKey objects
102 | ec_public_key = ECPubKey()
103 |
104 | # TODO xpubs aren't working quite right. Using regular public keys for now.
105 | # bip32_node = BIP32.from_xpub(xpub['xpub'])
106 | # public_key = bip32_node.get_pubkey_from_path(f"m/{index}")
107 | # ec_public_key.set(public_key)
108 |
109 | ec_public_key.set(bytes.fromhex(xpub['xpub']))
110 | ec_public_keys.append(ec_public_key)
111 |
112 | c_map, pubkey_agg = generate_musig_key(ec_public_keys)
113 | logging.info('[wallet] Aggregate public key: %s', pubkey_agg.get_bytes().hex())
114 |
115 | # Create a segwit v1 address (P2TR) from the aggregate key
116 | p2tr_address = program_to_witness(0x01, pubkey_agg.get_bytes())
117 | logging.info('[wallet] Returning P2TR address %s', p2tr_address)
118 |
119 | # convert the challenges/coefficients to hex so they can be returned to the signer
120 | c_map_hex = {}
121 | for key, value in c_map.items():
122 | # k is the hex encoded pubkey, the value is the challenge/coefficient
123 | k = key.get_bytes().hex()
124 | c_map_hex[k] = value.hex()
125 |
126 | return [p2tr_address, c_map_hex, pubkey_agg.get_bytes().hex()]
127 |
128 | # Initiates a spending transaction
129 | def start_spend(payload: dict, db):
130 | # create an ID for this request
131 | spend_request_id = str(uuid.uuid4())
132 | logging.info('[wallet] Starting spend request with id %s', spend_request_id)
133 |
134 |
135 | if (not 'txid' in payload):
136 | raise Exception("[wallet] Cannot spend without the 'txid' property, which corresponds to the transaction ID of the output that is being spent")
137 |
138 | if (not 'output_index' in payload):
139 | raise Exception("[wallet] Cannot spend without the 'output_index' property, which corresponds to the index of the oputput that is being spent")
140 |
141 | if (not 'new_address' in payload):
142 | raise Exception("[wallet] Cannot spend without the 'new_address' property, which corresponds to the destination address of the transaction")
143 |
144 | if (not 'value' in payload):
145 | raise Exception("[wallet] Cannot spend without the 'value' property, which corresponds to the value (in satoshis) of the output that is being spent")
146 |
147 | txid = payload['txid']
148 | output_index = payload['output_index']
149 | destination_address = payload['new_address']
150 | wallet_id = get_wallet_id(payload)
151 |
152 | # 10% of the input value is reserved for miner fees. Better fee estimation can come later
153 | output_amount = int(payload['value'] * 0.9)
154 |
155 | # Use mempool.space to look up the scriptpubkey for the output being spent
156 | # Could probably find a library to do this so we don't have to make any external calls
157 | tx = get_transaction(txid)
158 | input_script_pub_key = tx['vout'][output_index]['scriptpubkey']
159 | input_value_sats = tx['vout'][output_index]['value']
160 |
161 | # Persist to the db so other signers can easily retrieve this information
162 | if (db.add_spend_request(txid,
163 | output_index,
164 | input_script_pub_key,
165 | input_value_sats,
166 | spend_request_id,
167 | destination_address,
168 | output_amount,
169 | wallet_id)):
170 | logging.info('[wallet] Saved spend request %s to the database', spend_request_id)
171 |
172 | return spend_request_id
173 |
174 | def save_nonce(payload: dict, db):
175 | if (not 'nonce' in payload):
176 | raise Exception("[wallet] Cannot save a nonce without the 'nonce' property")
177 |
178 | nonce = payload['nonce']
179 | spend_request_id = get_spend_request_id(payload)
180 | spend_request = db.get_spend_request(spend_request_id)
181 | if (spend_request is None):
182 | logging.error('[wallet] Cannot find spend request %s in the database', spend_request_id)
183 | return None
184 | wallet_id = spend_request['wallet_id']
185 |
186 | logging.info('[wallet] Saving nonce for request id %s', spend_request_id)
187 |
188 | wallet = db.get_wallet(wallet_id)
189 |
190 | # Save the nonce to the db
191 | db.add_nonce(nonce, spend_request_id)
192 |
193 | logging.info('[wallet] Successfully saved nonce for request id %s', spend_request_id)
194 |
195 | # When the last signer provides a nonce, we can return the aggregate nonce (R_AGG)
196 | nonces = db.get_all_nonces(spend_request_id)
197 |
198 | if (len(nonces) != wallet['quorum']):
199 | return None
200 |
201 | # Generate nonce points
202 | nonce_points = [ECPubKey().set(bytes.fromhex(nonce['nonce'])) for nonce in nonces]
203 | # R_agg is the aggregate nonce, negated is if the private key was negated to produce
204 | # an even y coordinate
205 | R_agg, negated = aggregate_schnorr_nonces(nonce_points)
206 |
207 | (spending_tx, _script_pub_key) = create_spending_transaction(spend_request['txid'],
208 | spend_request['output_index'],
209 | spend_request['new_address'],
210 | spend_request['value'])
211 |
212 | # Create a sighash for ALL (0x00)
213 | sighash_musig = TaprootSignatureHash(spending_tx, [{'n': spend_request['output_index'], 'nValue': spend_request['prev_value_sats'], 'scriptPubKey': bytes.fromhex(spend_request['prev_script_pubkey'])}], SIGHASH_ALL_TAPROOT)
214 | logging.debug('[wallet] sighash: %s', sighash_musig.hex())
215 |
216 | # Update cache
217 | spending_txs[R_agg] = spending_tx
218 |
219 | # Encode everything as hex before returning
220 | return (R_agg.get_bytes().hex(), sighash_musig.hex(), negated)
221 |
222 | def save_signature(payload, db):
223 | if (not 'signature' in payload):
224 | raise Exception("[wallet] Cannot save a signature without the 'signature' property")
225 |
226 | signature = payload['signature']
227 | spend_request_id = get_spend_request_id(payload)
228 |
229 | logging.info("[wallet] Recieved partial signature for spend request %s", spend_request_id)
230 |
231 | db.add_partial_signature(signature, spend_request_id)
232 |
233 | spend_request = db.get_spend_request(spend_request_id)
234 | wallet_id = spend_request['wallet_id']
235 | wallet = db.get_wallet(wallet_id)
236 |
237 | sigs = db.get_all_signatures(spend_request_id)
238 | sigs = [sig['signature'] for sig in sigs]
239 |
240 | nonces = db.get_all_nonces(spend_request_id)
241 | quorum = wallet['quorum']
242 | if len(nonces) != quorum or len(sigs) != quorum:
243 | logging.error('[wallet] Number of nonces and signatures does not match expected quorum of %d', quorum)
244 | return None
245 |
246 | nonce_points = [ECPubKey().set(bytes.fromhex(nonce['nonce'])) for nonce in nonces]
247 |
248 | # Aggregate keys and signatures
249 | R_agg, negated = aggregate_schnorr_nonces(nonce_points)
250 |
251 | # Retrieve the current transaction from the cache
252 | spending_tx = spending_txs[R_agg]
253 |
254 | # The aggregate signature
255 | tx_sig_agg = aggregate_musig_signatures(sigs, R_agg)
256 |
257 | # Add the aggregate signature to the witness stack
258 | witness_stack = CScriptWitness()
259 | witness_stack.stack.append(tx_sig_agg)
260 |
261 | # Add the witness to the transaction
262 | spending_tx.wit.vtxinwit.append(CTxInWitness(witness_stack))
263 |
264 | tx_serialized_hex = spending_tx.serialize().hex()
265 | logging.info("[wallet] Serialized transaction hex, ready for broadcast: %s", tx_serialized_hex)
266 |
267 | # Uncomment to broadcast the transaction
268 | # txid = broadcast_transaction(tx_serialized_hex)
269 | # print("TXID", txid)
270 |
271 | return tx_serialized_hex
272 |
273 |
274 |
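275 | # A minimal usage sketch for create_spending_transaction; the txid and
276 | # destination address below are placeholders, not real outputs:
277 | #
278 | # spending_tx, script_pubkey = create_spending_transaction(
279 | # "aa" * 32, # txid of the funding transaction (hex string)
280 | # 0, # index of the output being spent
281 | # "tb1q...", # destination address (testnet)
282 | # 90000) # amount to send, in satoshis
283 | # print(spending_tx.serialize().hex())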
--------------------------------------------------------------------------------
/src/signer/signer.py:
--------------------------------------------------------------------------------
1 | from colorama import Fore
2 |
3 | from src.signer.wallet import Wallet
4 | from src.utils.nostr_utils import generate_nostr_message, add_relays, construct_and_publish_event, init_relay_manager, read_nsec, read_public_keys
5 | from src.utils.payload import is_valid_json, is_valid_payload
6 |
7 | from enum import Enum
8 |
9 | import json
10 | import ssl
11 | import time
12 | import uuid
13 | import logging
14 |
15 |
16 | header = """
17 | ██████ ██▓ ▄████ ███▄ █ ▓█████ ██▀███
18 | ▒██ ▒ ▓██▒ ██▒ ▀█▒ ██ ▀█ █ ▓█ ▀ ▓██ ▒ ██▒
19 | ░ ▓██▄ ▒██▒▒██░▄▄▄░▓██ ▀█ ██▒▒███ ▓██ ░▄█ ▒
20 | ▒ ██▒░██░░▓█ ██▓▓██▒ ▐▌██▒▒▓█ ▄ ▒██▀▀█▄
21 | ▒██████▒▒░██░░▒▓███▀▒▒██░ ▓██░░▒████▒░██▓ ▒██▒
22 | ▒ ▒▓▒ ▒ ░░▓ ░▒ ▒ ░ ▒░ ▒ ▒ ░░ ▒░ ░░ ▒▓ ░▒▓░
23 | ░ ░▒ ░ ░ ▒ ░ ░ ░ ░ ░░ ░ ▒░ ░ ░ ░ ░▒ ░ ▒░
24 | ░ ░ ░ ▒ ░░ ░ ░ ░ ░ ░ ░ ░░ ░
25 | ░ ░ ░ ░ ░ ░ ░
26 | """
27 |
28 | COORDINATOR_TIMEOUT = 5 # seconds
29 |
30 | class SignerCommands(Enum):
31 | GENERATE_ADDRESS = 'address'
32 | NEW_WALLET = 'new wallet'
33 | SEND_PUBLIC_KEY = 'send pk'
34 | SIGN = 'sign'
35 | SPEND = 'spend'
36 |
37 | def read_coordinator_messages(relay_manager, private_key, time_stamp_filter=None):
38 | payloads = []
39 | while relay_manager.message_pool.has_events():
40 | event_msg = relay_manager.message_pool.get_event()
41 | # Uncomment in debug
42 | # print(f"Message content: {event_msg.event.content}")
43 | # print(f"From Public key {event_msg.event.public_key}")
44 | if not is_valid_json(event_msg.event.content):
45 | continue
46 |
47 | json_payload = json.loads(event_msg.event.content)
48 | if not is_valid_payload(json_payload):
49 | continue
50 |
51 | if time_stamp_filter is not None and json_payload['ts'] < time_stamp_filter:
52 | continue
53 |
54 | payloads.append(json_payload)
55 | return payloads
56 |
57 | def wait_for_coordinator(relay_manager, nostr_private_key, time_stamp, command, req_id):
58 | # Wait for a bit; ideally this would be an exponential backoff that times out after n tries
59 | time.sleep(COORDINATOR_TIMEOUT)
60 | payloads = read_coordinator_messages(
61 | relay_manager, nostr_private_key, time_stamp_filter=time_stamp)
62 |
63 | filtered_payloads = [payload for payload in payloads if payload['command']
64 | == command and payload['ref_id'] == req_id]
65 |
66 | if len(filtered_payloads) == 0:
67 | logging.info('Coordinator did not respond to %s command (request ID: %s)', command, req_id)
68 | return None
69 |
70 | return filtered_payloads[0]
71 |
72 |
73 | def handle_create_wallet(quorum, relay_manager, private_key):
74 | time_stamp = int(time.time())
75 | req_id = str(uuid.uuid4())
76 | new_wallet_payload = generate_nostr_message(
77 | command='wallet', req_id=req_id, payload={'quorum': quorum})
78 | construct_and_publish_event(new_wallet_payload, private_key, relay_manager)
79 |
80 | logging.info('Nostr payload sent to coordinator, awaiting response')
81 | coordinator_response = wait_for_coordinator(relay_manager, private_key, time_stamp, "wallet", req_id)
82 |
83 | if coordinator_response is None:
84 | logging.info('Coordinator did not respond to %s command', SignerCommands.NEW_WALLET.value)
85 | return None
86 |
87 | new_wallet_id = coordinator_response['payload']['wallet_id']
88 | logging.info("Wallet created with ID: %s", new_wallet_id)
89 |
90 | return new_wallet_id
91 |
92 | # NOTE the xpub command does not expect a response
93 | def handle_create_xpub(wallet, relay_manager, private_key):
94 | # NOTE a plain public key is sent under the 'xpub' key for now (xpub derivation is a TODO in the coordinator)
95 | add_xpub_payload = generate_nostr_message(
96 | command='xpub', payload={'wallet_id': wallet.get_wallet_id(), 'xpub': wallet.get_pubkey()})
97 | construct_and_publish_event(add_xpub_payload, private_key, relay_manager)
98 | print("Operation Finished")
99 |
100 |
101 | def handle_get_address(wallet, index, relay_manager, private_key):
102 | time_stamp = int(time.time())
103 | req_id = str(uuid.uuid4())
104 | get_address_payload = generate_nostr_message(command='address', req_id=req_id, payload={
105 | 'wallet_id': wallet.get_wallet_id(), 'index': index})
106 | construct_and_publish_event(
107 | get_address_payload, private_key, relay_manager)
108 |
109 | logging.info('Nostr payload sent to coordinator, awaiting response')
110 | coordinator_response = wait_for_coordinator(relay_manager, private_key, time_stamp, "address", req_id)
111 |
112 | if coordinator_response is None:
113 | logging.info('Coordinator did not respond to %s command', SignerCommands.GENERATE_ADDRESS.value)
114 | return None
115 |
116 | new_address = coordinator_response['payload']
117 | return new_address
118 |
119 |
120 | def handle_spend(outpoint, new_address, value, wallet, relay_manager, private_key):
121 | time_stamp = int(time.time())
122 | req_id = str(uuid.uuid4())
123 | start_spend_payload = generate_nostr_message(command='spend', req_id=req_id, payload={'wallet_id': wallet.get_wallet_id(
124 | ), 'txid': outpoint[0], 'output_index': outpoint[1], 'new_address': new_address, 'value': value})
125 | construct_and_publish_event(
126 | start_spend_payload, private_key, relay_manager)
127 |
128 | logging.info('Nostr payload sent to coordinator, awaiting response')
129 | coordinator_response = wait_for_coordinator(relay_manager, private_key, time_stamp, "spend", req_id)
130 |
131 | if coordinator_response is None:
132 | logging.info('Coordinator did not respond to %s command', SignerCommands.SPEND.value)
133 | return None
134 |
135 | spend_request_id = coordinator_response['payload']['spend_request_id']
136 | return spend_request_id
137 |
138 |
139 | def handle_sign_tx(spend_request_id, wallet, relay_manager, private_key):
140 | time_stamp = int(time.time())
141 | req_id = str(uuid.uuid4())
142 | nonce = wallet.get_new_nonce().get_bytes().hex()
143 | nonce_payload = generate_nostr_message(command='nonce', req_id=req_id, payload={
144 | 'spend_request_id': spend_request_id, 'nonce': nonce})
145 | construct_and_publish_event(
146 | nonce_payload, private_key, relay_manager)
147 |
148 | logging.info('Nonce sent! Awaiting response...')
149 | # Wait for a bit; ideally this would be an exponential backoff that times out after n tries
150 | time.sleep(COORDINATOR_TIMEOUT)
151 | # Here we want to wait for other signers to provide their nonces
152 | # Also assuming the coordinator will not send other types of messages
153 | nonce_response = None
154 | while True:
155 | if not relay_manager.message_pool.has_events():
156 | logging.info("Waiting for other signers to send nonce.. ")
157 | time.sleep(5)
158 |
159 | payloads = read_coordinator_messages(
160 | relay_manager, private_key, time_stamp_filter=time_stamp)
161 | logging.debug(payloads)
162 | filtered_payloads = [payload for payload in payloads if payload['command']
163 | == "nonce" and 'spend_request_id' in payload['payload'] and payload['payload']['spend_request_id'] == spend_request_id]
164 | logging.info(filtered_payloads)
165 | if len(filtered_payloads) == 0:
166 | logging.info('Coordinator did not respond to nonce command')
167 | continue
168 | # The coordinator shouldn't respond with more than one nonce note
169 | nonce_response = filtered_payloads[0]
170 | break
171 | if nonce_response is None:
172 | logging.info('Coordinator did not respond to nonce command')
173 | return None
174 | # At this point all signers have provided nonces, so we should have an aggregate nonce and a sighash
175 | r_agg = nonce_response['payload']['r_agg']
176 | sig_hash = nonce_response['payload']['sig_hash']
177 | should_negate_nonce = nonce_response['payload']['negated']
178 |
179 | wallet.set_r_agg(r_agg)
180 | wallet.set_sig_hash(sig_hash)
181 | wallet.set_should_negate_nonce(should_negate_nonce)
182 |
183 | partial_signature = wallet.sign_with_current_context(nonce)
184 | logging.info(f"Providing partial signatuire: {partial_signature}")
185 |
186 | # Provide the coordinator with the partial signature
187 | sign_payload = generate_nostr_message(command='sign', req_id=req_id, payload={
188 | 'spend_request_id': spend_request_id, 'signature': partial_signature})
189 | construct_and_publish_event(
190 | sign_payload, private_key, relay_manager)
191 |
192 | logging.info('Nostr payload sent to coordinator, awaiting response')
193 | # Wait for a bit; ideally this would be an exponential backoff that times out after n tries
194 | time.sleep(COORDINATOR_TIMEOUT)
195 | sign_response = None
196 | # Here we want to wait for other signers to provide their signatures
197 | while True:
198 | if not relay_manager.message_pool.has_events():
199 | logging.info("Waiting for other signers to send signatures... ")
200 | time.sleep(5)
201 |
202 | payloads = read_coordinator_messages(
203 | relay_manager, private_key, time_stamp_filter=time_stamp)
204 | filtered_payloads = [payload for payload in payloads if payload['command']
205 | == "sign" and 'spend_request_id' in payload['payload'] and payload['payload']['spend_request_id'] == spend_request_id]
206 |
207 | if len(filtered_payloads) == 0:
208 | logging.info('Coordinator did not respond to sign command')
209 | continue
210 | # The coordinator shouldn't respond with more than one sign note
211 | sign_response = filtered_payloads[0]
212 | break
213 |
214 | if sign_response is None:
215 | logging.info('Coordinator did not respond to sign command')
216 | return None
217 | raw_tx = sign_response['payload']['raw_tx']
218 | logging.info(f"Got rawtx from cordinator {raw_tx}")
219 | return raw_tx
220 |
221 | def setup_logging():
222 | logging.basicConfig()
223 | logging.getLogger().setLevel(logging.INFO)
224 | logging.info(Fore.RED + header)
225 |
226 | def run_signer(wallet_id=None, key_pair_seed=None, nonce_seed=None):
227 | setup_logging()
228 |
229 | relay_manager = add_relays()
230 | nostr_private_key, nostr_public_key = read_nsec('src/signer/nsec.txt')
231 |
232 | # get the public key for the coordinator so we can subscribe to messages from it
233 | coordinator_pk = read_public_keys('src/signer/coordinator_pk.txt')[0]
234 |
235 | init_relay_manager(relay_manager, [coordinator_pk])
236 | logging.info('Relay manager started')
237 |
238 | # Load the wallet if an ID was provided
239 | wallet = None
240 | if wallet_id is not None:
241 | wallet = Wallet(wallet_id=wallet_id, key_pair_seed=key_pair_seed, nonce_seed=nonce_seed)
242 |
243 | while True:
244 | user_input = input("Enter a command: ")
245 |
246 | if user_input.lower() == SignerCommands.NEW_WALLET.value:
247 | quorum = int(input("Enter a quorum: "))
248 | logging.info(f"Creating a new wallet with {quorum} signers ...")
249 | wallet_id = handle_create_wallet(
250 | quorum, relay_manager, nostr_private_key)
251 | # TODO: nonce_seed is currently required
252 | wallet = Wallet(wallet_id, key_pair_seed=key_pair_seed, nonce_seed=nonce_seed)
253 |
254 | elif user_input.lower() == SignerCommands.SEND_PUBLIC_KEY.value:
255 | logging.info("Generating and posting the public key...")
256 | handle_create_xpub(wallet, relay_manager, nostr_private_key)
257 |
258 | elif user_input.lower() == SignerCommands.GENERATE_ADDRESS.value:
259 | # TODO bug: you cannot sign or spend without getting an address first
260 | logging.info("Generating a new address...")
261 | address_payload = handle_get_address(
262 | wallet, 0, relay_manager, nostr_private_key)
263 |
264 | wallet.set_cmap(address_payload['cmap'])
265 | wallet.set_pubkey_agg(address_payload['pubkey_agg'])
266 | logging.info(f"Got address {address_payload['address']}")
267 |
268 | elif user_input.lower() == SignerCommands.SPEND.value:
269 | logging.info("Preparing to spend funds...")
270 | txid = input("Enter previous tx id: ")
271 | index = int(input("Enter output index: "))
272 | new_address = input("Destination address (where would you like to send funds to?): ")
273 | sats = int(input("Amount in satoshis (how much are we spending?): "))
274 |
275 | spend_request_id = handle_spend(
276 | [txid, index], new_address, sats, wallet, relay_manager, nostr_private_key)
277 | wallet.set_current_spend_request_id(spend_request_id)
278 | logging.info(
279 | f'Your spend request id is {spend_request_id}; next, provide nonces and signatures!')
280 |
281 | elif user_input.lower() == SignerCommands.SIGN.value:
282 | spend_request_id = input("Provide a spend request id: ")
283 | raw_tx = handle_sign_tx(
284 | spend_request_id, wallet, relay_manager, nostr_private_key)
285 |
286 | else:
287 | possible_commands = [c.value for c in SignerCommands]
288 | logging.info("Invalid command. Please enter one of the following: %s", possible_commands)
289 |
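290 | # A minimal usage sketch: start an interactive signer session. The seed
291 | # values below are illustrative placeholders; the expected seed format is
292 | # defined by Wallet in src/signer/wallet.py:
293 | #
294 | # run_signer(wallet_id=None,
295 | # key_pair_seed='a' * 64,
296 | # nonce_seed='b' * 64)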
--------------------------------------------------------------------------------
/src/bitcoin/util.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # Copyright (c) 2014-2019 The Bitcoin Core developers
3 | # Distributed under the MIT software license, see the accompanying
4 | # file COPYING or http://www.opensource.org/licenses/mit-license.php.
5 | """Helpful routines for regression testing."""
6 |
7 | from base64 import b64encode
8 | from binascii import unhexlify
9 | from decimal import Decimal, ROUND_DOWN
10 | import inspect
11 | import json
12 | import logging
13 | import os
14 | import random
15 | import re
16 | from subprocess import CalledProcessError
17 | import time
18 |
19 | from . import coverage
20 | from .authproxy import AuthServiceProxy, JSONRPCException
21 | from io import BytesIO
22 |
23 | logger = logging.getLogger("TestFramework.utils")
24 |
25 | # Assert functions
26 | ##################
27 |
28 | def assert_fee_amount(fee, tx_size, fee_per_kB):
29 | """Assert the fee was in range"""
30 | target_fee = round(tx_size * fee_per_kB / 1000, 8)
31 | if fee < target_fee:
32 | raise AssertionError("Fee of %s BTC too low! (Should be %s BTC)" % (str(fee), str(target_fee)))
33 | # allow the wallet's estimation to be at most 2 bytes off
34 | if fee > (tx_size + 2) * fee_per_kB / 1000:
35 | raise AssertionError("Fee of %s BTC too high! (Should be %s BTC)" % (str(fee), str(target_fee)))
36 |
37 | def assert_equal(thing1, thing2, *args):
38 | if thing1 != thing2 or any(thing1 != arg for arg in args):
39 | raise AssertionError("not(%s)" % " == ".join(str(arg) for arg in (thing1, thing2) + args))
40 |
41 | def assert_greater_than(thing1, thing2):
42 | if thing1 <= thing2:
43 | raise AssertionError("%s <= %s" % (str(thing1), str(thing2)))
44 |
45 | def assert_greater_than_or_equal(thing1, thing2):
46 | if thing1 < thing2:
47 | raise AssertionError("%s < %s" % (str(thing1), str(thing2)))
48 |
49 | def assert_raises(exc, fun, *args, **kwds):
50 | assert_raises_message(exc, None, fun, *args, **kwds)
51 |
52 | def assert_raises_message(exc, message, fun, *args, **kwds):
53 | try:
54 | fun(*args, **kwds)
55 | except JSONRPCException:
56 | raise AssertionError("Use assert_raises_rpc_error() to test RPC failures")
57 | except exc as e:
58 | if message is not None and message not in e.error['message']:
59 | raise AssertionError("Expected substring not found:" + e.error['message'])
60 | except Exception as e:
61 | raise AssertionError("Unexpected exception raised: " + type(e).__name__)
62 | else:
63 | raise AssertionError("No exception raised")
64 |
65 | def assert_raises_process_error(returncode, output, fun, *args, **kwds):
66 | """Execute a process and asserts the process return code and output.
67 |
68 | Calls function `fun` with arguments `args` and `kwds`. Catches a CalledProcessError
69 | and verifies that the return code and output are as expected. Throws AssertionError if
70 | no CalledProcessError was raised or if the return code and output are not as expected.
71 |
72 | Args:
73 | returncode (int): the process return code.
74 | output (string): [a substring of] the process output.
75 | fun (function): the function to call. This should execute a process.
76 | args*: positional arguments for the function.
77 | kwds**: named arguments for the function.
78 | """
79 | try:
80 | fun(*args, **kwds)
81 | except CalledProcessError as e:
82 | if returncode != e.returncode:
83 | raise AssertionError("Unexpected returncode %i" % e.returncode)
84 | if output not in e.output:
85 | raise AssertionError("Expected substring not found:" + e.output)
86 | else:
87 | raise AssertionError("No exception raised")
88 |
89 | def assert_raises_rpc_error(code, message, fun, *args, **kwds):
90 | """Run an RPC and verify that a specific JSONRPC exception code and message is raised.
91 |
92 | Calls function `fun` with arguments `args` and `kwds`. Catches a JSONRPCException
93 | and verifies that the error code and message are as expected. Throws AssertionError if
94 | no JSONRPCException was raised or if the error code/message are not as expected.
95 |
96 | Args:
97 | code (int), optional: the error code returned by the RPC call (defined
98 | in src/rpc/protocol.h). Set to None if checking the error code is not required.
99 | message (string), optional: [a substring of] the error string returned by the
100 | RPC call. Set to None if checking the error string is not required.
101 | fun (function): the function to call. This should be the name of an RPC.
102 | args*: positional arguments for the function.
103 | kwds**: named arguments for the function.
104 | """
105 | assert try_rpc(code, message, fun, *args, **kwds), "No exception raised"
106 |
107 | def try_rpc(code, message, fun, *args, **kwds):
108 | """Tries to run an rpc command.
109 |
110 | Test against error code and message if the rpc fails.
111 | Returns whether a JSONRPCException was raised."""
112 | try:
113 | fun(*args, **kwds)
114 | except JSONRPCException as e:
115 | # JSONRPCException was thrown as expected. Check the code and message values are correct.
116 | if (code is not None) and (code != e.error["code"]):
117 | raise AssertionError("Unexpected JSONRPC error code %i" % e.error["code"])
118 | if (message is not None) and (message not in e.error['message']):
119 | raise AssertionError("Expected substring not found:" + e.error['message'])
120 | return True
121 | except Exception as e:
122 | raise AssertionError("Unexpected exception raised: " + type(e).__name__)
123 | else:
124 | return False
125 |
126 | def assert_is_hex_string(string):
127 | try:
128 | int(string, 16)
129 | except Exception as e:
130 | raise AssertionError(
131 | "Couldn't interpret %r as hexadecimal; raised: %s" % (string, e))
132 |
133 | def assert_is_hash_string(string, length=64):
134 | if not isinstance(string, str):
135 | raise AssertionError("Expected a string, got type %r" % type(string))
136 | elif length and len(string) != length:
137 | raise AssertionError(
138 | "String of length %d expected; got %d" % (length, len(string)))
139 | elif not re.match('[abcdef0-9]+$', string):
140 | raise AssertionError(
141 | "String %r contains invalid characters for a hash." % string)
142 |
143 | def assert_array_result(object_array, to_match, expected, should_not_find=False):
144 | """
145 | Pass in array of JSON objects, a dictionary with key/value pairs
146 | to match against, and another dictionary with expected key/value
147 | pairs.
148 | If the should_not_find flag is true, to_match should not be found
149 | in object_array
150 | """
151 | if should_not_find:
152 | assert_equal(expected, {})
153 | num_matched = 0
154 | for item in object_array:
155 | all_match = True
156 | for key, value in to_match.items():
157 | if item[key] != value:
158 | all_match = False
159 | if not all_match:
160 | continue
161 | elif should_not_find:
162 | num_matched = num_matched + 1
163 | for key, value in expected.items():
164 | if item[key] != value:
165 | raise AssertionError("%s : expected %s=%s" % (str(item), str(key), str(value)))
166 | num_matched = num_matched + 1
167 | if num_matched == 0 and not should_not_find:
168 | raise AssertionError("No objects matched %s" % (str(to_match)))
169 | if num_matched > 0 and should_not_find:
170 | raise AssertionError("Objects were found %s" % (str(to_match)))
171 |
172 | # Utility functions
173 | ###################
174 |
175 | def check_json_precision():
176 | """Make sure json library being used does not lose precision converting BTC values"""
177 | n = Decimal("20000000.00000003")
178 | satoshis = int(json.loads(json.dumps(float(n))) * 1.0e8)
179 | if satoshis != 2000000000000003:
180 | raise RuntimeError("JSON encode/decode loses precision")
181 |
182 | def count_bytes(hex_string):
183 | return len(bytearray.fromhex(hex_string))
184 |
185 |
186 | def hex_str_to_bytes(hex_str):
187 | return unhexlify(hex_str.encode('ascii'))
188 |
189 | def str_to_b64str(string):
190 | return b64encode(string.encode('utf-8')).decode('ascii')
191 |
192 | def satoshi_round(amount):
193 | return Decimal(amount).quantize(Decimal('0.00000001'), rounding=ROUND_DOWN)
194 |
195 | def wait_until(predicate, *, attempts=float('inf'), timeout=float('inf'), lock=None):
196 | if attempts == float('inf') and timeout == float('inf'):
197 | timeout = 60
198 | attempt = 0
199 | time_end = time.time() + timeout
200 |
201 | while attempt < attempts and time.time() < time_end:
202 | if lock:
203 | with lock:
204 | if predicate():
205 | return
206 | else:
207 | if predicate():
208 | return
209 | attempt += 1
210 | time.sleep(0.05)
211 |
212 | # Print the cause of the timeout
213 | predicate_source = "''''\n" + inspect.getsource(predicate) + "'''"
214 | logger.error("wait_until() failed. Predicate: {}".format(predicate_source))
215 | if attempt >= attempts:
216 | raise AssertionError("Predicate {} not true after {} attempts".format(predicate_source, attempts))
217 | elif time.time() >= time_end:
218 | raise AssertionError("Predicate {} not true after {} seconds".format(predicate_source, timeout))
219 | raise RuntimeError('Unreachable')
220 |
221 | # RPC/P2P connection constants and functions
222 | ############################################
223 |
224 | # The maximum number of nodes a single test can spawn
225 | MAX_NODES = 12
226 | # Don't assign rpc or p2p ports lower than this
227 | PORT_MIN = 11000
228 | # The number of ports to "reserve" for p2p and rpc, each
229 | PORT_RANGE = 5000
230 |
231 | class PortSeed:
232 | # Must be initialized with a unique integer for each process
233 | n = None
234 |
235 | def get_rpc_proxy(url, node_number, timeout=None, coveragedir=None):
236 | """
237 | Args:
238 | url (str): URL of the RPC server to call
239 | node_number (int): the node number (or id) that this calls to
240 |
241 | Kwargs:
242 | timeout (int): HTTP timeout in seconds
243 |
244 | Returns:
245 | AuthServiceProxy. convenience object for making RPC calls.
246 |
247 | """
248 | proxy_kwargs = {}
249 | if timeout is not None:
250 | proxy_kwargs['timeout'] = timeout
251 |
252 | proxy = AuthServiceProxy(url, **proxy_kwargs)
253 | proxy.url = url # store URL on proxy for info
254 |
255 | coverage_logfile = coverage.get_filename(
256 | coveragedir, node_number) if coveragedir else None
257 |
258 | return coverage.AuthServiceProxyWrapper(proxy, coverage_logfile)
259 |
260 | def p2p_port(n):
261 | assert n <= MAX_NODES
262 | return PORT_MIN + n + (MAX_NODES * PortSeed.n) % (PORT_RANGE - 1 - MAX_NODES)
263 |
264 | def rpc_port(n):
265 | return PORT_MIN + PORT_RANGE + n + (MAX_NODES * PortSeed.n) % (PORT_RANGE - 1 - MAX_NODES)
266 |
267 | def rpc_url(datadir, i, chain, rpchost):
268 | rpc_u, rpc_p = get_auth_cookie(datadir, chain)
269 | host = '127.0.0.1'
270 | port = rpc_port(i)
271 | if rpchost:
272 | parts = rpchost.split(':')
273 | if len(parts) == 2:
274 | host, port = parts
275 | else:
276 | host = rpchost
277 | return "http://%s:%s@%s:%d" % (rpc_u, rpc_p, host, int(port))
278 |
279 | # Node functions
280 | ################
281 |
282 | def initialize_datadir(dirname, n, chain):
283 | datadir = get_datadir_path(dirname, n)
284 | if not os.path.isdir(datadir):
285 | os.makedirs(datadir)
286 | with open(os.path.join(datadir, "bitcoin.conf"), 'w', encoding='utf8') as f:
287 | f.write("{}=1\n".format(chain))
288 | f.write("[{}]\n".format(chain))
289 | f.write("port=" + str(p2p_port(n)) + "\n")
290 | f.write("rpcport=" + str(rpc_port(n)) + "\n")
291 | f.write("server=1\n")
292 | f.write("keypool=1\n")
293 | f.write("discover=0\n")
294 | f.write("listenonion=0\n")
295 | f.write("printtoconsole=0\n")
296 | f.write("upnp=0\n")
297 | os.makedirs(os.path.join(datadir, 'stderr'), exist_ok=True)
298 | os.makedirs(os.path.join(datadir, 'stdout'), exist_ok=True)
299 | return datadir
300 |
301 | def get_datadir_path(dirname, n):
302 | return os.path.join(dirname, "node" + str(n))
303 |
304 | def append_config(datadir, options):
305 | with open(os.path.join(datadir, "bitcoin.conf"), 'a', encoding='utf8') as f:
306 | for option in options:
307 | f.write(option + "\n")
308 |
309 | def get_auth_cookie(datadir, chain):
310 | user = None
311 | password = None
312 | if os.path.isfile(os.path.join(datadir, "bitcoin.conf")):
313 | with open(os.path.join(datadir, "bitcoin.conf"), 'r', encoding='utf8') as f:
314 | for line in f:
315 | if line.startswith("rpcuser="):
316 | assert user is None # Ensure that there is only one rpcuser line
317 | user = line.split("=")[1].strip("\n")
318 | if line.startswith("rpcpassword="):
319 | assert password is None # Ensure that there is only one rpcpassword line
320 | password = line.split("=")[1].strip("\n")
321 | try:
322 | with open(os.path.join(datadir, chain, ".cookie"), 'r', encoding="ascii") as f:
323 | userpass = f.read()
324 | split_userpass = userpass.split(':')
325 | user = split_userpass[0]
326 | password = split_userpass[1]
327 | except OSError:
328 | pass
329 | if user is None or password is None:
330 | raise ValueError("No RPC credentials")
331 | return user, password
332 |
333 | # If a cookie file exists in the given datadir, delete it.
334 | def delete_cookie_file(datadir, chain):
335 | if os.path.isfile(os.path.join(datadir, chain, ".cookie")):
336 | logger.debug("Deleting leftover cookie file")
337 | os.remove(os.path.join(datadir, chain, ".cookie"))
338 |
339 | def softfork_active(node, key):
340 | """Return whether a softfork is active."""
341 | return node.getblockchaininfo()['softforks'][key]['active']
342 |
343 | def set_node_times(nodes, t):
344 | for node in nodes:
345 | node.setmocktime(t)
346 |
347 | def disconnect_nodes(from_connection, node_num):
348 | for peer_id in [peer['id'] for peer in from_connection.getpeerinfo() if "testnode%d" % node_num in peer['subver']]:
349 | try:
350 | from_connection.disconnectnode(nodeid=peer_id)
351 | except JSONRPCException as e:
352 | # If this node is disconnected between calculating the peer id
353 | # and issuing the disconnect, don't worry about it.
354 | # This avoids a race condition if we're mass-disconnecting peers.
355 | if e.error['code'] != -29: # RPC_CLIENT_NODE_NOT_CONNECTED
356 | raise
357 |
358 | # wait to disconnect
359 | wait_until(lambda: [peer['id'] for peer in from_connection.getpeerinfo() if "testnode%d" % node_num in peer['subver']] == [], timeout=5)
360 |
361 | def connect_nodes(from_connection, node_num):
362 | ip_port = "127.0.0.1:" + str(p2p_port(node_num))
363 | from_connection.addnode(ip_port, "onetry")
364 | # poll until version handshake complete to avoid race conditions
365 | # with transaction relaying
366 | wait_until(lambda: all(peer['version'] != 0 for peer in from_connection.getpeerinfo()))
367 |
368 | def connect_nodes_bi(nodes, a, b):
369 | connect_nodes(nodes[a], b)
370 | connect_nodes(nodes[b], a)
371 |
372 | def sync_blocks(rpc_connections, *, wait=1, timeout=60):
373 | """
374 | Wait until everybody has the same tip.
375 |
376 | sync_blocks needs to be called with an rpc_connections set that has at least
377 | one node already synced to the latest, stable tip, otherwise there's a
378 | chance it might return before all nodes are stably synced.
379 | """
380 | stop_time = time.time() + timeout
381 | while time.time() <= stop_time:
382 | best_hash = [x.getbestblockhash() for x in rpc_connections]
383 | if best_hash.count(best_hash[0]) == len(rpc_connections):
384 | return
385 | time.sleep(wait)
386 | raise AssertionError("Block sync timed out:{}".format("".join("\n {!r}".format(b) for b in best_hash)))
387 |
388 | def sync_mempools(rpc_connections, *, wait=1, timeout=60, flush_scheduler=True):
389 | """
390 | Wait until everybody has the same transactions in their memory
391 | pools
392 | """
393 | stop_time = time.time() + timeout
394 | while time.time() <= stop_time:
395 | pool = [set(r.getrawmempool()) for r in rpc_connections]
396 | if pool.count(pool[0]) == len(rpc_connections):
397 | if flush_scheduler:
398 | for r in rpc_connections:
399 | r.syncwithvalidationinterfacequeue()
400 | return
401 | time.sleep(wait)
402 | raise AssertionError("Mempool sync timed out:{}".format("".join("\n {!r}".format(m) for m in pool)))
403 |
404 | # Transaction/Block functions
405 | #############################
406 |
407 | def find_output(node, txid, amount, *, blockhash=None):
408 | """
409 | Return index to output of txid with value amount
410 | Raises exception if there is none.
411 | """
412 | txdata = node.getrawtransaction(txid, 1, blockhash)
413 | for i in range(len(txdata["vout"])):
414 | if txdata["vout"][i]["value"] == amount:
415 | return i
416 | raise RuntimeError("find_output txid %s : %s not found" % (txid, str(amount)))
417 |
418 | def gather_inputs(from_node, amount_needed, confirmations_required=1):
419 | """
420 | Return a random set of unspent txouts that are enough to pay amount_needed
421 | """
422 | assert confirmations_required >= 0
423 | utxo = from_node.listunspent(confirmations_required)
424 | random.shuffle(utxo)
425 | inputs = []
426 | total_in = Decimal("0.00000000")
427 | while total_in < amount_needed and len(utxo) > 0:
428 | t = utxo.pop()
429 | total_in += t["amount"]
430 | inputs.append({"txid": t["txid"], "vout": t["vout"], "address": t["address"]})
431 | if total_in < amount_needed:
432 | raise RuntimeError("Insufficient funds: need %d, have %d" % (amount_needed, total_in))
433 | return (total_in, inputs)
434 |
435 | def make_change(from_node, amount_in, amount_out, fee):
436 | """
437 | Create change output(s), return them
438 | """
439 | outputs = {}
440 | amount = amount_out + fee
441 | change = amount_in - amount
442 | if change > amount * 2:
443 | # Create an extra change output to break up big inputs
444 | change_address = from_node.getnewaddress()
445 | # Split change in two, being careful of rounding:
446 | outputs[change_address] = Decimal(change / 2).quantize(Decimal('0.00000001'), rounding=ROUND_DOWN)
447 | change = amount_in - amount - outputs[change_address]
448 | if change > 0:
449 | outputs[from_node.getnewaddress()] = change
450 | return outputs
451 |
452 | def random_transaction(nodes, amount, min_fee, fee_increment, fee_variants):
453 | """
454 | Create a random transaction.
455 | Returns (txid, hex-encoded-transaction-data, fee)
456 | """
457 | from_node = random.choice(nodes)
458 | to_node = random.choice(nodes)
459 | fee = min_fee + fee_increment * random.randint(0, fee_variants)
460 |
461 | (total_in, inputs) = gather_inputs(from_node, amount + fee)
462 | outputs = make_change(from_node, total_in, amount, fee)
463 | outputs[to_node.getnewaddress()] = float(amount)
464 |
465 | rawtx = from_node.createrawtransaction(inputs, outputs)
466 | signresult = from_node.signrawtransactionwithwallet(rawtx)
467 | txid = from_node.sendrawtransaction(signresult["hex"], 0)
468 |
469 | return (txid, signresult["hex"], fee)
470 |
471 | # Helper to create at least "count" utxos
472 | # Pass in a fee that is sufficient for relay and mining new transactions.
473 | def create_confirmed_utxos(fee, node, count):
474 | to_generate = int(0.5 * count) + 101
475 | while to_generate > 0:
476 | node.generate(min(25, to_generate))
477 | to_generate -= 25
478 | utxos = node.listunspent()
479 | iterations = count - len(utxos)
480 | addr1 = node.getnewaddress()
481 | addr2 = node.getnewaddress()
482 | if iterations <= 0:
483 | return utxos
484 | for i in range(iterations):
485 | t = utxos.pop()
486 | inputs = []
487 | inputs.append({"txid": t["txid"], "vout": t["vout"]})
488 | outputs = {}
489 | send_value = t['amount'] - fee
490 | outputs[addr1] = satoshi_round(send_value / 2)
491 | outputs[addr2] = satoshi_round(send_value / 2)
492 | raw_tx = node.createrawtransaction(inputs, outputs)
493 | signed_tx = node.signrawtransactionwithwallet(raw_tx)["hex"]
494 | node.sendrawtransaction(signed_tx)
495 |
496 | while (node.getmempoolinfo()['size'] > 0):
497 | node.generate(1)
498 |
499 | utxos = node.listunspent()
500 | assert len(utxos) >= count
501 | return utxos
502 |
503 | def find_vout_for_address(node, txid, addr):
504 | """
505 | Locate the vout index of the given transaction sending to the
506 | given address. Raises runtime error exception if not found.
507 | """
508 | tx = node.getrawtransaction(txid, True)
509 | for i in range(len(tx["vout"])):
510 | if any([addr == a for a in tx["vout"][i]["scriptPubKey"]["addresses"]]):
511 | return i
512 | raise RuntimeError("Vout not found for address: txid=%s, addr=%s" % (txid, addr))
513 |
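514 | # A minimal usage sketch for wait_until; `node` here is a hypothetical
515 | # test node object exposing the RPC interface:
516 | #
517 | # wait_until(lambda: node.getblockcount() >= 101, timeout=30)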
--------------------------------------------------------------------------------
/src/bitcoin/mininode.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # Copyright (c) 2010 ArtForz -- public domain half-a-node
3 | # Copyright (c) 2012 Jeff Garzik
4 | # Copyright (c) 2010-2019 The Bitcoin Core developers
5 | # Distributed under the MIT software license, see the accompanying
6 | # file COPYING or http://www.opensource.org/licenses/mit-license.php.
7 | """Bitcoin P2P network half-a-node.
8 |
9 | This python code was modified from ArtForz' public domain half-a-node, as
10 | found in the mini-node branch of http://github.com/jgarzik/pynode.
11 |
12 | P2PConnection: A low-level connection object to a node's P2P interface
13 | P2PInterface: A high-level interface object for communicating to a node over P2P
14 | P2PDataStore: A p2p interface class that keeps a store of transactions and blocks
15 | and can respond correctly to getdata and getheaders messages"""
16 | import asyncio
17 | from collections import defaultdict
18 | from io import BytesIO
19 | import logging
20 | import struct
21 | import sys
22 | import threading
23 |
24 | from src.bitcoin.messages import (
25 | CBlockHeader,
26 | MIN_VERSION_SUPPORTED,
27 | msg_addr,
28 | msg_block,
29 | MSG_BLOCK,
30 | msg_blocktxn,
31 | msg_cmpctblock,
32 | msg_feefilter,
33 | msg_getaddr,
34 | msg_getblocks,
35 | msg_getblocktxn,
36 | msg_getdata,
37 | msg_getheaders,
38 | msg_headers,
39 | msg_inv,
40 | msg_mempool,
41 | msg_notfound,
42 | msg_ping,
43 | msg_pong,
44 | msg_reject,
45 | msg_sendcmpct,
46 | msg_sendheaders,
47 | msg_tx,
48 | MSG_TX,
49 | MSG_TYPE_MASK,
50 | msg_verack,
51 | msg_version,
52 | NODE_NETWORK,
53 | NODE_WITNESS,
54 | sha256,
55 | )
56 | from src.bitcoin.util import wait_until
57 |
58 | logger = logging.getLogger("TestFramework.mininode")
59 |
60 | MESSAGEMAP = {
61 | b"addr": msg_addr,
62 | b"block": msg_block,
63 | b"blocktxn": msg_blocktxn,
64 | b"cmpctblock": msg_cmpctblock,
65 | b"feefilter": msg_feefilter,
66 | b"getaddr": msg_getaddr,
67 | b"getblocks": msg_getblocks,
68 | b"getblocktxn": msg_getblocktxn,
69 | b"getdata": msg_getdata,
70 | b"getheaders": msg_getheaders,
71 | b"headers": msg_headers,
72 | b"inv": msg_inv,
73 | b"mempool": msg_mempool,
74 | b"notfound": msg_notfound,
75 | b"ping": msg_ping,
76 | b"pong": msg_pong,
77 | b"reject": msg_reject,
78 | b"sendcmpct": msg_sendcmpct,
79 | b"sendheaders": msg_sendheaders,
80 | b"tx": msg_tx,
81 | b"verack": msg_verack,
82 | b"version": msg_version,
83 | }
84 |
85 | MAGIC_BYTES = {
86 | "mainnet": b"\xf9\xbe\xb4\xd9", # mainnet
87 | "testnet3": b"\x0b\x11\x09\x07", # testnet3
88 | "regtest": b"\xfa\xbf\xb5\xda", # regtest
89 | }
90 |
91 |
92 | class P2PConnection(asyncio.Protocol):
93 | """A low-level connection object to a node's P2P interface.
94 |
95 | This class is responsible for:
96 |
97 | - opening and closing the TCP connection to the node
98 | - reading bytes from and writing bytes to the socket
99 | - deserializing and serializing the P2P message header
100 | - logging messages as they are sent and received
101 |
102 | This class contains no logic for handling the P2P message payloads. It must be
103 | sub-classed and the on_message() callback overridden."""
104 |
105 | def __init__(self):
106 | # The underlying transport of the connection.
107 | # Should only call methods on this from the NetworkThread, c.f. call_soon_threadsafe
108 | self._transport = None
109 |
110 | @property
111 | def is_connected(self):
112 | return self._transport is not None
113 |
114 | def peer_connect(self, dstaddr, dstport, net="regtest"):
115 | assert not self.is_connected
116 | self.dstaddr = dstaddr
117 | self.dstport = dstport
118 | # The initial message to send after the connection was made:
119 | self.on_connection_send_msg = None
120 | self.recvbuf = b""
121 | self.magic_bytes = MAGIC_BYTES[net]
122 | logger.debug('Connecting to Bitcoin Node: %s:%d' % (self.dstaddr, self.dstport))
123 |
124 | loop = NetworkThread.network_event_loop
125 | conn_gen_unsafe = loop.create_connection(lambda: self, host=self.dstaddr, port=self.dstport)
126 | conn_gen = lambda: loop.call_soon_threadsafe(loop.create_task, conn_gen_unsafe)
127 | return conn_gen
128 |
129 | def peer_disconnect(self):
130 | # Connection could have already been closed by other end.
131 | NetworkThread.network_event_loop.call_soon_threadsafe(lambda: self._transport and self._transport.abort())
132 |
133 | # Connection and disconnection methods
134 |
135 | def connection_made(self, transport):
136 | """asyncio callback when a connection is opened."""
137 | assert not self._transport
138 | logger.debug("Connected & Listening: %s:%d" % (self.dstaddr, self.dstport))
139 | self._transport = transport
140 | if self.on_connection_send_msg:
141 | self.send_message(self.on_connection_send_msg)
142 | self.on_connection_send_msg = None # Never used again
143 | self.on_open()
144 |
145 | def connection_lost(self, exc):
146 | """asyncio callback when a connection is closed."""
147 | if exc:
148 | logger.warning("Connection lost to {}:{} due to {}".format(self.dstaddr, self.dstport, exc))
149 | else:
150 | logger.debug("Closed connection to: %s:%d" % (self.dstaddr, self.dstport))
151 | self._transport = None
152 | self.recvbuf = b""
153 | self.on_close()
154 |
155 | # Socket read methods
156 |
157 | def data_received(self, t):
158 | """asyncio callback when data is read from the socket."""
159 | if len(t) > 0:
160 | self.recvbuf += t
161 | self._on_data()
162 |
163 | def _on_data(self):
164 | """Try to read P2P messages from the recv buffer.
165 |
166 | This method reads data from the buffer in a loop. It deserializes,
167 | parses and verifies the P2P header, then passes the P2P payload to
168 | the on_message callback for processing."""
169 | try:
170 | while True:
171 | if len(self.recvbuf) < 4:
172 | return
173 | if self.recvbuf[:4] != self.magic_bytes:
174 | raise ValueError("magic bytes mismatch: {} != {}".format(repr(self.magic_bytes), repr(self.recvbuf)))
175 | if len(self.recvbuf) < 4 + 12 + 4 + 4:
176 | return
177 | command = self.recvbuf[4:4+12].split(b"\x00", 1)[0]
178 | msglen = struct.unpack(" 500:
250 | log_message += "... (msg truncated)"
251 | logger.debug(log_message)
252 |
253 |
254 | class P2PInterface(P2PConnection):
255 | """A high-level P2P interface class for communicating with a Bitcoin node.
256 |
257 | This class provides high-level callbacks for processing P2P message
258 | payloads, as well as convenience methods for interacting with the
259 | node over P2P.
260 |
261 | Individual testcases should subclass this and override the on_* methods
262 | if they want to alter message handling behaviour."""
263 | def __init__(self):
264 | super().__init__()
265 |
266 | # Track number of messages of each type received and the most recent
267 | # message of each type
268 | self.message_count = defaultdict(int)
269 | self.last_message = {}
270 |
271 | # A count of the number of ping messages we've sent to the node
272 | self.ping_counter = 1
273 |
274 | # The network services received from the peer
275 | self.nServices = 0
276 |
277 | def peer_connect(self, *args, services=NODE_NETWORK|NODE_WITNESS, send_version=True, **kwargs):
278 | create_conn = super().peer_connect(*args, **kwargs)
279 |
280 | if send_version:
281 | # Send a version msg
282 | vt = msg_version()
283 | vt.nServices = services
284 | vt.addrTo.ip = self.dstaddr
285 | vt.addrTo.port = self.dstport
286 | vt.addrFrom.ip = "0.0.0.0"
287 | vt.addrFrom.port = 0
288 | self.on_connection_send_msg = vt # Will be sent soon after connection_made
289 |
290 | return create_conn
291 |
292 | # Message receiving methods
293 |
294 | def on_message(self, message):
295 | """Receive message and dispatch message to appropriate callback.
296 |
297 | We keep a count of how many of each message type has been received
298 | and the most recent message of each type."""
299 | with mininode_lock:
300 | try:
301 | command = message.command.decode('ascii')
302 | self.message_count[command] += 1
303 | self.last_message[command] = message
304 | getattr(self, 'on_' + command)(message)
305 | except:
306 | print("ERROR delivering %s (%s)" % (repr(message), sys.exc_info()[0]))
307 | raise
308 |
309 | # Callback methods. Can be overridden by subclasses in individual test
310 | # cases to provide custom message handling behaviour.
311 |
312 | def on_open(self):
313 | pass
314 |
315 | def on_close(self):
316 | pass
317 |
318 | def on_addr(self, message): pass
319 | def on_block(self, message): pass
320 | def on_blocktxn(self, message): pass
321 | def on_cmpctblock(self, message): pass
322 | def on_feefilter(self, message): pass
323 | def on_getaddr(self, message): pass
324 | def on_getblocks(self, message): pass
325 | def on_getblocktxn(self, message): pass
326 | def on_getdata(self, message): pass
327 | def on_getheaders(self, message): pass
328 | def on_headers(self, message): pass
329 | def on_mempool(self, message): pass
330 | def on_notfound(self, message): pass
331 | def on_pong(self, message): pass
332 | def on_reject(self, message): pass
333 | def on_sendcmpct(self, message): pass
334 | def on_sendheaders(self, message): pass
335 | def on_tx(self, message): pass
336 |
337 | def on_inv(self, message):
338 | want = msg_getdata()
339 | for i in message.inv:
340 | if i.type != 0:
341 | want.inv.append(i)
342 | if len(want.inv):
343 | self.send_message(want)
344 |
345 | def on_ping(self, message):
346 | self.send_message(msg_pong(message.nonce))
347 |
348 | def on_verack(self, message):
349 | pass
350 |
351 | def on_version(self, message):
352 |         assert message.nVersion >= MIN_VERSION_SUPPORTED, "Version {} received. Test framework only supports versions of at least {}".format(message.nVersion, MIN_VERSION_SUPPORTED)
353 | self.send_message(msg_verack())
354 | self.nServices = message.nServices
355 |
356 | # Connection helper methods
357 |
358 | def wait_for_disconnect(self, timeout=60):
359 | test_function = lambda: not self.is_connected
360 | wait_until(test_function, timeout=timeout, lock=mininode_lock)
361 |
362 | # Message receiving helper methods
363 |
364 | def wait_for_tx(self, txid, timeout=60):
365 | def test_function():
366 | assert self.is_connected
367 | if not self.last_message.get('tx'):
368 | return False
369 | return self.last_message['tx'].tx.rehash() == txid
370 |
371 | wait_until(test_function, timeout=timeout, lock=mininode_lock)
372 |
373 | def wait_for_block(self, blockhash, timeout=60):
374 | def test_function():
375 | assert self.is_connected
376 | return self.last_message.get("block") and self.last_message["block"].block.rehash() == blockhash
377 |
378 | wait_until(test_function, timeout=timeout, lock=mininode_lock)
379 |
380 | def wait_for_header(self, blockhash, timeout=60):
381 | def test_function():
382 | assert self.is_connected
383 | last_headers = self.last_message.get('headers')
384 | if not last_headers:
385 | return False
386 | return last_headers.headers[0].rehash() == blockhash
387 |
388 | wait_until(test_function, timeout=timeout, lock=mininode_lock)
389 |
390 | def wait_for_getdata(self, timeout=60):
391 | """Waits for a getdata message.
392 |
393 |         Receiving any getdata message will satisfy the predicate. The last_message["getdata"]
394 | value must be explicitly cleared before calling this method, or this will return
395 | immediately with success. TODO: change this method to take a hash value and only
396 | return true if the correct block/tx has been requested."""
397 |
398 | def test_function():
399 | assert self.is_connected
400 | return self.last_message.get("getdata")
401 |
402 | wait_until(test_function, timeout=timeout, lock=mininode_lock)
403 |
404 | def wait_for_getheaders(self, timeout=60):
405 | """Waits for a getheaders message.
406 |
407 |         Receiving any getheaders message will satisfy the predicate. The last_message["getheaders"]
408 | value must be explicitly cleared before calling this method, or this will return
409 | immediately with success. TODO: change this method to take a hash value and only
410 | return true if the correct block header has been requested."""
411 |
412 | def test_function():
413 | assert self.is_connected
414 | return self.last_message.get("getheaders")
415 |
416 | wait_until(test_function, timeout=timeout, lock=mininode_lock)
417 |
418 | def wait_for_inv(self, expected_inv, timeout=60):
419 | """Waits for an INV message and checks that the first inv object in the message was as expected."""
420 | if len(expected_inv) > 1:
421 | raise NotImplementedError("wait_for_inv() will only verify the first inv object")
422 |
423 | def test_function():
424 | assert self.is_connected
425 | return self.last_message.get("inv") and \
426 | self.last_message["inv"].inv[0].type == expected_inv[0].type and \
427 | self.last_message["inv"].inv[0].hash == expected_inv[0].hash
428 |
429 | wait_until(test_function, timeout=timeout, lock=mininode_lock)
430 |
431 | def wait_for_verack(self, timeout=60):
432 | def test_function():
433 | return self.message_count["verack"]
434 |
435 | wait_until(test_function, timeout=timeout, lock=mininode_lock)
436 |
437 | # Message sending helper functions
438 |
439 | def send_and_ping(self, message, timeout=60):
440 | self.send_message(message)
441 | self.sync_with_ping(timeout=timeout)
442 |
443 | # Sync up with the node
444 | def sync_with_ping(self, timeout=60):
445 | self.send_message(msg_ping(nonce=self.ping_counter))
446 |
447 | def test_function():
448 | assert self.is_connected
449 | return self.last_message.get("pong") and self.last_message["pong"].nonce == self.ping_counter
450 |
451 | wait_until(test_function, timeout=timeout, lock=mininode_lock)
452 | self.ping_counter += 1
453 |
454 |
455 | # One lock for synchronizing all data access between the network event loop (see
456 | # NetworkThread below) and the thread running the test logic. For simplicity,
457 | # P2PConnection acquires this lock whenever delivering a message to a P2PInterface.
458 | # This lock should be acquired in the thread running the test logic to synchronize
459 | # access to any data shared with the P2PInterface or P2PConnection.
460 | mininode_lock = threading.RLock()
461 |
462 |
463 | class NetworkThread(threading.Thread):
464 | network_event_loop = None
465 |
466 | def __init__(self):
467 | super().__init__(name="NetworkThread")
468 |         # There is only one event loop, so no more than one NetworkThread may be created
469 | assert not self.network_event_loop
470 |
471 | NetworkThread.network_event_loop = asyncio.new_event_loop()
472 |
473 | def run(self):
474 | """Start the network thread."""
475 | self.network_event_loop.run_forever()
476 |
477 | def close(self, timeout=10):
478 | """Close the connections and network event loop."""
479 | self.network_event_loop.call_soon_threadsafe(self.network_event_loop.stop)
480 | wait_until(lambda: not self.network_event_loop.is_running(), timeout=timeout)
481 | self.network_event_loop.close()
482 | self.join(timeout)
483 | NetworkThread.network_event_loop = None # Safe to remove event loop.
484 |
485 | class P2PDataStore(P2PInterface):
486 | """A P2P data store class.
487 |
488 | Keeps a block and transaction store and responds correctly to getdata and getheaders requests."""
489 |
490 | def __init__(self):
491 | super().__init__()
492 | # store of blocks. key is block hash, value is a CBlock object
493 | self.block_store = {}
494 | self.last_block_hash = ''
495 | # store of txs. key is txid, value is a CTransaction object
496 | self.tx_store = {}
497 | self.getdata_requests = []
498 |
499 | def on_getdata(self, message):
500 | """Check for the tx/block in our stores and if found, reply with an inv message."""
501 | for inv in message.inv:
502 | self.getdata_requests.append(inv.hash)
503 | if (inv.type & MSG_TYPE_MASK) == MSG_TX and inv.hash in self.tx_store.keys():
504 | self.send_message(msg_tx(self.tx_store[inv.hash]))
505 | elif (inv.type & MSG_TYPE_MASK) == MSG_BLOCK and inv.hash in self.block_store.keys():
506 | self.send_message(msg_block(self.block_store[inv.hash]))
507 | else:
508 | logger.debug('getdata message type {} received.'.format(hex(inv.type)))
509 |
510 | def on_getheaders(self, message):
511 | """Search back through our block store for the locator, and reply with a headers message if found."""
512 |
513 | locator, hash_stop = message.locator, message.hashstop
514 |
515 | # Assume that the most recent block added is the tip
516 | if not self.block_store:
517 | return
518 |
519 | headers_list = [self.block_store[self.last_block_hash]]
520 | maxheaders = 2000
521 | while headers_list[-1].sha256 not in locator.vHave:
522 | # Walk back through the block store, adding headers to headers_list
523 | # as we go.
524 | prev_block_hash = headers_list[-1].hashPrevBlock
525 | if prev_block_hash in self.block_store:
526 | prev_block_header = CBlockHeader(self.block_store[prev_block_hash])
527 | headers_list.append(prev_block_header)
528 | if prev_block_header.sha256 == hash_stop:
529 | # if this is the hashstop header, stop here
530 | break
531 | else:
532 | logger.debug('block hash {} not found in block store'.format(hex(prev_block_hash)))
533 | break
534 |
535 | # Truncate the list if there are too many headers
536 | headers_list = headers_list[:-maxheaders - 1:-1]
537 | response = msg_headers(headers_list)
538 |
539 | if response is not None:
540 | self.send_message(response)
541 |
542 | def send_blocks_and_test(self, blocks, node, *, success=True, force_send=False, reject_reason=None, expect_disconnect=False, timeout=60):
543 | """Send blocks to test node and test whether the tip advances.
544 |
545 | - add all blocks to our block_store
546 | - send a headers message for the final block
547 | - the on_getheaders handler will ensure that any getheaders are responded to
548 | - if force_send is False: wait for getdata for each of the blocks. The on_getdata handler will
549 | ensure that any getdata messages are responded to. Otherwise send the full block unsolicited.
550 | - if success is True: assert that the node's tip advances to the most recent block
551 | - if success is False: assert that the node's tip doesn't advance
552 | - if reject_reason is set: assert that the correct reject message is logged"""
553 |
554 | with mininode_lock:
555 | for block in blocks:
556 | self.block_store[block.sha256] = block
557 | self.last_block_hash = block.sha256
558 |
559 | reject_reason = [reject_reason] if reject_reason else []
560 | with node.assert_debug_log(expected_msgs=reject_reason):
561 | if force_send:
562 | for b in blocks:
563 | self.send_message(msg_block(block=b))
564 | else:
565 | self.send_message(msg_headers([CBlockHeader(block) for block in blocks]))
566 | wait_until(lambda: blocks[-1].sha256 in self.getdata_requests, timeout=timeout, lock=mininode_lock)
567 |
568 | if expect_disconnect:
569 | self.wait_for_disconnect(timeout=timeout)
570 | else:
571 | self.sync_with_ping(timeout=timeout)
572 |
573 | if success:
574 | wait_until(lambda: node.getbestblockhash() == blocks[-1].hash, timeout=timeout)
575 | else:
576 | assert node.getbestblockhash() != blocks[-1].hash
577 |
578 | def send_txs_and_test(self, txs, node, *, success=True, expect_disconnect=False, reject_reason=None):
579 | """Send txs to test node and test whether they're accepted to the mempool.
580 |
581 | - add all txs to our tx_store
582 | - send tx messages for all txs
583 | - if success is True/False: assert that the txs are/are not accepted to the mempool
584 | - if expect_disconnect is True: Skip the sync with ping
585 | - if reject_reason is set: assert that the correct reject message is logged."""
586 |
587 | with mininode_lock:
588 | for tx in txs:
589 | self.tx_store[tx.sha256] = tx
590 |
591 | reject_reason = [reject_reason] if reject_reason else []
592 | with node.assert_debug_log(expected_msgs=reject_reason):
593 | for tx in txs:
594 | self.send_message(msg_tx(tx))
595 |
596 | if expect_disconnect:
597 | self.wait_for_disconnect()
598 | else:
599 | self.sync_with_ping()
600 |
601 | raw_mempool = node.getrawmempool()
602 | if success:
603 | # Check that all txs are now in the mempool
604 | for tx in txs:
605 | assert tx.hash in raw_mempool, "{} not found in mempool".format(tx.hash)
606 | else:
607 | # Check that none of the txs are now in the mempool
608 | for tx in txs:
609 | assert tx.hash not in raw_mempool, "{} tx found in mempool".format(tx.hash)
610 |
--------------------------------------------------------------------------------
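
A note on the framing logic above: _on_data and build_message implement Bitcoin's P2P envelope, which consists of 4 magic bytes, a 12-byte zero-padded command, a little-endian 4-byte payload length, and a 4-byte checksum equal to the first four bytes of the payload's double-SHA256. The following is a minimal standalone sketch of that envelope; frame and parse_frame are illustrative helpers that are not part of this repo, and the regtest network magic is an assumption:

import hashlib
import struct

MAGIC = bytes.fromhex("fabfb5da")  # regtest network magic (assumption)

def frame(command: bytes, payload: bytes) -> bytes:
    """Wrap a payload in the P2P envelope: magic + command + length + checksum + payload."""
    checksum = hashlib.sha256(hashlib.sha256(payload).digest()).digest()[:4]
    return MAGIC + command.ljust(12, b"\x00") + struct.pack("<I", len(payload)) + checksum + payload

def parse_frame(data: bytes):
    """Reverse of frame(): validate the magic and checksum, return (command, payload)."""
    assert data[:4] == MAGIC, "magic bytes mismatch"
    command = data[4:16].rstrip(b"\x00")
    msglen = struct.unpack("<I", data[16:20])[0]
    checksum, payload = data[20:24], data[24:24 + msglen]
    assert hashlib.sha256(hashlib.sha256(payload).digest()).digest()[:4] == checksum, "bad checksum"
    return command, payload

assert parse_frame(frame(b"ping", b"\x00" * 8)) == (b"ping", b"\x00" * 8)
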
/src/bitcoin/key.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2019 Pieter Wuille
2 | # Distributed under the MIT software license, see the accompanying
3 | # file COPYING or http://www.opensource.org/licenses/mit-license.php.
4 | """Test-only secp256k1 elliptic curve implementation
5 |
6 | WARNING: This code is slow, uses bad randomness, does not properly protect
7 | keys, and is trivially vulnerable to side channel attacks. Do not use for
8 | anything but tests."""
9 | import random
10 | import hashlib
11 |
12 | def TaggedHash(tag, data):
13 | ss = hashlib.sha256(tag.encode('utf-8')).digest()
14 | ss += ss
15 | ss += data
16 | return hashlib.sha256(ss).digest()
17 |
18 | def modinv(a, n):
19 | """Compute the modular inverse of a modulo n
20 |
21 | See https://en.wikipedia.org/wiki/Extended_Euclidean_algorithm#Modular_integers.
22 | """
23 | t1, t2 = 0, 1
24 | r1, r2 = n, a
25 | while r2 != 0:
26 | q = r1 // r2
27 | t1, t2 = t2, t1 - q * t2
28 | r1, r2 = r2, r1 - q * r2
29 | if r1 > 1:
30 | return None
31 | if t1 < 0:
32 | t1 += n
33 | return t1
34 |
35 | def jacobi_symbol(n, k):
36 | """Compute the Jacobi symbol of n modulo k
37 |
38 | See http://en.wikipedia.org/wiki/Jacobi_symbol
39 |
40 | For our application k is always prime, so this is the same as the Legendre symbol."""
41 | assert k > 0 and k & 1, "jacobi symbol is only defined for positive odd k"
42 | n %= k
43 | t = 0
44 | while n != 0:
45 | while n & 1 == 0:
46 | n >>= 1
47 | r = k & 7
48 | t ^= (r == 3 or r == 5)
49 | n, k = k, n
50 | t ^= (n & k & 3 == 3)
51 | n = n % k
52 | if k == 1:
53 | return -1 if t else 1
54 | return 0
55 |
56 | def modsqrt(a, p):
57 | """Compute the square root of a modulo p when p % 4 = 3.
58 |
59 | The Tonelli-Shanks algorithm can be used. See https://en.wikipedia.org/wiki/Tonelli-Shanks_algorithm
60 |
61 | Limiting this function to only work for p % 4 = 3 means we don't need to
62 | iterate through the loop. The highest n such that p - 1 = 2^n Q with Q odd
63 | is n = 1. Therefore Q = (p-1)/2 and sqrt = a^((Q+1)/2) = a^((p+1)/4)
64 |
65 |     secp256k1 is defined over a field of size 2**256 - 2**32 - 977, which is 3 mod 4.
66 | """
67 | if p % 4 != 3:
68 | raise NotImplementedError("modsqrt only implemented for p % 4 = 3")
69 | sqrt = pow(a, (p + 1)//4, p)
70 | if pow(sqrt, 2, p) == a % p:
71 | return sqrt
72 | return None
73 |
74 | def int_or_bytes(s):
75 |     "Convert 32 bytes to an int; an int is also accepted and returned as-is."
76 | if isinstance(s, bytes):
77 | assert(len(s) == 32)
78 | s = int.from_bytes(s, 'big')
79 | elif not isinstance(s, int):
80 | raise TypeError
81 | return s
82 |
83 | class EllipticCurve:
84 | def __init__(self, p, a, b):
85 | """Initialize elliptic curve y^2 = x^3 + a*x + b over GF(p)."""
86 | self.p = p
87 | self.a = a % p
88 | self.b = b % p
89 |
90 | def affine(self, p1):
91 | """Convert a Jacobian point tuple p1 to affine form, or None if at infinity.
92 |
93 | An affine point is represented as the Jacobian (x, y, 1)"""
94 | x1, y1, z1 = p1
95 | if z1 == 0:
96 | return None
97 | inv = modinv(z1, self.p)
98 | inv_2 = (inv**2) % self.p
99 | inv_3 = (inv_2 * inv) % self.p
100 | return ((inv_2 * x1) % self.p, (inv_3 * y1) % self.p, 1)
101 |
102 | def has_even_y(self, p1):
103 | """Whether the point p1 has an even Y coordinate when expressed in affine coordinates."""
104 | return not (p1[2] == 0 or self.affine(p1)[1] & 1)
105 |
106 | def negate(self, p1):
107 | """Negate a Jacobian point tuple p1."""
108 | x1, y1, z1 = p1
109 | return (x1, (self.p - y1) % self.p, z1)
110 |
111 | def on_curve(self, p1):
112 | """Determine whether a Jacobian tuple p is on the curve (and not infinity)"""
113 | x1, y1, z1 = p1
114 | z2 = pow(z1, 2, self.p)
115 | z4 = pow(z2, 2, self.p)
116 | return z1 != 0 and (pow(x1, 3, self.p) + self.a * x1 * z4 + self.b * z2 * z4 - pow(y1, 2, self.p)) % self.p == 0
117 |
118 | def is_x_coord(self, x):
119 | """Test whether x is a valid X coordinate on the curve."""
120 | x_3 = pow(x, 3, self.p)
121 | return jacobi_symbol(x_3 + self.a * x + self.b, self.p) != -1
122 |
123 | def lift_x(self, x):
124 | """Given an X coordinate on the curve, return a corresponding affine point."""
125 | x_3 = pow(x, 3, self.p)
126 | v = x_3 + self.a * x + self.b
127 | y = modsqrt(v, self.p)
128 | if y is None:
129 | return None
130 | return (x, y, 1)
131 |
132 | def double(self, p1):
133 | """Double a Jacobian tuple p1
134 |
135 | See https://en.wikibooks.org/wiki/Cryptography/Prime_Curve/Jacobian_Coordinates - Point Doubling"""
136 | x1, y1, z1 = p1
137 | if z1 == 0:
138 | return (0, 1, 0)
139 | y1_2 = (y1**2) % self.p
140 | y1_4 = (y1_2**2) % self.p
141 | x1_2 = (x1**2) % self.p
142 | s = (4*x1*y1_2) % self.p
143 | m = 3*x1_2
144 | if self.a:
145 | m += self.a * pow(z1, 4, self.p)
146 | m = m % self.p
147 | x2 = (m**2 - 2*s) % self.p
148 | y2 = (m*(s - x2) - 8*y1_4) % self.p
149 | z2 = (2*y1*z1) % self.p
150 | return (x2, y2, z2)
151 |
152 | def add_mixed(self, p1, p2):
153 | """Add a Jacobian tuple p1 and an affine tuple p2
154 |
155 | See https://en.wikibooks.org/wiki/Cryptography/Prime_Curve/Jacobian_Coordinates - Point Addition (with affine point)"""
156 | x1, y1, z1 = p1
157 | x2, y2, z2 = p2
158 | assert(z2 == 1)
159 | # Adding to the point at infinity is a no-op
160 | if z1 == 0:
161 | return p2
162 | z1_2 = (z1**2) % self.p
163 | z1_3 = (z1_2 * z1) % self.p
164 | u2 = (x2 * z1_2) % self.p
165 | s2 = (y2 * z1_3) % self.p
166 | if x1 == u2:
167 | if (y1 != s2):
168 | # p1 and p2 are inverses. Return the point at infinity.
169 | return (0, 1, 0)
170 | # p1 == p2. The formulas below fail when the two points are equal.
171 | return self.double(p1)
172 | h = u2 - x1
173 | r = s2 - y1
174 | h_2 = (h**2) % self.p
175 | h_3 = (h_2 * h) % self.p
176 | u1_h_2 = (x1 * h_2) % self.p
177 | x3 = (r**2 - h_3 - 2*u1_h_2) % self.p
178 | y3 = (r*(u1_h_2 - x3) - y1*h_3) % self.p
179 | z3 = (h*z1) % self.p
180 | return (x3, y3, z3)
181 |
182 | def add(self, p1, p2):
183 | """Add two Jacobian tuples p1 and p2
184 |
185 | See https://en.wikibooks.org/wiki/Cryptography/Prime_Curve/Jacobian_Coordinates - Point Addition"""
186 | x1, y1, z1 = p1
187 | x2, y2, z2 = p2
188 | # Adding the point at infinity is a no-op
189 | if z1 == 0:
190 | return p2
191 | if z2 == 0:
192 | return p1
193 | # Adding an Affine to a Jacobian is more efficient since we save field multiplications and squarings when z = 1
194 | if z1 == 1:
195 | return self.add_mixed(p2, p1)
196 | if z2 == 1:
197 | return self.add_mixed(p1, p2)
198 | z1_2 = (z1**2) % self.p
199 | z1_3 = (z1_2 * z1) % self.p
200 | z2_2 = (z2**2) % self.p
201 | z2_3 = (z2_2 * z2) % self.p
202 | u1 = (x1 * z2_2) % self.p
203 | u2 = (x2 * z1_2) % self.p
204 | s1 = (y1 * z2_3) % self.p
205 | s2 = (y2 * z1_3) % self.p
206 | if u1 == u2:
207 | if (s1 != s2):
208 | # p1 and p2 are inverses. Return the point at infinity.
209 | return (0, 1, 0)
210 | # p1 == p2. The formulas below fail when the two points are equal.
211 | return self.double(p1)
212 | h = u2 - u1
213 | r = s2 - s1
214 | h_2 = (h**2) % self.p
215 | h_3 = (h_2 * h) % self.p
216 | u1_h_2 = (u1 * h_2) % self.p
217 | x3 = (r**2 - h_3 - 2*u1_h_2) % self.p
218 | y3 = (r*(u1_h_2 - x3) - s1*h_3) % self.p
219 | z3 = (h*z1*z2) % self.p
220 | return (x3, y3, z3)
221 |
222 | def mul(self, ps):
223 | """Compute a (multi) point multiplication
224 |
225 | ps is a list of (Jacobian tuple, scalar) pairs.
226 | """
227 | r = (0, 1, 0)
228 | for i in range(255, -1, -1):
229 | r = self.double(r)
230 | for (p, n) in ps:
231 | if ((n >> i) & 1):
232 | r = self.add(r, p)
233 | return r
234 |
235 | SECP256K1_FIELD_SIZE = 2**256 - 2**32 - 977
236 | SECP256K1 = EllipticCurve(SECP256K1_FIELD_SIZE, 0, 7)
237 | SECP256K1_G = (0x79BE667EF9DCBBAC55A06295CE870B07029BFCDB2DCE28D959F2815B16F81798, 0x483ADA7726A3C4655DA4FBFC0E1108A8FD17B448A68554199C47D08FFB10D4B8, 1)
238 | SECP256K1_ORDER = 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141
239 | SECP256K1_ORDER_HALF = SECP256K1_ORDER // 2
240 |
241 | class ECPubKey():
242 | """A secp256k1 public key"""
243 |
244 | def __init__(self):
245 | """Construct an uninitialized public key"""
246 | self.valid = False
247 |
248 | def __repr__(self):
249 | return self.get_bytes().hex()
250 |
251 | def __eq__(self, other):
252 |         if other is None:
253 | return False
254 | assert isinstance(other, ECPubKey)
255 | return self.get_bytes() == other.get_bytes()
256 |
257 | def __hash__(self):
258 | return hash(self.get_bytes())
259 |
260 | def set(self, data):
261 | """Construct a public key from a serialization in compressed or uncompressed DER format or BIP340 format"""
262 | if (len(data) == 65 and data[0] == 0x04):
263 | p = (int.from_bytes(data[1:33], 'big'), int.from_bytes(data[33:65], 'big'), 1)
264 | self.valid = SECP256K1.on_curve(p)
265 | if self.valid:
266 | self.p = p
267 | self.compressed = False
268 | elif (len(data) == 33 and (data[0] == 0x02 or data[0] == 0x03)):
269 | x = int.from_bytes(data[1:33], 'big')
270 | if SECP256K1.is_x_coord(x):
271 | p = SECP256K1.lift_x(x)
272 | # if the oddness of the y co-ord isn't correct, find the other
273 | # valid y
274 | if (p[1] & 1) != (data[0] & 1):
275 | p = SECP256K1.negate(p)
276 | self.p = p
277 | self.valid = True
278 | self.compressed = True
279 | else:
280 | self.valid = False
281 | elif (len(data) == 32):
282 | x = int.from_bytes(data[0:32], 'big')
283 | if SECP256K1.is_x_coord(x):
284 | p = SECP256K1.lift_x(x)
285 | # if the oddness of the y co-ord isn't correct, find the other
286 | # valid y
287 | if p[1]%2 != 0:
288 | p = SECP256K1.negate(p)
289 | self.p = p
290 | self.valid = True
291 | self.compressed = True
292 | else:
293 | self.valid = False
294 | else:
295 | self.valid = False
296 | return self
297 |
298 | @property
299 | def is_compressed(self):
300 | return self.compressed
301 |
302 | @property
303 | def is_valid(self):
304 | return self.valid
305 |
306 | def get_y(self):
307 | return SECP256K1.affine(self.p)[1]
308 |
309 | def get_x(self):
310 | return SECP256K1.affine(self.p)[0]
311 |
312 | def get_bytes(self, bip340=True):
313 | assert(self.valid)
314 | p = SECP256K1.affine(self.p)
315 | if p is None:
316 | return None
317 | if bip340:
318 | return bytes(p[0].to_bytes(32, 'big'))
319 | elif self.compressed:
320 | return bytes([0x02 + (p[1] & 1)]) + p[0].to_bytes(32, 'big')
321 | else:
322 | return bytes([0x04]) + p[0].to_bytes(32, 'big') + p[1].to_bytes(32, 'big')
323 |
324 | def verify_ecdsa(self, sig, msg, low_s=True):
325 | """Verify a strictly DER-encoded ECDSA signature against this pubkey.
326 |
327 | See https://en.wikipedia.org/wiki/Elliptic_Curve_Digital_Signature_Algorithm for the
328 | ECDSA verifier algorithm"""
329 | assert(self.valid)
330 |
331 | # Extract r and s from the DER formatted signature. Return false for
332 | # any DER encoding errors.
333 | if (sig[1] + 2 != len(sig)):
334 | return False
335 | if (len(sig) < 4):
336 | return False
337 | if (sig[0] != 0x30):
338 | return False
339 | if (sig[2] != 0x02):
340 | return False
341 | rlen = sig[3]
342 | if (len(sig) < 6 + rlen):
343 | return False
344 | if rlen < 1 or rlen > 33:
345 | return False
346 | if sig[4] >= 0x80:
347 | return False
348 | if (rlen > 1 and (sig[4] == 0) and not (sig[5] & 0x80)):
349 | return False
350 | r = int.from_bytes(sig[4:4+rlen], 'big')
351 | if (sig[4+rlen] != 0x02):
352 | return False
353 | slen = sig[5+rlen]
354 | if slen < 1 or slen > 33:
355 | return False
356 | if (len(sig) != 6 + rlen + slen):
357 | return False
358 | if sig[6+rlen] >= 0x80:
359 | return False
360 | if (slen > 1 and (sig[6+rlen] == 0) and not (sig[7+rlen] & 0x80)):
361 | return False
362 | s = int.from_bytes(sig[6+rlen:6+rlen+slen], 'big')
363 |
364 | # Verify that r and s are within the group order
365 | if r < 1 or s < 1 or r >= SECP256K1_ORDER or s >= SECP256K1_ORDER:
366 | return False
367 | if low_s and s >= SECP256K1_ORDER_HALF:
368 | return False
369 | z = int.from_bytes(msg, 'big')
370 |
371 | # Run verifier algorithm on r, s
372 | w = modinv(s, SECP256K1_ORDER)
373 | u1 = z*w % SECP256K1_ORDER
374 | u2 = r*w % SECP256K1_ORDER
375 | R = SECP256K1.affine(SECP256K1.mul([(SECP256K1_G, u1), (self.p, u2)]))
376 | if R is None or R[0] != r:
377 | return False
378 | return True
379 |
380 | def verify_schnorr(self, sig, msg):
381 | assert(len(msg) == 32)
382 | assert(len(sig) == 64)
383 | assert(self.valid)
384 | r = int.from_bytes(sig[0:32], 'big')
385 | if r >= SECP256K1_FIELD_SIZE:
386 | return False
387 | s = int.from_bytes(sig[32:64], 'big')
388 | if s >= SECP256K1_ORDER:
389 | return False
390 | e = int.from_bytes(TaggedHash("BIP0340/challenge", sig[0:32] + self.get_bytes() + msg), 'big') % SECP256K1_ORDER
391 | R = SECP256K1.mul([(SECP256K1_G, s), (self.p, SECP256K1_ORDER - e)])
392 | if not SECP256K1.has_even_y(R):
393 | return False
394 | if ((r * R[2] * R[2]) % SECP256K1_FIELD_SIZE) != R[0]:
395 | return False
396 | return True
397 |
398 | def __add__(self, other):
399 | """Adds two ECPubKey points."""
400 | assert isinstance(other, ECPubKey)
401 | assert self.valid
402 | assert other.valid
403 | ret = ECPubKey()
404 | ret.p = SECP256K1.add(other.p, self.p)
405 | ret.valid = True
406 | ret.compressed = self.compressed
407 | return ret
408 |
409 | def __radd__(self, other):
410 | """Allows this ECPubKey to be added to 0 for sum()"""
411 | if other == 0:
412 | return self
413 | else:
414 | return self + other
415 |
416 | def __mul__(self, other):
417 | """Multiplies ECPubKey point with a scalar(int/32bytes/ECKey)."""
418 | if isinstance(other, ECKey):
419 | assert self.valid
420 | assert other.secret is not None
421 | multiplier = other.secret
422 | else:
423 | # int_or_bytes checks that other is `int` or `bytes`
424 | multiplier = int_or_bytes(other)
425 |
426 | assert multiplier < SECP256K1_ORDER
427 | multiplier = multiplier % SECP256K1_ORDER
428 | ret = ECPubKey()
429 | ret.p = SECP256K1.mul([(self.p, multiplier)])
430 | ret.valid = True
431 | ret.compressed = self.compressed
432 | return ret
433 |
434 | def __rmul__(self, other):
435 | """Multiplies a scalar(int/32bytes/ECKey) with an ECPubKey point"""
436 | return self * other
437 |
438 | def __sub__(self, other):
439 | """Subtract one point from another"""
440 | assert isinstance(other, ECPubKey)
441 | assert self.valid
442 | assert other.valid
443 | ret = ECPubKey()
444 | ret.p = SECP256K1.add(self.p, SECP256K1.negate(other.p))
445 | ret.valid = True
446 | ret.compressed = self.compressed
447 | return ret
448 |
449 | def tweak_add(self, tweak):
450 | assert(self.valid)
451 | t = int_or_bytes(tweak)
452 | if t >= SECP256K1_ORDER:
453 | return None
454 | tweaked = SECP256K1.affine(SECP256K1.mul([(self.p, 1), (SECP256K1_G, t)]))
455 | if tweaked is None:
456 | return None
457 | ret = ECPubKey()
458 | ret.p = tweaked
459 | ret.valid = True
460 | ret.compressed = self.compressed
461 | return ret
462 |
463 | def mul(self, data):
464 | """Multiplies ECPubKey point with scalar data."""
465 | assert self.valid
466 | other = ECKey()
467 | other.set(data, True)
468 | return self * other
469 |
470 | def negate(self):
471 | self.p = SECP256K1.affine(SECP256K1.negate(self.p))
472 |
473 | class ECKey():
474 | """A secp256k1 private key"""
475 |
476 | def __init__(self):
477 | self.valid = False
478 |
479 | def __repr__(self):
480 | return str(self.secret)
481 |
482 | def __eq__(self, other):
483 | assert isinstance(other, ECKey)
484 | return self.secret == other.secret
485 |
486 | def __hash__(self):
487 | return hash(self.secret)
488 |
489 | def set(self, secret, compressed=True):
490 | """Construct a private key object from either 32-bytes or an int secret and a compressed flag."""
491 | secret = int_or_bytes(secret)
492 |
493 | self.valid = (secret > 0 and secret < SECP256K1_ORDER)
494 | if self.valid:
495 | self.secret = secret
496 | self.compressed = compressed
497 | return self
498 |
499 | def generate(self, compressed=True):
500 | """Generate a random private key (compressed or uncompressed)."""
501 | self.set(random.randrange(1, SECP256K1_ORDER).to_bytes(32, 'big'), compressed)
502 | return self
503 |
504 | def get_bytes(self):
505 | """Retrieve the 32-byte representation of this key."""
506 | assert(self.valid)
507 | return self.secret.to_bytes(32, 'big')
508 |
509 | def as_int(self):
510 | return self.secret
511 |
512 | def from_int(self, secret, compressed=True):
513 | self.valid = (secret > 0 and secret < SECP256K1_ORDER)
514 | if self.valid:
515 | self.secret = secret
516 | self.compressed = compressed
517 |
518 | def __add__(self, other):
519 | """Add key secrets. Returns compressed key."""
520 | assert isinstance(other, ECKey)
521 | assert other.secret > 0 and other.secret < SECP256K1_ORDER
522 | assert self.valid is True
523 | ret_data = ((self.secret + other.secret) % SECP256K1_ORDER).to_bytes(32, 'big')
524 | ret = ECKey()
525 | ret.set(ret_data, True)
526 | return ret
527 |
528 | def __radd__(self, other):
529 | """Allows this ECKey to be added to 0 for sum()"""
530 | if other == 0:
531 | return self
532 | else:
533 | return self + other
534 |
535 | def __sub__(self, other):
536 | """Subtract key secrets. Returns compressed key."""
537 | assert isinstance(other, ECKey)
538 | assert other.secret > 0 and other.secret < SECP256K1_ORDER
539 | assert self.valid is True
540 | ret_data = ((self.secret - other.secret) % SECP256K1_ORDER).to_bytes(32, 'big')
541 | ret = ECKey()
542 | ret.set(ret_data, True)
543 | return ret
544 |
545 | def __mul__(self, other):
546 | """Multiply a private key by another private key or multiply a public key by a private key. Returns compressed key."""
547 | if isinstance(other, ECKey):
548 | assert other.secret > 0 and other.secret < SECP256K1_ORDER
549 | assert self.valid is True
550 | ret_data = ((self.secret * other.secret) % SECP256K1_ORDER).to_bytes(32, 'big')
551 | ret = ECKey()
552 | ret.set(ret_data, True)
553 | return ret
554 | elif isinstance(other, ECPubKey):
555 | return other * self
556 | else:
557 | # ECKey().set() checks that other is an `int` or `bytes`
558 | assert self.valid
559 | second = ECKey().set(other, self.compressed)
560 | return self * second
561 |
562 | def __rmul__(self, other):
563 | return self * other
564 |
565 | def add(self, data):
566 | """Add key to scalar data. Returns compressed key."""
567 | other = ECKey()
568 | other.set(data, True)
569 | return self + other
570 |
571 | def mul(self, data):
572 | """Multiply key secret with scalar data. Returns compressed key."""
573 | other = ECKey()
574 | other.set(data, True)
575 | return self * other
576 |
577 | def negate(self):
578 | """Negate a private key."""
579 | assert self.valid
580 | self.secret = SECP256K1_ORDER - self.secret
581 |
582 | @property
583 | def is_valid(self):
584 | return self.valid
585 |
586 | @property
587 | def is_compressed(self):
588 | return self.compressed
589 |
590 | def get_pubkey(self):
591 | """Compute an ECPubKey object for this secret key."""
592 | assert(self.valid)
593 | ret = ECPubKey()
594 | p = SECP256K1.mul([(SECP256K1_G, self.secret)])
595 | ret.p = p
596 | ret.valid = True
597 | ret.compressed = self.compressed
598 | return ret
599 |
600 | def sign_ecdsa(self, msg, low_s=True):
601 | """Construct a DER-encoded ECDSA signature with this key.
602 |
603 | See https://en.wikipedia.org/wiki/Elliptic_Curve_Digital_Signature_Algorithm for the
604 | ECDSA signer algorithm."""
605 | assert(self.valid)
606 | z = int.from_bytes(msg, 'big')
607 | # Note: no RFC6979, but a simple random nonce (some tests rely on distinct transactions for the same operation)
608 | k = random.randrange(1, SECP256K1_ORDER)
609 | R = SECP256K1.affine(SECP256K1.mul([(SECP256K1_G, k)]))
610 | r = R[0] % SECP256K1_ORDER
611 | s = (modinv(k, SECP256K1_ORDER) * (z + self.secret * r)) % SECP256K1_ORDER
612 | if low_s and s > SECP256K1_ORDER_HALF:
613 | s = SECP256K1_ORDER - s
614 | # Represent in DER format. The byte representations of r and s have
615 | # length rounded up (255 bits becomes 32 bytes and 256 bits becomes 33
616 | # bytes).
617 | rb = r.to_bytes((r.bit_length() + 8) // 8, 'big')
618 | sb = s.to_bytes((s.bit_length() + 8) // 8, 'big')
619 | return b'\x30' + bytes([4 + len(rb) + len(sb), 2, len(rb)]) + rb + bytes([2, len(sb)]) + sb
620 |
621 | def sign_schnorr(self, msg, aux=None):
622 | """Create a Schnorr signature (see BIP340)."""
623 | if aux is None:
624 | aux = bytes(32)
625 |
626 | assert self.valid
627 | assert len(msg) == 32
628 | assert len(aux) == 32
629 |
630 | t = (self.secret ^ int.from_bytes(TaggedHash("BIP0340/aux", aux), 'big')).to_bytes(32, 'big')
631 | kp = int.from_bytes(TaggedHash("BIP0340/nonce", t + self.get_pubkey().get_bytes() + msg), 'big') % SECP256K1_ORDER
632 | assert kp != 0
633 | R = SECP256K1.affine(SECP256K1.mul([(SECP256K1_G, kp)]))
634 | k = kp if SECP256K1.has_even_y(R) else SECP256K1_ORDER - kp
635 | e = int.from_bytes(TaggedHash("BIP0340/challenge", R[0].to_bytes(32, 'big') + self.get_pubkey().get_bytes() + msg), 'big') % SECP256K1_ORDER
636 | return R[0].to_bytes(32, 'big') + ((k + e * self.secret) % SECP256K1_ORDER).to_bytes(32, 'big')
637 |
638 | def tweak_add(self, tweak):
639 | """Return a tweaked version of this private key."""
640 | assert(self.valid)
641 | t = int_or_bytes(tweak)
642 | if t >= SECP256K1_ORDER:
643 | return None
644 | tweaked = (self.secret + t) % SECP256K1_ORDER
645 | if tweaked == 0:
646 | return None
647 | ret = ECKey()
648 | ret.set(tweaked.to_bytes(32, 'big'), self.compressed)
649 | return ret
650 |
651 | def generate_key_pair(secret=None, compressed=True):
652 | """Convenience function to generate a private-public key pair."""
653 | d = ECKey()
654 | if secret:
655 | d.set(secret, compressed)
656 | else:
657 | d.generate(compressed)
658 |
659 | P = d.get_pubkey()
660 | return d, P
661 |
662 | def generate_bip340_key_pair(secret=None):
663 | """Convenience function to generate a BIP0340 private-public key pair."""
664 | d = ECKey()
665 |
666 | if (secret):
667 | d.set(secret)
668 | else:
669 | d.generate()
670 |
671 | P = d.get_pubkey()
672 | if P.get_y()%2 != 0:
673 | d.negate()
674 | P.negate()
675 | return d, P
676 |
677 | def generate_schnorr_nonce():
678 | """Generate a random valid BIP340 nonce.
679 |
680 | See https://github.com/bitcoin/bips/blob/master/bip-0340.mediawiki.
681 | This implementation ensures the y-coordinate of the nonce point is even."""
682 | kp = random.randrange(1, SECP256K1_ORDER)
683 | assert kp != 0
684 | R = SECP256K1.affine(SECP256K1.mul([(SECP256K1_G, kp)]))
685 | k = kp if R[1] % 2 == 0 else SECP256K1_ORDER - kp
686 | k_key = ECKey()
687 | k_key.set(k.to_bytes(32, 'big'), True)
688 | return k_key
689 |
--------------------------------------------------------------------------------
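
As a quick sanity check of the API above, here is a BIP340 sign/verify round trip. This is a usage sketch, not part of the repo itself, and it assumes this file is importable as "key":

import os
from key import generate_bip340_key_pair

# Generate a key pair with an even-Y public key, as BIP340 requires
priv, pub = generate_bip340_key_pair()

msg = os.urandom(32)          # BIP340 signs exactly 32 bytes
sig = priv.sign_schnorr(msg)  # 64-byte signature: R.x || s

assert len(sig) == 64
assert pub.verify_schnorr(sig, msg)
assert not pub.verify_schnorr(sig, os.urandom(32))  # wrong message must fail
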
/src/bitcoin/test_node.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # Copyright (c) 2017-2019 The Bitcoin Core developers
3 | # Distributed under the MIT software license, see the accompanying
4 | # file COPYING or http://www.opensource.org/licenses/mit-license.php.
5 | """Class for bitcoind node under test"""
6 |
7 | import contextlib
8 | import decimal
9 | import errno
10 | from enum import Enum
11 | import http.client
12 | import json
13 | import logging
14 | import os
15 | import re
16 | import subprocess
17 | import tempfile
18 | import time
19 | import urllib.parse
20 | import collections
21 | import shlex
22 | import sys
23 |
24 | from .authproxy import JSONRPCException
25 | from .util import (
26 | MAX_NODES,
27 | append_config,
28 | delete_cookie_file,
29 | get_rpc_proxy,
30 | rpc_url,
31 | wait_until,
32 | p2p_port,
33 | )
34 |
35 | BITCOIND_PROC_WAIT_TIMEOUT = 60
36 |
37 |
38 | class FailedToStartError(Exception):
39 | """Raised when a node fails to start correctly."""
40 |
41 |
42 | class ErrorMatch(Enum):
43 | FULL_TEXT = 1
44 | FULL_REGEX = 2
45 | PARTIAL_REGEX = 3
46 |
47 |
48 | class TestNode():
49 | """A class for representing a bitcoind node under test.
50 |
51 | This class contains:
52 |
53 | - state about the node (whether it's running, etc)
54 | - a Python subprocess.Popen object representing the running process
55 | - an RPC connection to the node
56 | - one or more P2P connections to the node
57 |
58 |
59 |     To make things easier for the test writer, any unrecognised method calls
60 |     will be dispatched to the RPC connection."""
61 |
62 | def __init__(self, i, datadir, *, chain, rpchost, timewait, bitcoind, bitcoin_cli, coverage_dir, cwd, extra_conf=None, extra_args=None, use_cli=False, start_perf=False):
63 | """
64 | Kwargs:
65 | start_perf (bool): If True, begin profiling the node with `perf` as soon as
66 | the node starts.
67 | """
68 |
69 | self.index = i
70 | self.datadir = datadir
71 | self.bitcoinconf = os.path.join(self.datadir, "bitcoin.conf")
72 | self.stdout_dir = os.path.join(self.datadir, "stdout")
73 | self.stderr_dir = os.path.join(self.datadir, "stderr")
74 | self.chain = chain
75 | self.rpchost = rpchost
76 | self.rpc_timeout = timewait
77 | self.binary = bitcoind
78 | self.coverage_dir = coverage_dir
79 | self.cwd = cwd
80 | if extra_conf is not None:
81 | append_config(datadir, extra_conf)
82 | # Most callers will just need to add extra args to the standard list below.
83 | # For those callers that need more flexibility, they can just set the args property directly.
84 | # Note that common args are set in the config file (see initialize_datadir)
85 | self.extra_args = extra_args
86 | # Configuration for logging is set as command-line args rather than in the bitcoin.conf file.
87 | # This means that starting a bitcoind using the temp dir to debug a failed test won't
88 | # spam debug.log.
89 | self.args = [
90 | self.binary,
91 | "-datadir=" + self.datadir,
92 | "-logtimemicros",
93 | "-logthreadnames",
94 | "-debug",
95 | "-debugexclude=libevent",
96 | "-debugexclude=leveldb",
97 | "-uacomment=testnode%d" % i,
98 | ]
99 |
100 | self.cli = TestNodeCLI(bitcoin_cli, self.datadir)
101 | self.use_cli = use_cli
102 | self.start_perf = start_perf
103 |
104 | self.running = False
105 | self.process = None
106 | self.rpc_connected = False
107 | self.rpc = None
108 | self.url = None
109 | self.log = logging.getLogger('TestFramework.node%d' % i)
110 | self.cleanup_on_exit = True # Whether to kill the node when this object goes away
111 | # Cache perf subprocesses here by their data output filename.
112 | self.perf_subprocesses = {}
113 |
114 | self.p2ps = []
115 |
116 | AddressKeyPair = collections.namedtuple('AddressKeyPair', ['address', 'key'])
117 | PRIV_KEYS = [
118 | # address , privkey
119 | AddressKeyPair('mjTkW3DjgyZck4KbiRusZsqTgaYTxdSz6z', 'cVpF924EspNh8KjYsfhgY96mmxvT6DgdWiTYMtMjuM74hJaU5psW'),
120 | AddressKeyPair('msX6jQXvxiNhx3Q62PKeLPrhrqZQdSimTg', 'cUxsWyKyZ9MAQTaAhUQWJmBbSvHMwSmuv59KgxQV7oZQU3PXN3KE'),
121 | AddressKeyPair('mnonCMyH9TmAsSj3M59DsbH8H63U3RKoFP', 'cTrh7dkEAeJd6b3MRX9bZK8eRmNqVCMH3LSUkE3dSFDyzjU38QxK'),
122 | AddressKeyPair('mqJupas8Dt2uestQDvV2NH3RU8uZh2dqQR', 'cVuKKa7gbehEQvVq717hYcbE9Dqmq7KEBKqWgWrYBa2CKKrhtRim'),
123 | AddressKeyPair('msYac7Rvd5ywm6pEmkjyxhbCDKqWsVeYws', 'cQDCBuKcjanpXDpCqacNSjYfxeQj8G6CAtH1Dsk3cXyqLNC4RPuh'),
124 | AddressKeyPair('n2rnuUnwLgXqf9kk2kjvVm8R5BZK1yxQBi', 'cQakmfPSLSqKHyMFGwAqKHgWUiofJCagVGhiB4KCainaeCSxeyYq'),
125 | AddressKeyPair('myzuPxRwsf3vvGzEuzPfK9Nf2RfwauwYe6', 'cQMpDLJwA8DBe9NcQbdoSb1BhmFxVjWD5gRyrLZCtpuF9Zi3a9RK'),
126 | AddressKeyPair('mumwTaMtbxEPUswmLBBN3vM9oGRtGBrys8', 'cSXmRKXVcoouhNNVpcNKFfxsTsToY5pvB9DVsFksF1ENunTzRKsy'),
127 | AddressKeyPair('mpV7aGShMkJCZgbW7F6iZgrvuPHjZjH9qg', 'cSoXt6tm3pqy43UMabY6eUTmR3eSUYFtB2iNQDGgb3VUnRsQys2k'),
128 | AddressKeyPair('mq4fBNdckGtvY2mijd9am7DRsbRB4KjUkf', 'cN55daf1HotwBAgAKWVgDcoppmUNDtQSfb7XLutTLeAgVc3u8hik'),
129 | AddressKeyPair('mpFAHDjX7KregM3rVotdXzQmkbwtbQEnZ6', 'cT7qK7g1wkYEMvKowd2ZrX1E5f6JQ7TM246UfqbCiyF7kZhorpX3'),
130 | AddressKeyPair('mzRe8QZMfGi58KyWCse2exxEFry2sfF2Y7', 'cPiRWE8KMjTRxH1MWkPerhfoHFn5iHPWVK5aPqjW8NxmdwenFinJ'),
131 | ]
132 |
133 | def get_deterministic_priv_key(self):
134 | """Return a deterministic priv key in base58, that only depends on the node's index"""
135 | assert len(self.PRIV_KEYS) == MAX_NODES
136 | return self.PRIV_KEYS[self.index]
137 |
138 | def get_mem_rss_kilobytes(self):
139 | """Get the memory usage (RSS) per `ps`.
140 |
141 | Returns None if `ps` is unavailable.
142 | """
143 | assert self.running
144 |
145 | try:
146 | return int(subprocess.check_output(
147 | ["ps", "h", "-o", "rss", "{}".format(self.process.pid)],
148 | stderr=subprocess.DEVNULL).split()[-1])
149 |
150 | # Avoid failing on platforms where ps isn't installed.
151 | #
152 | # We could later use something like `psutils` to work across platforms.
153 | except (FileNotFoundError, subprocess.SubprocessError):
154 | self.log.exception("Unable to get memory usage")
155 | return None
156 |
157 | def _node_msg(self, msg: str) -> str:
158 | """Return a modified msg that identifies this node by its index as a debugging aid."""
159 | return "[node %d] %s" % (self.index, msg)
160 |
161 | def _raise_assertion_error(self, msg: str):
162 | """Raise an AssertionError with msg modified to identify this node."""
163 | raise AssertionError(self._node_msg(msg))
164 |
165 | def __del__(self):
166 | # Ensure that we don't leave any bitcoind processes lying around after
167 | # the test ends
168 | if self.process and self.cleanup_on_exit:
169 | # Should only happen on test failure
170 | # Avoid using logger, as that may have already been shutdown when
171 | # this destructor is called.
172 | print(self._node_msg("Cleaning up leftover process"))
173 | self.process.kill()
174 |
175 | def __getattr__(self, name):
176 |         """Dispatches any unrecognised method calls to the RPC connection or a CLI instance."""
177 | if self.use_cli:
178 | return getattr(self.cli, name)
179 | else:
180 | assert self.rpc_connected and self.rpc is not None, self._node_msg("Error: no RPC connection")
181 | return getattr(self.rpc, name)
182 |
183 | def start(self, extra_args=None, *, cwd=None, stdout=None, stderr=None, **kwargs):
184 | """Start the node."""
185 | if extra_args is None:
186 | extra_args = self.extra_args
187 |
188 | # Add a new stdout and stderr file each time bitcoind is started
189 | if stderr is None:
190 | stderr = tempfile.NamedTemporaryFile(dir=self.stderr_dir, delete=False)
191 | if stdout is None:
192 | stdout = tempfile.NamedTemporaryFile(dir=self.stdout_dir, delete=False)
193 | self.stderr = stderr
194 | self.stdout = stdout
195 |
196 | if cwd is None:
197 | cwd = self.cwd
198 |
199 | # Delete any existing cookie file -- if such a file exists (eg due to
200 | # unclean shutdown), it will get overwritten anyway by bitcoind, and
201 | # potentially interfere with our attempt to authenticate
202 | delete_cookie_file(self.datadir, self.chain)
203 |
204 | # add environment variable LIBC_FATAL_STDERR_=1 so that libc errors are written to stderr and not the terminal
205 | subp_env = dict(os.environ, LIBC_FATAL_STDERR_="1")
206 |
207 | self.process = subprocess.Popen(self.args + extra_args, env=subp_env, stdout=stdout, stderr=stderr, cwd=cwd, **kwargs)
208 |
209 | self.running = True
210 | self.log.debug("bitcoind started, waiting for RPC to come up")
211 |
212 | if self.start_perf:
213 | self._start_perf()
214 |
215 | def wait_for_rpc_connection(self):
216 |         """Sets up an RPC connection to the bitcoind process. Raises an assertion error if unable to connect."""
217 | # Poll at a rate of four times per second
218 | poll_per_s = 4
219 | for _ in range(poll_per_s * self.rpc_timeout):
220 | if self.process.poll() is not None:
221 | raise FailedToStartError(self._node_msg(
222 | 'bitcoind exited with status {} during initialization'.format(self.process.returncode)))
223 | try:
224 | rpc = get_rpc_proxy(rpc_url(self.datadir, self.index, self.chain, self.rpchost), self.index, timeout=self.rpc_timeout, coveragedir=self.coverage_dir)
225 | rpc.getblockcount()
226 | # If the call to getblockcount() succeeds then the RPC connection is up
227 | self.log.debug("RPC successfully started")
228 | if self.use_cli:
229 | return
230 | self.rpc = rpc
231 | self.rpc_connected = True
232 | self.url = self.rpc.url
233 | return
234 | except IOError as e:
235 | if e.errno != errno.ECONNREFUSED: # Port not yet open?
236 | raise # unknown IO error
237 | except JSONRPCException as e: # Initialization phase
238 | # -28 RPC in warmup
239 | # -342 Service unavailable, RPC server started but is shutting down due to error
240 | if e.error['code'] != -28 and e.error['code'] != -342:
241 | raise # unknown JSON RPC exception
242 |             except ValueError as e:  # cookie file not found and no rpcuser or rpcpassword. bitcoind is still starting
243 | if "No RPC credentials" not in str(e):
244 | raise
245 | time.sleep(1.0 / poll_per_s)
246 | self._raise_assertion_error("Unable to connect to bitcoind")
247 |
248 | def generate(self, nblocks, maxtries=1000000):
249 | self.log.debug("TestNode.generate() dispatches `generate` call to `generatetoaddress`")
250 | return self.generatetoaddress(nblocks=nblocks, address=self.get_deterministic_priv_key().address, maxtries=maxtries)
251 |
252 | def get_wallet_rpc(self, wallet_name):
253 | if self.use_cli:
254 | return self.cli("-rpcwallet={}".format(wallet_name))
255 | else:
256 | assert self.rpc_connected and self.rpc, self._node_msg("RPC not connected")
257 | wallet_path = "wallet/{}".format(urllib.parse.quote(wallet_name))
258 | return self.rpc / wallet_path
259 |
260 | def stop_node(self, expected_stderr='', wait=0):
261 | """Stop the node."""
262 | if not self.running:
263 | return
264 | self.log.debug("Stopping node")
265 | try:
266 | self.stop(wait=wait)
267 | except http.client.CannotSendRequest:
268 | self.log.exception("Unable to stop node.")
269 |
270 | # If there are any running perf processes, stop them.
271 | for profile_name in tuple(self.perf_subprocesses.keys()):
272 | self._stop_perf(profile_name)
273 |
274 | # Check that stderr is as expected
275 | self.stderr.seek(0)
276 | stderr = self.stderr.read().decode('utf-8').strip()
277 | if stderr != expected_stderr:
278 | raise AssertionError("Unexpected stderr {} != {}".format(stderr, expected_stderr))
279 |
280 | self.stdout.close()
281 | self.stderr.close()
282 |
283 | del self.p2ps[:]
284 |
285 | def is_node_stopped(self):
286 | """Checks whether the node has stopped.
287 |
288 | Returns True if the node has stopped. False otherwise.
289 | This method is responsible for freeing resources (self.process)."""
290 | if not self.running:
291 | return True
292 | return_code = self.process.poll()
293 | if return_code is None:
294 | return False
295 |
296 | # process has stopped. Assert that it didn't return an error code.
297 | assert return_code == 0, self._node_msg(
298 | "Node returned non-zero exit code (%d) when stopping" % return_code)
299 | self.running = False
300 | self.process = None
301 | self.rpc_connected = False
302 | self.rpc = None
303 | self.log.debug("Node stopped")
304 | return True
305 |
306 | def wait_until_stopped(self, timeout=BITCOIND_PROC_WAIT_TIMEOUT):
307 | wait_until(self.is_node_stopped, timeout=timeout)
308 |
309 | @contextlib.contextmanager
310 | def assert_debug_log(self, expected_msgs, timeout=2):
311 | time_end = time.time() + timeout
312 | debug_log = os.path.join(self.datadir, self.chain, 'debug.log')
313 | with open(debug_log, encoding='utf-8') as dl:
314 | dl.seek(0, 2)
315 | prev_size = dl.tell()
316 | try:
317 | yield
318 | finally:
319 | while True:
320 | found = True
321 | with open(debug_log, encoding='utf-8') as dl:
322 | dl.seek(prev_size)
323 | log = dl.read()
324 | print_log = " - " + "\n - ".join(log.splitlines())
325 | for expected_msg in expected_msgs:
326 | if re.search(re.escape(expected_msg), log, flags=re.MULTILINE) is None:
327 | found = False
328 | if found:
329 | return
330 | if time.time() >= time_end:
331 | break
332 | time.sleep(0.05)
333 |         self._raise_assertion_error('Expected messages "{}" do not partially match log:\n\n{}\n\n'.format(str(expected_msgs), print_log))
334 |
335 | @contextlib.contextmanager
336 | def assert_memory_usage_stable(self, *, increase_allowed=0.03):
337 | """Context manager that allows the user to assert that a node's memory usage (RSS)
338 | hasn't increased beyond some threshold percentage.
339 |
340 | Args:
341 | increase_allowed (float): the fractional increase in memory allowed until failure;
342 | e.g. `0.12` for up to 12% increase allowed.
343 | """
344 | before_memory_usage = self.get_mem_rss_kilobytes()
345 |
346 | yield
347 |
348 | after_memory_usage = self.get_mem_rss_kilobytes()
349 |
350 | if not (before_memory_usage and after_memory_usage):
351 | self.log.warning("Unable to detect memory usage (RSS) - skipping memory check.")
352 | return
353 |
354 | perc_increase_memory_usage = (after_memory_usage / before_memory_usage) - 1
355 |
356 | if perc_increase_memory_usage > increase_allowed:
357 | self._raise_assertion_error(
358 | "Memory usage increased over threshold of {:.3f}% from {} to {} ({:.3f}%)".format(
359 | increase_allowed * 100, before_memory_usage, after_memory_usage,
360 | perc_increase_memory_usage * 100))
361 |
362 | @contextlib.contextmanager
363 | def profile_with_perf(self, profile_name):
364 | """
365 | Context manager that allows easy profiling of node activity using `perf`.
366 |
367 | See `test/functional/README.md` for details on perf usage.
368 |
369 | Args:
370 | profile_name (str): This string will be appended to the
371 | profile data filename generated by perf.
372 | """
373 | subp = self._start_perf(profile_name)
374 |
375 | yield
376 |
377 | if subp:
378 | self._stop_perf(profile_name)
379 |
380 | def _start_perf(self, profile_name=None):
381 | """Start a perf process to profile this node.
382 |
383 | Returns the subprocess running perf."""
384 | subp = None
385 |
386 | def test_success(cmd):
387 | return subprocess.call(
388 | # shell=True required for pipe use below
389 | cmd, shell=True,
390 | stderr=subprocess.DEVNULL, stdout=subprocess.DEVNULL) == 0
391 |
392 | if not sys.platform.startswith('linux'):
393 | self.log.warning("Can't profile with perf; only available on Linux platforms")
394 | return None
395 |
396 | if not test_success('which perf'):
397 | self.log.warning("Can't profile with perf; must install perf-tools")
398 | return None
399 |
400 | if not test_success('readelf -S {} | grep .debug_str'.format(shlex.quote(self.binary))):
401 | self.log.warning(
402 | "perf output won't be very useful without debug symbols compiled into bitcoind")
403 |
404 | output_path = tempfile.NamedTemporaryFile(
405 | dir=self.datadir,
406 | prefix="{}.perf.data.".format(profile_name or 'test'),
407 | delete=False,
408 | ).name
409 |
410 | cmd = [
411 | 'perf', 'record',
412 | '-g', # Record the callgraph.
413 | '--call-graph', 'dwarf', # Compatibility for gcc's --fomit-frame-pointer.
414 | '-F', '101', # Sampling frequency in Hz.
415 | '-p', str(self.process.pid),
416 | '-o', output_path,
417 | ]
418 | subp = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
419 | self.perf_subprocesses[profile_name] = subp
420 |
421 | return subp
422 |
423 | def _stop_perf(self, profile_name):
424 | """Stop (and pop) a perf subprocess."""
425 | subp = self.perf_subprocesses.pop(profile_name)
426 | output_path = subp.args[subp.args.index('-o') + 1]
427 |
428 | subp.terminate()
429 | subp.wait(timeout=10)
430 |
431 | stderr = subp.stderr.read().decode()
432 | if 'Consider tweaking /proc/sys/kernel/perf_event_paranoid' in stderr:
433 | self.log.warning(
434 | "perf couldn't collect data! Try "
435 | "'sudo sysctl -w kernel.perf_event_paranoid=-1'")
436 | else:
437 | report_cmd = "perf report -i {}".format(output_path)
438 | self.log.info("See perf output by running '{}'".format(report_cmd))
439 |
440 | def assert_start_raises_init_error(self, extra_args=None, expected_msg=None, match=ErrorMatch.FULL_TEXT, *args, **kwargs):
441 | """Attempt to start the node and expect it to raise an error.
442 |
443 | extra_args: extra arguments to pass through to bitcoind
444 | expected_msg: regex that stderr should match when bitcoind fails
445 |
446 | Will throw if bitcoind starts without an error.
447 |         Will throw if an expected_msg is provided and it does not match bitcoind's stderr."""
448 | with tempfile.NamedTemporaryFile(dir=self.stderr_dir, delete=False) as log_stderr, \
449 | tempfile.NamedTemporaryFile(dir=self.stdout_dir, delete=False) as log_stdout:
450 | try:
451 | self.start(extra_args, stdout=log_stdout, stderr=log_stderr, *args, **kwargs)
452 | self.wait_for_rpc_connection()
453 | self.stop_node()
454 | self.wait_until_stopped()
455 | except FailedToStartError as e:
456 | self.log.debug('bitcoind failed to start: %s', e)
457 | self.running = False
458 | self.process = None
459 | # Check stderr for expected message
460 | if expected_msg is not None:
461 | log_stderr.seek(0)
462 | stderr = log_stderr.read().decode('utf-8').strip()
463 | if match == ErrorMatch.PARTIAL_REGEX:
464 | if re.search(expected_msg, stderr, flags=re.MULTILINE) is None:
465 | self._raise_assertion_error(
466 | 'Expected message "{}" does not partially match stderr:\n"{}"'.format(expected_msg, stderr))
467 | elif match == ErrorMatch.FULL_REGEX:
468 | if re.fullmatch(expected_msg, stderr) is None:
469 | self._raise_assertion_error(
470 | 'Expected message "{}" does not fully match stderr:\n"{}"'.format(expected_msg, stderr))
471 | elif match == ErrorMatch.FULL_TEXT:
472 | if expected_msg != stderr:
473 | self._raise_assertion_error(
474 | 'Expected message "{}" does not fully match stderr:\n"{}"'.format(expected_msg, stderr))
475 | else:
476 | if expected_msg is None:
477 | assert_msg = "bitcoind should have exited with an error"
478 | else:
479 | assert_msg = "bitcoind should have exited with expected error " + expected_msg
480 | self._raise_assertion_error(assert_msg)
481 |
482 | def add_p2p_connection(self, p2p_conn, *, wait_for_verack=True, **kwargs):
483 | """Add a p2p connection to the node.
484 |
485 | This method adds the p2p connection to the self.p2ps list and also
486 | returns the connection to the caller."""
487 | if 'dstport' not in kwargs:
488 | kwargs['dstport'] = p2p_port(self.index)
489 | if 'dstaddr' not in kwargs:
490 | kwargs['dstaddr'] = '127.0.0.1'
491 |
492 | p2p_conn.peer_connect(**kwargs)()
493 | self.p2ps.append(p2p_conn)
494 | if wait_for_verack:
495 | p2p_conn.wait_for_verack()
496 |
497 | return p2p_conn
498 |
499 | @property
500 | def p2p(self):
501 | """Return the first p2p connection
502 |
503 | Convenience property - most tests only use a single p2p connection to each
504 | node, so this saves having to write node.p2ps[0] many times."""
505 | assert self.p2ps, self._node_msg("No p2p connection")
506 | return self.p2ps[0]
507 |
508 | def disconnect_p2ps(self):
509 | """Close all p2p connections to the node."""
510 | for p in self.p2ps:
511 | p.peer_disconnect()
512 | del self.p2ps[:]
513 |
514 | class TestNodeCLIAttr:
515 | def __init__(self, cli, command):
516 | self.cli = cli
517 | self.command = command
518 |
519 | def __call__(self, *args, **kwargs):
520 | return self.cli.send_cli(self.command, *args, **kwargs)
521 |
522 | def get_request(self, *args, **kwargs):
523 | return lambda: self(*args, **kwargs)
524 |
525 | def arg_to_cli(arg):
526 | if isinstance(arg, bool):
527 | return str(arg).lower()
528 | elif isinstance(arg, dict) or isinstance(arg, list):
529 | return json.dumps(arg)
530 | else:
531 | return str(arg)
532 |
533 | class TestNodeCLI():
534 | """Interface to bitcoin-cli for an individual node"""
535 |
536 | def __init__(self, binary, datadir):
537 | self.options = []
538 | self.binary = binary
539 | self.datadir = datadir
540 | self.input = None
541 | self.log = logging.getLogger('TestFramework.bitcoincli')
542 |
543 | def __call__(self, *options, input=None):
544 | # TestNodeCLI is callable with bitcoin-cli command-line options
545 | cli = TestNodeCLI(self.binary, self.datadir)
546 | cli.options = [str(o) for o in options]
547 | cli.input = input
548 | return cli
549 |
550 | def __getattr__(self, command):
551 | return TestNodeCLIAttr(self, command)
552 |
553 | def batch(self, requests):
554 | results = []
555 | for request in requests:
556 | try:
557 | results.append(dict(result=request()))
558 | except JSONRPCException as e:
559 | results.append(dict(error=e))
560 | return results
561 |
562 | def send_cli(self, command=None, *args, **kwargs):
563 | """Run bitcoin-cli command. Deserializes returned string as python object."""
564 | pos_args = [arg_to_cli(arg) for arg in args]
565 | named_args = [str(key) + "=" + arg_to_cli(value) for (key, value) in kwargs.items()]
566 | assert not (pos_args and named_args), "Cannot use positional arguments and named arguments in the same bitcoin-cli call"
567 | p_args = [self.binary, "-datadir=" + self.datadir] + self.options
568 | if named_args:
569 | p_args += ["-named"]
570 | if command is not None:
571 | p_args += [command]
572 | p_args += pos_args + named_args
573 | self.log.debug("Running bitcoin-cli command: %s" % command)
574 | process = subprocess.Popen(p_args, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
575 | cli_stdout, cli_stderr = process.communicate(input=self.input)
576 | returncode = process.poll()
577 | if returncode:
578 | match = re.match(r'error code: ([-0-9]+)\nerror message:\n(.*)', cli_stderr)
579 | if match:
580 | code, message = match.groups()
581 | raise JSONRPCException(dict(code=int(code), message=message))
582 | # Ignore cli_stdout, raise with cli_stderr
583 | raise subprocess.CalledProcessError(returncode, self.binary, output=cli_stderr)
584 | try:
585 | return json.loads(cli_stdout, parse_float=decimal.Decimal)
586 | except json.JSONDecodeError:
587 | return cli_stdout.rstrip("\n")
588 |
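# Illustrative usage sketch (hypothetical binary and datadir paths):
#
#   cli = TestNodeCLI("/usr/local/bin/bitcoin-cli", "/tmp/node0")
#   height = cli.getblockcount()                # __getattr__ -> send_cli
#   info = cli("-rpcwait").getblockchaininfo()  # __call__ injects CLI options
#   results = cli.batch([cli.getblockcount.get_request()])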
--------------------------------------------------------------------------------
/src/bitcoin/test_framework.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # Copyright (c) 2014-2019 The Bitcoin Core developers
3 | # Distributed under the MIT software license, see the accompanying
4 | # file COPYING or http://www.opensource.org/licenses/mit-license.php.
5 | """Base class for RPC testing."""
6 |
7 | import configparser
8 | from enum import Enum
9 | import logging
10 | import argparse
11 | import os
12 | import pdb
13 | import random
14 | import shutil
15 | import sys
16 | import tempfile
17 | import time
18 |
19 | from .authproxy import JSONRPCException
20 | from . import coverage
21 | from .test_node import TestNode
22 | from .mininode import NetworkThread
23 | from .util import (
24 | MAX_NODES,
25 | PortSeed,
26 | assert_equal,
27 | check_json_precision,
28 | connect_nodes_bi,
29 | disconnect_nodes,
30 | get_datadir_path,
31 | initialize_datadir,
32 | sync_blocks,
33 | sync_mempools,
34 | )
35 |
36 | class TestStatus(Enum):
37 | PASSED = 1
38 | FAILED = 2
39 | SKIPPED = 3
40 |
41 | TEST_EXIT_PASSED = 0
42 | TEST_EXIT_FAILED = 1
43 | TEST_EXIT_SKIPPED = 77
44 |
45 | TMPDIR_PREFIX = "bitcoin_func_test_"
46 |
47 |
48 | class SkipTest(Exception):
49 | """This exception is raised to skip a test"""
50 |
51 | def __init__(self, message):
52 | self.message = message
53 |
54 |
55 | class BitcoinTestMetaClass(type):
56 | """Metaclass for BitcoinTestFramework.
57 |
58 | Ensures that any attempt to register a subclass of `BitcoinTestFramework`
59 | adheres to a standard whereby the subclass overrides `set_test_params` and
60 | `run_test` but DOES NOT override either `__init__` or `main`. If any of
61 | those standards are violated, a ``TypeError`` is raised."""
62 |
63 | def __new__(cls, clsname, bases, dct):
64 | if clsname != 'BitcoinTestFramework':
65 | if not ('run_test' in dct and 'set_test_params' in dct):
66 | raise TypeError("BitcoinTestFramework subclasses must override "
67 | "'run_test' and 'set_test_params'")
68 | if '__init__' in dct or 'main' in dct:
69 | raise TypeError("BitcoinTestFramework subclasses may not override "
70 | "'__init__' or 'main'")
71 |
72 | return super().__new__(cls, clsname, bases, dct)
73 |
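# Minimal subclass sketch (hypothetical test) illustrating the contract the
# metaclass enforces: override set_test_params() and run_test(), never
# __init__() or main():
#
#   class ExampleTest(BitcoinTestFramework):
#       def set_test_params(self):
#           self.num_nodes = 2
#
#       def run_test(self):
#           self.log.info("Tip: %s", self.nodes[0].getbestblockhash())
#
#   if __name__ == '__main__':
#       ExampleTest().main()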
74 |
75 | class BitcoinTestFramework(metaclass=BitcoinTestMetaClass):
76 | """Base class for a bitcoin test script.
77 |
78 | Individual bitcoin test scripts should subclass this class and override the set_test_params() and run_test() methods.
79 |
80 | Individual tests can also override the following methods to customize the test setup:
81 |
82 | - add_options()
83 | - setup_chain()
84 | - setup_network()
85 | - setup_nodes()
86 |
87 | The __init__() and main() methods should not be overridden.
88 |
89 | This class also contains various public and private helper methods."""
90 |
91 | def __init__(self):
92 | """Sets test framework defaults. Do not override this method. Instead, override the set_test_params() method"""
93 | self.chain = 'regtest'
94 | self.setup_clean_chain = False
95 | self.nodes = []
96 | self.network_thread = None
97 | self.rpc_timeout = 60 # Wait for up to 60 seconds for the RPC server to respond
98 | self.supports_cli = False
99 | self.bind_to_localhost_only = True
100 | self.default_wallet_name = "default_wallet"
101 | self.wallet_data_filename = "wallet.dat"
102 | self.wallet_names = None
103 | self.set_test_params()
104 |
105 | assert hasattr(self, "num_nodes"), "Test must set self.num_nodes in set_test_params()"
106 |
107 | def main(self):
108 | """Main function. This should not be overridden by the subclass test scripts."""
109 |
110 | self.parse_args()
111 |
112 | try:
113 | e = None
114 | self.setup()
115 | self.run_test()
116 | except BaseException as exception:
117 | e = exception
118 | self.shutdown(e=e, exit=True)
119 |
120 | def parse_args(self):
121 | parser = argparse.ArgumentParser(usage="%(prog)s [options]")
122 | parser.add_argument("--nocleanup", dest="nocleanup", default=False, action="store_true",
123 | help="Leave bitcoinds and test.* datadir on exit or error")
124 | parser.add_argument("--noshutdown", dest="noshutdown", default=False, action="store_true",
125 | help="Don't stop bitcoinds after the test execution")
126 | parser.add_argument("--cachedir", dest="cachedir", default=os.path.abspath(os.path.dirname(os.path.realpath(__file__)) + "/../../cache"),
127 | help="Directory for caching pregenerated datadirs (default: %(default)s)")
128 | parser.add_argument("--tmpdir", dest="tmpdir", help="Root directory for datadirs")
129 | parser.add_argument("-l", "--loglevel", dest="loglevel", default="INFO",
130 | help="log events at this level and higher to the console. Can be set to DEBUG, INFO, WARNING, ERROR or CRITICAL. Passing --loglevel DEBUG will output all logs to console. Note that logs at all levels are always written to the src.bitcoin.log file in the temporary test directory.")
131 | parser.add_argument("--tracerpc", dest="trace_rpc", default=False, action="store_true",
132 | help="Print out all RPC calls as they are made")
133 | parser.add_argument("--portseed", dest="port_seed", default=os.getpid(), type=int,
134 | help="The seed to use for assigning port numbers (default: current process id)")
135 | parser.add_argument("--coveragedir", dest="coveragedir",
136 | help="Write tested RPC commands into this directory")
137 | parser.add_argument("--configfile", dest="configfile",
138 | default=os.path.abspath(os.path.dirname(os.path.realpath(__file__)) + "/../../config.ini"),
139 | help="Location of the test framework config file (default: %(default)s)")
140 | parser.add_argument("--pdbonfailure", dest="pdbonfailure", default=False, action="store_true",
141 | help="Attach a python debugger if test fails")
142 | parser.add_argument("--usecli", dest="usecli", default=False, action="store_true",
143 | help="use bitcoin-cli instead of RPC for all commands")
144 | parser.add_argument("--perf", dest="perf", default=False, action="store_true",
145 | help="profile running nodes with perf for the duration of the test")
146 | parser.add_argument("--randomseed", type=int,
147 | help="set a random seed for deterministically reproducing a previous test run")
148 | self.add_options(parser)
149 | self.options = parser.parse_args()
150 |
151 | # Methods to encapsulate setup and shutdown of test.
152 | def setup(self):
153 | """Call this method to startup the test object with options already set."""
154 |
155 | PortSeed.n = self.options.port_seed
156 |
157 | check_json_precision()
158 |
159 | self.options.cachedir = os.path.abspath(self.options.cachedir)
160 |
161 | config = configparser.ConfigParser()
162 | config.read_file(open(self.options.configfile))
163 | self.config = config
164 | self.options.bitcoind = os.getenv("BITCOIND", default=config["environment"]["BUILDDIR"] + '/src/bitcoind' + config["environment"]["EXEEXT"])
165 | self.options.bitcoincli = os.getenv("BITCOINCLI", default=config["environment"]["BUILDDIR"] + '/src/bitcoin-cli' + config["environment"]["EXEEXT"])
166 |
167 | os.environ['PATH'] = os.pathsep.join([
168 | os.path.join(config['environment']['BUILDDIR'], 'src'),
169 | os.path.join(config['environment']['BUILDDIR'], 'src', 'qt'),
170 | os.environ['PATH']
171 | ])
172 |
173 | # Set up temp directory and start logging
174 | if self.options.tmpdir:
175 | self.options.tmpdir = os.path.abspath(self.options.tmpdir)
176 | os.makedirs(self.options.tmpdir, exist_ok=False)
177 | else:
178 | self.options.tmpdir = tempfile.mkdtemp(prefix=TMPDIR_PREFIX)
179 | self._start_logging()
180 |
181 | # Seed the PRNG. Note that test runs are reproducible if and only if
182 | # a single thread accesses the PRNG. For more information, see
183 | # https://docs.python.org/3/library/random.html#notes-on-reproducibility.
184 | # The network thread shouldn't access random. If we need to change the
185 | # network thread to access randomness, it should instantiate its own
186 | # random.Random object.
187 | seed = self.options.randomseed
188 |
189 | if seed is None:
190 | seed = random.randrange(sys.maxsize)
191 | else:
192 | self.log.debug("User supplied random seed {}".format(seed))
193 |
194 | random.seed(seed)
195 | self.log.debug("PRNG seed is: {}".format(seed))
196 |
197 | self.log.debug('Setting up network thread')
198 | self.network_thread = NetworkThread()
199 | self.network_thread.start()
200 |
201 | self.success = TestStatus.FAILED
202 |
203 | if self.options.usecli:
204 | if not self.supports_cli:
205 | raise SkipTest("--usecli specified but test does not support using CLI")
206 | self.skip_if_no_cli()
207 | self.skip_test_if_missing_module()
208 | self.setup_chain()
209 | self.setup_network()
210 |
211 | def shutdown(self, e=None, exit=False):
212 | """Call this method to shutdown the test object and optionally handle an exception."""
213 |
214 | if e != None:
215 | self.handle_exception(e)
216 | else:
217 | self.success = TestStatus.PASSED
218 |
219 | if self.success == TestStatus.FAILED and self.options.pdbonfailure:
220 | print("Testcase failed. Attaching python debugger. Enter ? for help")
221 | pdb.set_trace()
222 |
223 | self.log.debug('Closing down network thread')
224 | self.network_thread.close()
225 | if not self.options.noshutdown:
226 | self.log.info("Stopping nodes")
227 | if self.nodes:
228 | self.stop_nodes()
229 | else:
230 | for node in self.nodes:
231 | node.cleanup_on_exit = False
232 | self.log.info("Note: bitcoinds were not stopped and may still be running")
233 |
234 | should_clean_up = (
235 | not self.options.nocleanup and
236 | not self.options.noshutdown and
237 | self.success != TestStatus.FAILED and
238 | not self.options.perf
239 | )
240 | if should_clean_up:
241 | self.log.info("Cleaning up {} on exit".format(self.options.tmpdir))
242 | cleanup_tree_on_exit = True
243 | elif self.options.perf:
244 | self.log.warning("Not cleaning up dir {} due to perf data".format(self.options.tmpdir))
245 | cleanup_tree_on_exit = False
246 | else:
247 | self.log.warning("Not cleaning up dir {}".format(self.options.tmpdir))
248 | cleanup_tree_on_exit = False
249 |
250 | if self.success == TestStatus.PASSED:
251 | self.log.info("Tests successful")
252 | exit_code = TEST_EXIT_PASSED
253 | elif self.success == TestStatus.SKIPPED:
254 | self.log.info("Test skipped")
255 | exit_code = TEST_EXIT_SKIPPED
256 | else:
257 | self.log.error("Test failed. Test logging available at %s/src.bitcoin.log", self.options.tmpdir)
258 | self.log.error("Hint: Call {} '{}' to consolidate all logs".format(os.path.normpath(os.path.dirname(os.path.realpath(__file__)) + "/../combine_logs.py"), self.options.tmpdir))
259 | exit_code = TEST_EXIT_FAILED
260 | logging.shutdown()
261 | if cleanup_tree_on_exit:
262 | shutil.rmtree(self.options.tmpdir)
263 |
264 | self.nodes.clear()
265 |
266 | if exit:
267 | sys.exit(exit_code)
268 |
269 | def handle_exception(self, e):
270 | if isinstance(e, JSONRPCException):
271 | self.log.exception("JSONRPC error")
272 | elif isinstance(e, SkipTest):
273 | self.log.warning("Test Skipped: %s" % e.message)
274 | self.success = TestStatus.SKIPPED
275 | elif isinstance(e, AssertionError):
276 | self.log.exception("Assertion failed")
277 | elif isinstance(e, KeyError):
278 | self.log.exception("Key error")
279 | elif isinstance(e, Exception):
280 | self.log.exception("Unexpected exception caught during testing")
281 | elif isinstance(e, KeyboardInterrupt):
282 | self.log.warning("Exiting after keyboard interrupt")
283 |
284 | # Methods to override in subclass test scripts.
285 | def set_test_params(self):
286 | """Tests must this method to change default values for number of nodes, topology, etc"""
287 | raise NotImplementedError
288 |
289 | def add_options(self, parser):
290 | """Override this method to add command-line options to the test"""
291 | pass
292 |
293 | def skip_test_if_missing_module(self):
294 | """Override this method to skip a test if a module is not compiled"""
295 | pass
296 |
297 | def setup_chain(self):
298 | """Override this method to customize blockchain setup"""
299 | self.log.info("Initializing test directory " + self.options.tmpdir)
300 | if self.setup_clean_chain:
301 | self._initialize_chain_clean()
302 | else:
303 | self._initialize_chain()
304 |
305 | def setup_network(self):
306 | """Override this method to customize test network topology"""
307 | self.setup_nodes()
308 |
309 | # Connect the nodes as a "chain". This allows us
310 | # to split the network between nodes 1 and 2 to get
311 | # two halves that can work on competing chains.
312 | for i in range(self.num_nodes - 1):
313 | connect_nodes_bi(self.nodes, i, i + 1)
314 | self.sync_all()
315 |
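# With num_nodes == 4, the loop above yields the line topology
# 0 <-> 1 <-> 2 <-> 3, which split_network() further below cuts between
# nodes 1 and 2 into two independently mining halves.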
316 | def setup_nodes(self):
317 | """Override this method to customize test node setup"""
318 | extra_args = [[]] * self.num_nodes
319 | if hasattr(self, "extra_args"):
320 | extra_args = self.extra_args
321 | self.add_nodes(self.num_nodes, extra_args)
322 | self.start_nodes()
323 | if self.is_wallet_compiled():
324 | self.import_deterministic_coinbase_privkeys()
325 | if not self.setup_clean_chain:
326 | for n in self.nodes:
327 | assert_equal(n.getblockchaininfo()["blocks"], 199)
328 | # To ensure that all nodes are out of IBD, the most recent block
329 | # must have a timestamp not too old (see IsInitialBlockDownload()).
330 | self.log.debug('Generate a block with current time')
331 | block_hash = self.nodes[0].generate(1)[0]
332 | block = self.nodes[0].getblock(blockhash=block_hash, verbosity=0)
333 | for n in self.nodes:
334 | n.submitblock(block)
335 | chain_info = n.getblockchaininfo()
336 | assert_equal(chain_info["blocks"], 200)
337 | assert_equal(chain_info["initialblockdownload"], False)
338 |
339 | def import_deterministic_coinbase_privkeys(self):
340 | for i in range(self.num_nodes):
341 | self.init_wallet(i)
342 |
343 | def init_wallet(self, i):
344 | wallet_name = self.default_wallet_name if self.wallet_names is None else self.wallet_names[i] if i < len(self.wallet_names) else False
345 | if wallet_name is not False:
346 | n = self.nodes[i]
347 | if wallet_name is not None:
348 | n.createwallet(wallet_name=wallet_name, load_on_startup=True, descriptors=False)
349 | n.importprivkey(privkey=n.get_deterministic_priv_key().key, label='coinbase')
350 |
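# Note on init_wallet() above: wallet_name resolves to the framework default
# when self.wallet_names is None, to the i-th listed name when one exists for
# this node, and to False (create no wallet) when the list is shorter than
# num_nodes; a None entry skips createwallet but still imports the coinbase key.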
351 | def run_test(self):
352 | """Tests must override this method to define test logic"""
353 | raise NotImplementedError
354 |
355 | # Public helper methods. These can be accessed by the subclass test scripts.
356 |
357 | def add_nodes(self, num_nodes, extra_args=None, *, rpchost=None, binary=None):
358 | """Instantiate TestNode objects.
359 |
360 | Should only be called once after the nodes have been specified in
361 | set_test_params()."""
362 | if self.bind_to_localhost_only:
363 | extra_confs = [["bind=127.0.0.1"]] * num_nodes
364 | else:
365 | extra_confs = [[]] * num_nodes
366 | if extra_args is None:
367 | extra_args = [[]] * num_nodes
368 | if binary is None:
369 | binary = [self.options.bitcoind] * num_nodes
370 | assert_equal(len(extra_confs), num_nodes)
371 | assert_equal(len(extra_args), num_nodes)
372 | assert_equal(len(binary), num_nodes)
373 | for i in range(num_nodes):
374 | self.nodes.append(TestNode(
375 | i,
376 | get_datadir_path(self.options.tmpdir, i),
377 | chain=self.chain,
378 | rpchost=rpchost,
379 | timewait=self.rpc_timeout,
380 | bitcoind=binary[i],
381 | bitcoin_cli=self.options.bitcoincli,
382 | coverage_dir=self.options.coveragedir,
383 | cwd=self.options.tmpdir,
384 | extra_conf=extra_confs[i],
385 | extra_args=extra_args[i],
386 | use_cli=self.options.usecli,
387 | start_perf=self.options.perf,
388 | ))
389 |
390 | def start_node(self, i, *args, **kwargs):
391 | """Start a bitcoind"""
392 |
393 | node = self.nodes[i]
394 |
395 | node.start(*args, **kwargs)
396 | node.wait_for_rpc_connection()
397 |
398 | if self.options.coveragedir is not None:
399 | coverage.write_all_rpc_commands(self.options.coveragedir, node.rpc)
400 |
401 | def start_nodes(self, extra_args=None, *args, **kwargs):
402 | """Start multiple bitcoinds"""
403 |
404 | if extra_args is None:
405 | extra_args = [None] * self.num_nodes
406 | assert_equal(len(extra_args), self.num_nodes)
407 | try:
408 | for i, node in enumerate(self.nodes):
409 | node.start(extra_args[i], *args, **kwargs)
410 | for node in self.nodes:
411 | node.wait_for_rpc_connection()
412 | except:
413 | # If one node failed to start, stop the others
414 | self.stop_nodes()
415 | raise
416 |
417 | if self.options.coveragedir is not None:
418 | for node in self.nodes:
419 | coverage.write_all_rpc_commands(self.options.coveragedir, node.rpc)
420 |
421 | def stop_node(self, i, expected_stderr='', wait=0):
422 | """Stop a bitcoind test node"""
423 | self.nodes[i].stop_node(expected_stderr, wait=wait)
424 | self.nodes[i].wait_until_stopped()
425 |
426 | def stop_nodes(self, wait=0):
427 | """Stop multiple bitcoind test nodes"""
428 | for node in self.nodes:
429 | # Issue RPC to stop nodes
430 | node.stop_node(wait=wait)
431 |
432 | for node in self.nodes:
433 | # Wait for nodes to stop
434 | node.wait_until_stopped()
435 |
436 | def restart_node(self, i, extra_args=None):
437 | """Stop and start a test node"""
438 | self.stop_node(i)
439 | self.start_node(i, extra_args)
440 |
441 | def wait_for_node_exit(self, i, timeout):
442 | self.nodes[i].process.wait(timeout)
443 |
444 | def split_network(self):
445 | """
446 | Split the network of four nodes into nodes 0/1 and 2/3.
447 | """
448 | disconnect_nodes(self.nodes[1], 2)
449 | disconnect_nodes(self.nodes[2], 1)
450 | self.sync_all(self.nodes[:2])
451 | self.sync_all(self.nodes[2:])
452 |
453 | def join_network(self):
454 | """
455 | Join the (previously split) network halves together.
456 | """
457 | connect_nodes_bi(self.nodes, 1, 2)
458 | self.sync_all()
459 |
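# Illustrative sequence (hypothetical test body) using the two helpers above:
#
#   self.split_network()        # halves: nodes 0/1 and nodes 2/3
#   self.nodes[0].generate(1)   # extend only one half's chain
#   self.join_network()         # reconnect; sync_all() merges the views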
460 | def sync_blocks(self, nodes=None, **kwargs):
461 | sync_blocks(nodes or self.nodes, **kwargs)
462 |
463 | def sync_mempools(self, nodes=None, **kwargs):
464 | sync_mempools(nodes or self.nodes, **kwargs)
465 |
466 | def sync_all(self, nodes=None, **kwargs):
467 | self.sync_blocks(nodes, **kwargs)
468 | self.sync_mempools(nodes, **kwargs)
469 |
470 | # Private helper methods. These should not be accessed by the subclass test scripts.
471 |
472 | def _start_logging(self):
473 | # Add logger and logging handlers
474 | self.log = logging.getLogger('TestFramework.'+ self.options.tmpdir) # Assign new logger name to prevent temp path reuse.
475 | self.log.setLevel(logging.DEBUG)
476 | # Create file handler to log all messages
477 | fh = logging.FileHandler(self.options.tmpdir + '/src.bitcoin.log', encoding='utf-8')
478 | fh.setLevel(logging.DEBUG)
479 | # Create console handler to log messages to stdout. The level defaults to INFO and can be configured with --loglevel.
480 | ch = logging.StreamHandler(sys.stdout)
481 | # The user can provide the log level as a number or a string (e.g. DEBUG). argparse captures loglevel as a string, so convert it to an int when it is numeric.
482 | ll = int(self.options.loglevel) if self.options.loglevel.isdigit() else self.options.loglevel.upper()
483 | ch.setLevel(ll)
484 | # Format logs the same as bitcoind's debug.log with microprecision (so log files can be concatenated and sorted)
485 | formatter = logging.Formatter(fmt='%(asctime)s.%(msecs)03d000Z %(name)s (%(levelname)s): %(message)s', datefmt='%Y-%m-%dT%H:%M:%S')
486 | formatter.converter = time.gmtime
487 | fh.setFormatter(formatter)
488 | ch.setFormatter(formatter)
489 | # add the handlers to the logger
490 | self.log.addHandler(fh)
491 | self.log.addHandler(ch)
492 |
493 | if self.options.trace_rpc:
494 | rpc_logger = logging.getLogger("BitcoinRPC")
495 | rpc_logger.setLevel(logging.DEBUG)
496 | rpc_handler = logging.StreamHandler(sys.stdout)
497 | rpc_handler.setLevel(logging.DEBUG)
498 | rpc_logger.addHandler(rpc_handler)
499 |
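# Example line produced by the formatter above (values hypothetical):
#
#   2023-04-01T12:00:00.123000Z TestFramework.<tmpdir> (INFO): Stopping nodes
#
# The literal "000" suffix pads milliseconds to microsecond width so these
# logs can be concatenated and sorted against bitcoind's debug.log.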
500 | def _initialize_chain(self):
501 | """Initialize a pre-mined blockchain for use by the test.
502 |
503 | Create a cache of a 199-block-long chain
504 | Afterward, create num_nodes copies from the cache."""
505 |
506 | CACHE_NODE_ID = 0 # Use node 0 to create the cache for all other nodes
507 | cache_node_dir = get_datadir_path(self.options.cachedir, CACHE_NODE_ID)
508 | assert self.num_nodes <= MAX_NODES
509 |
510 | if not os.path.isdir(cache_node_dir):
511 | self.log.debug("Creating cache directory {}".format(cache_node_dir))
512 |
513 | initialize_datadir(self.options.cachedir, CACHE_NODE_ID, self.chain)
514 | self.nodes.append(
515 | TestNode(
516 | CACHE_NODE_ID,
517 | cache_node_dir,
518 | chain=self.chain,
519 | extra_conf=["bind=127.0.0.1"],
520 | extra_args=['-disablewallet'],
521 | rpchost=None,
522 | timewait=self.rpc_timeout,
523 | bitcoind=self.options.bitcoind,
524 | bitcoin_cli=self.options.bitcoincli,
525 | coverage_dir=None,
526 | cwd=self.options.tmpdir,
527 | ))
528 | self.start_node(CACHE_NODE_ID)
529 |
530 | # Wait for RPC connections to be ready
531 | self.nodes[CACHE_NODE_ID].wait_for_rpc_connection()
532 |
533 | # Create a 199-block-long chain; each of the first 4 nodes
534 | # gets 25 mature blocks and 25 immature.
535 | # The 4th node gets only 24 immature blocks so that the very last
536 | # block in the cache does not age too much (have an old tip age).
537 | # This is needed so that we are out of IBD when the test starts,
538 | # see the tip age check in IsInitialBlockDownload().
539 | for i in range(8):
540 | self.nodes[CACHE_NODE_ID].generatetoaddress(
541 | nblocks=25 if i != 7 else 24,
542 | address=TestNode.PRIV_KEYS[i % 4].address,
543 | )
544 |
545 | assert_equal(self.nodes[CACHE_NODE_ID].getblockchaininfo()["blocks"], 199)
546 |
547 | # Shut it down, and clean up cache directories:
548 | self.stop_nodes()
549 | self.nodes = []
550 |
551 | def cache_path(*paths):
552 | return os.path.join(cache_node_dir, self.chain, *paths)
553 |
554 | os.rmdir(cache_path('wallets')) # Remove empty wallets dir
555 | for entry in os.listdir(cache_path()):
556 | if entry not in ['chainstate', 'blocks']: # Only keep chainstate and blocks folder
557 | os.remove(cache_path(entry))
558 |
559 | for i in range(self.num_nodes):
560 | self.log.debug("Copy cache directory {} to node {}".format(cache_node_dir, i))
561 | to_dir = get_datadir_path(self.options.tmpdir, i)
562 | shutil.copytree(cache_node_dir, to_dir)
563 | initialize_datadir(self.options.tmpdir, i, self.chain) # Overwrite port/rpcport in bitcoin.conf
564 |
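# Arithmetic check for _initialize_chain() above: 8 generatetoaddress rounds
# of 25 blocks, with the final round reduced to 24, mine 7 * 25 + 24 = 199
# blocks; the pruned datadir (chainstate and blocks only) is cached under
# --cachedir and copied into each node's tmpdir on subsequent runs.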
565 | def _initialize_chain_clean(self):
566 | """Initialize empty blockchain for use by the test.
567 |
568 | Create an empty blockchain and num_nodes wallets.
569 | Useful if a test case wants complete control over initialization."""
570 | for i in range(self.num_nodes):
571 | initialize_datadir(self.options.tmpdir, i, self.chain)
572 |
573 | def skip_if_no_py3_zmq(self):
574 | """Attempt to import the zmq package and skip the test if the import fails."""
575 | try:
576 | import zmq # noqa
577 | except ImportError:
578 | raise SkipTest("python3-zmq module not available.")
579 |
580 | def skip_if_no_bitcoind_zmq(self):
581 | """Skip the running test if bitcoind has not been compiled with zmq support."""
582 | if not self.is_zmq_compiled():
583 | raise SkipTest("bitcoind has not been built with zmq enabled.")
584 |
585 | def skip_if_no_wallet(self):
586 | """Skip the running test if wallet has not been compiled."""
587 | if not self.is_wallet_compiled():
588 | raise SkipTest("wallet has not been compiled.")
589 |
590 | def skip_if_no_cli(self):
591 | """Skip the running test if bitcoin-cli has not been compiled."""
592 | if not self.is_cli_compiled():
593 | raise SkipTest("bitcoin-cli has not been compiled.")
594 |
595 | def is_cli_compiled(self):
596 | """Checks whether bitcoin-cli was compiled."""
597 | return self.config["components"].getboolean("ENABLE_CLI")
598 |
599 | def is_wallet_compiled(self):
600 | """Checks whether the wallet module was compiled."""
601 | return self.config["components"].getboolean("ENABLE_WALLET")
602 |
603 | def is_zmq_compiled(self):
604 | """Checks whether the zmq module was compiled."""
605 | return self.config["components"].getboolean("ENABLE_ZMQ")
606 |
--------------------------------------------------------------------------------