├── README.txt ├── Abe ├── __init__.py ├── version.py ├── htdocs │ ├── favicon.ico │ ├── logo32.png │ ├── robots.txt │ ├── abe.css │ └── nethash.js ├── ripemd_via_hashlib.py ├── enumeration.py ├── genesis_tx.py ├── base58.py ├── firstbits.py ├── reconfigure.py ├── verify.py ├── BCDataStream.py ├── util.py ├── mixup.py ├── readconf.py ├── admin.py ├── deserialize.py └── upgrade.py ├── MANIFEST.in ├── .gitignore ├── README-SQLITE.txt ├── bct-LICENSE.txt ├── README-POSTGRES.txt ├── tools ├── abe_loader └── namecoin_dump.py ├── doc ├── jtobey.pubkey └── FAQ.html ├── setup.py ├── README-FASTCGI.txt ├── CHANGES.txt ├── README-MYSQL.txt ├── README.md ├── README-FIRSTBITS.txt ├── TODO.txt ├── abe.conf └── LICENSE.txt /README.txt: -------------------------------------------------------------------------------- 1 | README.md -------------------------------------------------------------------------------- /Abe/__init__.py: -------------------------------------------------------------------------------- 1 | pass 2 | -------------------------------------------------------------------------------- /Abe/version.py: -------------------------------------------------------------------------------- 1 | __version__ = '0.8pre' 2 | -------------------------------------------------------------------------------- /MANIFEST.in: -------------------------------------------------------------------------------- 1 | include abe.conf 2 | include README-*.txt 3 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | *.pyc 2 | abe.sqlite* 3 | tmpd 4 | MANIFEST 5 | build 6 | dist 7 | -------------------------------------------------------------------------------- /Abe/htdocs/favicon.ico: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/melvincarvalho/bitcoin-abe/master/Abe/htdocs/favicon.ico -------------------------------------------------------------------------------- /Abe/htdocs/logo32.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/melvincarvalho/bitcoin-abe/master/Abe/htdocs/logo32.png -------------------------------------------------------------------------------- /Abe/htdocs/robots.txt: -------------------------------------------------------------------------------- 1 | User-agent: * 2 | Disallow: /chain/ 3 | Disallow: /block/ 4 | Disallow: /tx/ 5 | Disallow: /address/ 6 | Disallow: /unspent/ 7 | Disallow: /b/ 8 | Disallow: /t/ 9 | Disallow: /a/ 10 | -------------------------------------------------------------------------------- /Abe/ripemd_via_hashlib.py: -------------------------------------------------------------------------------- 1 | # RIPEMD hash interface via hashlib for those who don't have 2 | # Crypto.Hash.RIPEMD. 
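# (Note, an assumption about the environment rather than this file's
# code: hashlib exposes 'ripemd160' only when the underlying OpenSSL
# build provides it; without that, hashlib.new('ripemd160') raises
# ValueError, so Crypto.Hash.RIPEMD remains the more portable choice.)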
3 | 4 | import hashlib 5 | 6 | def new(data=''): 7 | h = hashlib.new('ripemd160') 8 | h.update(data) 9 | return h 10 | -------------------------------------------------------------------------------- /Abe/htdocs/abe.css: -------------------------------------------------------------------------------- 1 | td, th { 2 | font-family: Verdana, Helvetica, sans-serif; 3 | margin: 0; 4 | padding: 0 1ex; 5 | } 6 | td { 7 | border-width: 1px; 8 | border-style: solid; 9 | } 10 | table { 11 | border-spacing: 0; 12 | } 13 | .shortlink { 14 | font-size: smaller; 15 | } 16 | -------------------------------------------------------------------------------- /README-SQLITE.txt: -------------------------------------------------------------------------------- 1 | SQLite is not appropriate for a busy public service, since it does not 2 | support concurrent access. 3 | 4 | Ubuntu supplies the sqlite3 module in the python-pysqlite2 [sic] 5 | package. 6 | 7 | Create abe-sqlite.conf with contents: 8 | 9 | dbtype sqlite3 10 | connect-args abe.sqlite 11 | upgrade 12 | port 2750 13 | 14 | Perform the initial data load: 15 | 16 | python -m Abe.abe --config abe-sqlite.conf --commit-bytes 100000 --no-serve 17 | 18 | Look for output such as: 19 | 20 | block_tx 1 1 21 | block_tx 2 2 22 | ... 23 | 24 | This step may take several days depending on chain size and hardware. 25 | Then run the web server as: 26 | 27 | python -m Abe.abe --config abe-sqlite.conf 28 | 29 | You should see: 30 | 31 | Listening on http://localhost:2750 32 | 33 | Verify the installation by browsing the URL shown. 34 | -------------------------------------------------------------------------------- /bct-LICENSE.txt: -------------------------------------------------------------------------------- 1 | Copyright (c) 2010 Gavin Andresen 2 | 3 | Permission is hereby granted, free of charge, to any person obtaining a copy 4 | of this software and associated documentation files (the "Software"), to deal 5 | in the Software without restriction, including without limitation the rights 6 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 7 | copies of the Software, and to permit persons to whom the Software is 8 | furnished to do so, subject to the following conditions: 9 | 10 | The above copyright notice and this permission notice shall be included in 11 | all copies or substantial portions of the Software. 12 | 13 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 14 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 15 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 16 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 17 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 18 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 19 | THE SOFTWARE. 20 | -------------------------------------------------------------------------------- /README-POSTGRES.txt: -------------------------------------------------------------------------------- 1 | PostgreSQL on Debian/Ubuntu. 2 | 3 | Run the Bitcoin client to ensure that your copy of the block chain is 4 | up to date. 5 | 6 | Choose or create a system account to run Abe. Replace USER with its 7 | username throughout these instructions. 
 8 | 
 9 |     apt-get install python2.7 python-crypto postgresql-8.4 python-psycopg2
10 |     sudo -u postgres createdb abe
11 |     sudo -u postgres createuser USER
12 | 
13 | Add the following line to /etc/postgresql/*/main/pg_hba.conf:
14 | 
15 |     local   abe   USER   ident
16 | 
17 | Issue:
18 | 
19 |     sudo service postgresql reload
20 | 
21 | Create file abe-pg.conf with contents:
22 | 
23 |     dbtype psycopg2
24 |     connect-args {"database":"abe"}
25 |     upgrade
26 |     port 2750
27 | 
28 | Perform the initial data load:
29 | 
30 |     python -m Abe.abe --config abe-pg.conf --commit-bytes 100000 --no-serve
31 | 
32 | Look for output such as:
33 | 
34 |     block_tx 1 1
35 |     block_tx 2 2
36 |     ...
37 | 
38 | This step may take several days depending on chain size and hardware.
39 | Then run the web server as:
40 | 
41 |     python -m Abe.abe --config abe-pg.conf
42 | 
43 | You should see:
44 | 
45 |     Listening on http://localhost:2750
46 | 
47 | Verify the installation by browsing the URL shown.
48 | 
--------------------------------------------------------------------------------
/tools/abe_loader:
--------------------------------------------------------------------------------
 1 | #! /bin/bash
 2 | 
 3 | # Copyright(C) 2013 by Abe developers.
 4 | 
 5 | BLOCK_ACCEPTED="ProcessBlock: ACCEPTED"
 6 | 
 7 | # XXX Should be an option.  Maximum number of seconds between runs.
 8 | MEMPOOL_INTERVAL=30
 9 | 
10 | usage() { cat <<EOF
--------------------------------------------------------------------------------
/Abe/genesis_tx.py:
--------------------------------------------------------------------------------
18 | # <http://www.gnu.org/licenses/agpl.html>.
19 | 
20 | def get(tx_hash_hex):
21 |     """
22 |     Given the hexadecimal hash of the genesis transaction (as shown
23 |     by, e.g., "bitcoind getblock 0") return the hexadecimal raw
24 |     transaction.  This works around a Bitcoind limitation described at
25 |     https://bitcointalk.org/index.php?topic=119530.0
26 |     """
27 | 
28 |     # Main Bitcoin chain:
29 |     if tx_hash_hex == "4a5e1e4baab89f3a32518a88c31bc87f618f76673e2cc77ab2127b7afdeda33b":
30 |         return "01000000010000000000000000000000000000000000000000000000000000000000000000ffffffff4d04ffff001d0104455468652054696d65732030332f4a616e2f32303039204368616e63656c6c6f72206f6e206272696e6b206f66207365636f6e64206261696c6f757420666f722062616e6b73ffffffff0100f2052a01000000434104678afdb0fe5548271967f1a67130b7105cd6a828e03909a67962e0ea1f61deb649f6bc3f4cef38c4f35504e51ec112de5c384df7ba0b8d578a4c702b6bf11d5fac00000000"
31 | 
32 |     # Extract your chain's genesis transaction data from the first
33 |     # block file and add it here, or better yet, patch your coin's
34 |     # getrawtransaction to return it on request:
35 |     #if tx_hash_hex == "":
36 |     #    return ""
37 | 
38 |     return None
39 | 
--------------------------------------------------------------------------------
/Abe/base58.py:
--------------------------------------------------------------------------------
 1 | #!/usr/bin/env python
 2 | 
 3 | """encode/decode base58 in the same way that Bitcoin does"""
 4 | 
 5 | import math
 6 | 
 7 | __b58chars = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'
 8 | __b58base = len(__b58chars)
 9 | 
10 | def b58encode(v):
11 |     """ encode v, which is a string of bytes, to base58.
12 |     """
13 | 
14 |     long_value = 0L
15 |     for (i, c) in enumerate(v[::-1]):
16 |         long_value += ord(c) << (8*i) # 2x speedup vs.
exponentiation 17 | 18 | result = '' 19 | while long_value >= __b58base: 20 | div, mod = divmod(long_value, __b58base) 21 | result = __b58chars[mod] + result 22 | long_value = div 23 | result = __b58chars[long_value] + result 24 | 25 | # Bitcoin does a little leading-zero-compression: 26 | # leading 0-bytes in the input become leading-1s 27 | nPad = 0 28 | for c in v: 29 | if c == '\0': nPad += 1 30 | else: break 31 | 32 | return (__b58chars[0]*nPad) + result 33 | 34 | def b58decode(v, length): 35 | """ decode v into a string of len bytes 36 | """ 37 | long_value = 0L 38 | for (i, c) in enumerate(v[::-1]): 39 | long_value += __b58chars.find(c) * (__b58base**i) 40 | 41 | result = '' 42 | while long_value >= 256: 43 | div, mod = divmod(long_value, 256) 44 | result = chr(mod) + result 45 | long_value = div 46 | result = chr(long_value) + result 47 | 48 | nPad = 0 49 | for c in v: 50 | if c == __b58chars[0]: nPad += 1 51 | else: break 52 | 53 | result = chr(0)*nPad + result 54 | if length is not None and len(result) != length: 55 | return None 56 | 57 | return result 58 | 59 | try: 60 | # Python Crypto library is at: http://www.dlitz.net/software/pycrypto/ 61 | # Needed for RIPEMD160 hash function, used to compute 62 | # Bitcoin addresses from internal public keys. 63 | import Crypto.Hash.SHA256 as SHA256 64 | import Crypto.Hash.RIPEMD160 as RIPEMD160 65 | have_crypto = True 66 | except ImportError: 67 | have_crypto = False 68 | 69 | def hash_160(public_key): 70 | if not have_crypto: 71 | return '' 72 | h1 = SHA256.new(public_key).digest() 73 | h2 = RIPEMD160.new(h1).digest() 74 | return h2 75 | 76 | def public_key_to_bc_address(public_key): 77 | if not have_crypto: 78 | return '' 79 | h160 = hash_160(public_key) 80 | return hash_160_to_bc_address(h160) 81 | 82 | def hash_160_to_bc_address(h160): 83 | if not have_crypto: 84 | return '' 85 | vh160 = "\x00"+h160 # \x00 is version 0 86 | h3=SHA256.new(SHA256.new(vh160).digest()).digest() 87 | addr=vh160+h3[0:4] 88 | return b58encode(addr) 89 | 90 | def bc_address_to_hash_160(addr): 91 | bytes = b58decode(addr, 25) 92 | return bytes[1:21] 93 | 94 | if __name__ == '__main__': 95 | x = '005cc87f4a3fdfe3a2346b6953267ca867282630d3f9b78e64'.decode('hex_codec') 96 | encoded = b58encode(x) 97 | print encoded, '19TbMSWwHvnxAKy12iNm3KdbGfzfaMFViT' 98 | print b58decode(encoded, len(x)).encode('hex_codec'), x.encode('hex_codec') 99 | -------------------------------------------------------------------------------- /Abe/firstbits.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # Copyright(C) 2011,2012 by Abe developers. 3 | 4 | # This program is free software: you can redistribute it and/or modify 5 | # it under the terms of the GNU Affero General Public License as 6 | # published by the Free Software Foundation, either version 3 of the 7 | # License, or (at your option) any later version. 8 | # 9 | # This program is distributed in the hope that it will be useful, but 10 | # WITHOUT ANY WARRANTY; without even the implied warranty of 11 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 12 | # Affero General Public License for more details. 13 | # 14 | # You should have received a copy of the GNU Affero General Public 15 | # License along with this program. If not, see 16 | # . 
17 | 18 | """Reconfigure an Abe instance to use or not use Firstbits.""" 19 | 20 | def populate_firstbits(store): 21 | blocks, fbs = 0, 0 22 | log_incr = 1000 23 | 24 | for addr_vers, block_id in store.selectall(""" 25 | SELECT c.chain_address_version, 26 | cc.block_id 27 | FROM chain c 28 | JOIN chain_candidate cc ON (c.chain_id = cc.chain_id) 29 | WHERE cc.block_height IS NOT NULL 30 | ORDER BY cc.chain_id, cc.block_height"""): 31 | fbs += store.do_vers_firstbits(addr_vers, int(block_id)) 32 | blocks += 1 33 | if blocks % log_incr == 0: 34 | store.commit() 35 | store.log.info("%d firstbits in %d blocks" % (fbs, blocks)) 36 | 37 | if blocks % log_incr > 0: 38 | store.commit() 39 | store.log.info("%d firstbits in %d blocks" % (fbs, blocks)) 40 | 41 | def create_firstbits(store): 42 | store.log.info("Creating firstbits table.") 43 | store.ddl( 44 | """CREATE TABLE abe_firstbits ( 45 | pubkey_id NUMERIC(26) NOT NULL, 46 | block_id NUMERIC(14) NOT NULL, 47 | address_version BIT VARYING(80) NOT NULL, 48 | firstbits VARCHAR(50) NOT NULL, 49 | PRIMARY KEY (address_version, pubkey_id, block_id), 50 | FOREIGN KEY (pubkey_id) REFERENCES pubkey (pubkey_id), 51 | FOREIGN KEY (block_id) REFERENCES block (block_id) 52 | )""") 53 | store.ddl( 54 | """CREATE INDEX x_abe_firstbits 55 | ON abe_firstbits (address_version, firstbits)""") 56 | 57 | def drop_firstbits(store): 58 | store.log.info("Dropping firstbits table.") 59 | store.ddl("DROP TABLE abe_firstbits") 60 | 61 | def reconfigure(store, args): 62 | have = store.config['use_firstbits'] == "true" 63 | want = args.use_firstbits 64 | if have == want: 65 | return 66 | lock = store.get_lock() 67 | try: 68 | # XXX Should temporarily store a new schema_version. 69 | if want: 70 | create_firstbits(store) 71 | populate_firstbits(store) 72 | store.config['use_firstbits'] = "true" 73 | else: 74 | drop_firstbits(store) 75 | store.config['use_firstbits'] = "false" 76 | 77 | store.use_firstbits = want 78 | store.save_configvar("use_firstbits") 79 | store.commit() 80 | 81 | finally: 82 | store.release_lock(lock) 83 | -------------------------------------------------------------------------------- /Abe/reconfigure.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # Copyright(C) 2012 by Abe developers. 3 | 4 | # This program is free software: you can redistribute it and/or modify 5 | # it under the terms of the GNU Affero General Public License as 6 | # published by the Free Software Foundation, either version 3 of the 7 | # License, or (at your option) any later version. 8 | # 9 | # This program is distributed in the hope that it will be useful, but 10 | # WITHOUT ANY WARRANTY; without even the implied warranty of 11 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 12 | # Affero General Public License for more details. 13 | # 14 | # You should have received a copy of the GNU Affero General Public 15 | # License along with this program. If not, see 16 | # . 17 | 18 | """Reconfigure an Abe instance.""" 19 | 20 | import sys 21 | import logging 22 | 23 | import DataStore 24 | import readconf 25 | import firstbits 26 | 27 | def keep_scriptsig_reconfigure(store, args): 28 | have = store.keep_scriptsig 29 | want = args.keep_scriptsig 30 | if have == want: 31 | return 32 | if want: 33 | store.log.warn("Can not turn on keep-scriptsig: unimplemented") 34 | return 35 | lock = store.get_lock() 36 | try: 37 | # XXX Should use a temporary schema_version. 
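# (txin_detail references the txin_scriptSig and txin_sequence columns,
# so the view must be dropped before the columns can go; it is
# recreated from the refreshed DDL below.)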
38 | store.drop_view_if_exists("txin_detail") 39 | 40 | store.drop_column_if_exists("txin", "txin_scriptSig") 41 | store.drop_column_if_exists("txin", "txin_sequence") 42 | store.config['keep_scriptsig'] = "false" 43 | 44 | store.keep_scriptsig = want 45 | store.refresh_ddl() 46 | store.ddl(store.get_ddl("txin_detail")) 47 | store.save_configvar("keep_scriptsig") 48 | store.commit() 49 | finally: 50 | store.release_lock(lock) 51 | 52 | def main(argv): 53 | conf = { 54 | "debug": None, 55 | "logging": None, 56 | } 57 | conf.update(DataStore.CONFIG_DEFAULTS) 58 | 59 | args, argv = readconf.parse_argv(argv, conf, 60 | strict=False) 61 | if argv and argv[0] in ('-h', '--help'): 62 | print ("""Usage: python -m Abe.reconfigure [-h] [--config=FILE] [--CONFIGVAR=VALUE]... 63 | 64 | Apply configuration changes to an existing Abe database, if possible. 65 | 66 | --help Show this help message and exit. 67 | --config FILE Read options from FILE. 68 | --use-firstbits {true|false} 69 | Turn Firstbits support on or off. 70 | --keep-scriptsig false Remove input validation scripts from the database. 71 | 72 | All configuration variables may be given as command arguments.""") 73 | return 0 74 | 75 | logging.basicConfig( 76 | stream=sys.stdout, 77 | level=logging.DEBUG, 78 | format="%(message)s") 79 | if args.logging is not None: 80 | import logging.config as logging_config 81 | logging_config.dictConfig(args.logging) 82 | 83 | store = DataStore.new(args) 84 | firstbits.reconfigure(store, args) 85 | keep_scriptsig_reconfigure(store, args) 86 | return 0 87 | 88 | if __name__ == '__main__': 89 | sys.exit(main(sys.argv[1:])) 90 | -------------------------------------------------------------------------------- /Abe/verify.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # Prototype database validation script. Same args as abe.py. 3 | 4 | # Copyright(C) 2011 by Abe developers. 5 | 6 | # This program is free software: you can redistribute it and/or modify 7 | # it under the terms of the GNU Affero General Public License as 8 | # published by the Free Software Foundation, either version 3 of the 9 | # License, or (at your option) any later version. 10 | # 11 | # This program is distributed in the hope that it will be useful, but 12 | # WITHOUT ANY WARRANTY; without even the implied warranty of 13 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 14 | # Affero General Public License for more details. 15 | # 16 | # You should have received a copy of the GNU Affero General Public 17 | # License along with this program. If not, see 18 | # . 19 | 20 | import sys 21 | import DataStore 22 | import util 23 | import logging 24 | import readconf 25 | 26 | def verify_tx_merkle_hashes(store, logger, chain_id): 27 | checked, bad = 0, 0 28 | for block_id, merkle_root, num_tx in store.selectall(""" 29 | SELECT b.block_id, b.block_hashMerkleRoot, b.block_num_tx 30 | FROM block b 31 | JOIN chain_candidate cc ON (b.block_id = cc.block_id) 32 | WHERE cc.chain_id = ?""", (chain_id,)): 33 | merkle_root = store.hashout(merkle_root) 34 | tree = [] 35 | for (tx_hash,) in store.selectall(""" 36 | SELECT tx.tx_hash 37 | FROM block_tx bt 38 | JOIN tx ON (bt.tx_id = tx.tx_id) 39 | WHERE bt.block_id = ? 
40 |              ORDER BY bt.tx_pos""", (block_id,)):
41 |             tree.append(store.hashout(tx_hash))
42 |         if len(tree) != num_tx:
43 |             logger.warning("block %d: block_num_tx=%d but found %d",
44 |                            block_id, num_tx, len(tree))
45 |         root = util.merkle(tree) or DataStore.NULL_HASH
46 |         if root != merkle_root:
47 |             logger.error("block %d: block_hashMerkleRoot mismatch.",
48 |                          block_id)
49 |             bad += 1
50 |         checked += 1
51 |         if checked % 1000 == 0:
52 |             logger.info("%d Merkle trees, %d bad", checked, bad)
53 |     if checked % 1000 > 0:
54 |         logger.info("%d Merkle trees, %d bad", checked, bad)
55 |     return checked, bad
56 | 
57 | def main(argv):
58 |     logging.basicConfig(level=logging.DEBUG)
59 |     args, argv = readconf.parse_argv(argv, DataStore.CONFIG_DEFAULTS,
60 |                                      strict=False)
61 |     if argv and argv[0] in ('-h', '--help'):
62 |         print "Usage: verify.py --dbtype=MODULE --connect-args=ARGS"
63 |         return 0
64 |     store = DataStore.new(args)
65 |     logger = logging.getLogger("verify")
66 |     checked, bad = 0, 0
67 |     for (chain_id,) in store.selectall("""
68 |         SELECT chain_id FROM chain"""):
69 |         logger.info("checking chain %d", chain_id)
70 |         checked1, bad1 = verify_tx_merkle_hashes(store, logger, chain_id)
71 |         checked += checked1
72 |         bad += bad1
73 |     logger.info("All chains: %d Merkle trees, %d bad", checked, bad)
74 |     return bad and 1
75 | 
76 | if __name__ == '__main__':
77 |     sys.exit(main(sys.argv[1:]))
78 | 
--------------------------------------------------------------------------------
/doc/FAQ.html:
--------------------------------------------------------------------------------
 1 | <html>
 2 | <head>
 3 | <title>Abe: Frequently Asked Questions</title>
 4 | </head>
 5 | <body>
 6 | <h1>Abe: Frequently Asked Questions</h1>
 7 | 
 8 | <dl>
 9 | 
10 | <dt>Where in the database are bitcoin addresses?</dt>
11 | 
12 | <dd>A bitcoin address is an encoding of the hash of the public part
13 | of a keypair in someone's wallet.  Abe stores only the public key
14 | hash, in pubkey.pubkey_hash.  Abe converts hash values to bitcoin
15 | addresses as needed using the hash_to_address function in abe.py.</dd>
16 | 
17 | <dt>How do I know what address_version and code3 to use for a new
18 | alt chain?</dt>
19 | 
20 | <dd>code3 can be any 3 characters, e.g. "BTC" for bitcoin.  If people
21 | expect "DVC" (for example) to mean Devcoin, use that.
22 | 
23 | <p>For address_version, if you have a valid address for the new
24 | chain, append it to http://abe.john-edwin-tobey.org/q/decode_address/.
25 | For example, Testnet address mgnQ32RSjvmTLB3jVZ9L2xUTT512cCX9b8 gives
26 | http://abe.john-edwin-tobey.org/q/decode_address/mgnQ32RSjvmTLB3jVZ9L2xUTT512cCX9b8,
27 | which shows 6f:0de3da453bfd284cd1c94902dbb9bc28bbed139f.  Take the
28 | part to the left of the colon (:) (6f for Testnet) and replace "XX"
29 | with it in "\u00XX" ("\u006f" for Testnet).  That is the value for
30 | address_version in the config file's JSON format.</p>
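The same derivation works offline with Abe's own base58 module; here is
a minimal Python 2 sketch (not part of this file), reusing the Testnet
example above and run from the Abe source directory:

    from Abe import base58

    addr = "mgnQ32RSjvmTLB3jVZ9L2xUTT512cCX9b8"
    raw = base58.b58decode(addr, 25)   # version byte + 20-byte hash + 4-byte checksum
    print raw[0].encode('hex')         # '6f'
    print '"\\u00%s"' % raw[0].encode('hex')   # "\u006f", the config value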

31 | <p>address_version comes from the first byte of the input to SHA256
32 | used in address computation.  In Bitcoin as of this writing, this
33 | information is in src/base58.h:
34 | 
35 | <pre>
36 |     enum
37 |     {
38 |         PUBKEY_ADDRESS = 0,
39 |         SCRIPT_ADDRESS = 5,
40 |         PUBKEY_ADDRESS_TEST = 111,
41 |         SCRIPT_ADDRESS_TEST = 196,
42 |     };
43 | </pre>
44 | 
45 | The byte is 111 for Testnet and 0 for regular Bitcoin.  You would
46 | translate byte 111 to a JSON string as follows: 111 = '6f'
47 | (hexadecimal).  In JSON, a 1-byte string is encoded as "\u00XX" where
48 | XX are the hex digits.  So Testnet would be "\u006f".</p>
49 | 
50 | <p>If you get the wrong address_version value, everything will work
51 | except for address display.  You could look up addresses, but they
52 | would appear different on web pages.</p></dd>
53 | 
54 | </dl>
55 | </body>
56 | </html>
--------------------------------------------------------------------------------
/README-FASTCGI.txt:
--------------------------------------------------------------------------------
 1 | Apache 2 FastCGI setup on Debian/Ubuntu
 2 | =======================================
 3 | 
 4 | This document describes how to install and run Abe as a FastCGI
 5 | process under Apache 2 on a Debian GNU/Linux or Ubuntu system.
 6 | Advantages of FastCGI over the built-in HTTP server include:
 7 | 
 8 | * lets browsers cache static content for better performance;
 9 | * can integrate with an existing website, no :2750 in URLs.
10 | 
11 | These instructions assume root privileges.  To begin a privileged
12 | session in a terminal window, issue "sudo -i" (Ubuntu) or "su -"
13 | (Debian).
14 | 
15 | Install required packages:
16 | 
17 |     apt-get install apache2 libapache2-mod-fcgid python-flup
18 |     apt-get install python-crypto
19 | 
20 | Change directory to the Abe distribution and install Abe:
21 | 
22 |     cd bitcoin-abe
23 |     python setup.py install
24 | 
25 | Replace YOUR.ABE.DOMAIN below with a domain that resolves to this
26 | host.  The site will be http://YOUR.ABE.DOMAIN/.  To embed Abe in an
27 | existing site (e.g., http://YOUR.DOMAIN/abe/) prepend a path (e.g.,
28 | "/abe") in the Alias directives, place them in your existing
29 | sites-available file instead of a new VirtualHost, and merge or create
30 | your site's /robots.txt with adjusted paths from Abe/htdocs/robots.txt.
31 | 
32 | Replace HTDOCS/DIRECTORY below with the directory containing abe.css;
33 | the Apache process must have permission to read it.  The following
34 | command displays the correct value:
35 | 
36 |     python -m Abe.abe --print-htdocs-directory
37 | 
38 | Optionally, replace "/usr/lib/cgi-bin" below with another directory;
39 | Apache must have the directory configured with Options +ExecCGI.
40 | 
41 | Create file /etc/apache2/sites-available/abe with these contents:
42 | 
43 |     <VirtualHost *>
44 |         ServerName YOUR.ABE.DOMAIN
45 |         Alias /static/ HTDOCS/DIRECTORY/
46 |         Alias /robots.txt HTDOCS/DIRECTORY/robots.txt
47 |         Alias /favicon.ico HTDOCS/DIRECTORY/favicon.ico
48 |         Alias / /usr/lib/cgi-bin/abe.fcgi/
49 | 
50 |         # Raise this if you get server errors mentioning "mod_fcgid:
51 |         # read data timeout in 40 seconds"
52 |         #FcgidIOTimeout 40
53 | 
54 |         # Uncomment to log Abe requests.
55 |         #ErrorLog /var/log/abe_error.log
56 |         #LogLevel info
57 |         #CustomLog /var/log/abe_access.log combined
58 |     </VirtualHost>
59 | 
60 | Enable the new configuration:
61 | 
62 |     a2ensite abe
63 |     service apache2 reload
64 | 
65 | Replace USER with your Unix user name and create file
66 | /usr/lib/cgi-bin/abe.fcgi with these contents:
67 | 
68 |     #! /usr/bin/python
69 |     import subprocess, sys, os
70 |     command=["sudo", "-u", "USER", "/home/USER/cgi-bin/abe", str(os.getpid())]
71 |     subprocess.Popen(command, stdin=sys.stdin).wait()
72 | 
73 | Make the file executable:
74 | 
75 |     chmod +x /usr/lib/cgi-bin/abe.fcgi
76 | 
77 | Replace USER with your Unix user name and use visudo(1) to append
78 | the following to /etc/sudoers:
79 | 
80 |     # This allows the Apache account (www-data) to run Abe as USER.
81 |     www-data ALL=(USER) NOPASSWD: /home/USER/cgi-bin/abe
82 | 
83 | Put configuration such as database connection parameters in
84 | /home/USER/abe.conf or change the location below.  See the sample
85 | abe.conf in the Abe distribution for file format.  IMPORTANT: Make
86 | sure the configuration does NOT contain a "host" or "port" option.
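For reference, a minimal /home/USER/abe.conf for this setup might look
like the following sketch (dbtype and connect-args are placeholders;
match them to your database as in README-POSTGRES.txt or
README-SQLITE.txt):

    dbtype psycopg2
    connect-args {"database":"abe"}
    # Note: no "host" or "port" line; the FastCGI wrapper below
    # supplies the network plumbing.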
87 | 
88 | Create file /home/USER/cgi-bin/abe with these contents:
89 | 
90 |     #! /bin/sh
91 |     PYTHONUNBUFFERED=1 exec python -m Abe.abe \
92 |         --config /home/USER/abe.conf --static-path static/ --watch-pid="$1"
93 | 
94 | Make the file executable:
95 | 
96 |     chmod +x /home/USER/cgi-bin/abe
97 | 
98 | Abe should be reachable at http://YOUR.ABE.DOMAIN/.  Exit the
99 | privileged session:
100 | 
101 |     exit
102 | 
--------------------------------------------------------------------------------
/Abe/BCDataStream.py:
--------------------------------------------------------------------------------
 1 | #
 2 | # Workalike python implementation of Bitcoin's CDataStream class.
 3 | #
 4 | import struct
 5 | import StringIO
 6 | import mmap
 7 | 
 8 | class SerializationError(Exception):
 9 |     """ Thrown when there's a problem deserializing or serializing """
10 | 
11 | class BCDataStream(object):
12 |     def __init__(self):
13 |         self.input = None
14 |         self.read_cursor = 0
15 | 
16 |     def clear(self):
17 |         self.input = None
18 |         self.read_cursor = 0
19 | 
20 |     def write(self, bytes):  # Initialize with string of bytes
21 |         if self.input is None:
22 |             self.input = bytes
23 |         else:
24 |             self.input += bytes
25 | 
26 |     def map_file(self, file, start):  # Initialize with bytes from file
27 |         self.input = mmap.mmap(file.fileno(), 0, access=mmap.ACCESS_READ)
28 |         self.read_cursor = start
29 |     def seek_file(self, position):
30 |         self.read_cursor = position
31 |     def close_file(self):
32 |         self.input.close()
33 | 
34 |     def read_string(self):
35 |         # Strings are encoded depending on length:
36 |         # 0 to 252 :  1-byte-length followed by bytes (if any)
37 |         # 253 to 65,535 : byte'253' 2-byte-length followed by bytes
38 |         # 65,536 to 4,294,967,295 : byte '254' 4-byte-length followed by bytes
39 |         # ... and the Bitcoin client is coded to understand:
40 |         # greater than 4,294,967,295 : byte '255' 8-byte-length followed by bytes of string
41 |         # ... but I don't think it actually handles any strings that big.
42 |         if self.input is None:
43 |             raise SerializationError("call write(bytes) before trying to deserialize")
44 | 
45 |         try:
46 |             length = self.read_compact_size()
47 |         except IndexError:
48 |             raise SerializationError("attempt to read past end of buffer")
49 | 
50 |         return self.read_bytes(length)
51 | 
52 |     def write_string(self, string):
53 |         # Length-encoded as with read-string
54 |         self.write_compact_size(len(string))
55 |         self.write(string)
56 | 
57 |     def read_bytes(self, length):
58 |         try:
59 |             result = self.input[self.read_cursor:self.read_cursor+length]
60 |             self.read_cursor += length
61 |             return result
62 |         except IndexError:
63 |             raise SerializationError("attempt to read past end of buffer")
64 | 
65 | 
66 | 
67 |     def read_boolean(self): return self.read_bytes(1)[0] != chr(0)
68 |     def read_int16 (self): return self._read_num('<h')
--------------------------------------------------------------------------------
/README-MYSQL.txt:
--------------------------------------------------------------------------------
71 |     ALTER TABLE <table> ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=<size>;
72 | 
73 | Where <size> is one of 1, 2, 4, 8 or 16.
74 | 
75 | Without going into many details, the KEY_BLOCK_SIZE parameter affects both
76 | compression ratio and performance, and longer rows require larger sizes as
77 | well.  To save you the trouble, the following commands have been prepared to
78 | give you the greatest compression ratio.  (NB: For the bigger tables, the
79 | compression has been tested on only a small subset -- 1M rows.)
80 | 
81 |     ALTER TABLE txin ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=4;
82 |     ALTER TABLE txout ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=4;
83 |     ALTER TABLE block_txin ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=4;
84 |     ALTER TABLE tx ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=8;
85 |     ALTER TABLE block_tx ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=4;
86 |     ALTER TABLE pubkey ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=8;
87 |     ALTER TABLE block ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=4;
88 |     ALTER TABLE chain_candidate ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=2;
89 |     ALTER TABLE block_next ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=2;
90 | 
91 | These settings have been tested on a MySQL database with binary-type=binary
92 | and default settings for firstbits and scriptsig.  Compression of a full Abe
93 | database reduced its size from 36G (37254132 KiB) down to only 17G
94 | (17409008 KiB), a 53% reduction.
95 | 
96 | To test for yourself, the following bash code prints out SQL commands to
97 | copy each table above into a compressed table for each key size.  You can
98 | add a "LIMIT <n>" at the end of the INSERT queries to set an upper limit on
99 | the copied rows.
100 | 
101 |     for t in txin txout block_txin tx block_tx pubkey block chain_candidate block_next
102 |     do
103 |       for l in 1 2 4 8 16
104 |       do
105 |         echo "CREATE TABLE ${t}_kbs$l like $t;"
106 |         echo "ALTER TABLE ${t}_kbs$l KEY_BLOCK_SIZE=$l ROW_FORMAT=COMPRESSED;"
107 |         echo "INSERT INTO ${t}_kbs$l SELECT * FROM $t;"
108 |       done
109 |     done
110 | 
111 | Then compare the size of your table's .ibd files for each KEY_BLOCK_SIZE.
--------------------------------------------------------------------------------
/Abe/util.py:
--------------------------------------------------------------------------------
 1 | #
 2 | # Misc util routines
 3 | #
 4 | 
 5 | import re
 6 | import base58
 7 | import Crypto.Hash.SHA256 as SHA256
 8 | 
 9 | try:
10 |     import Crypto.Hash.RIPEMD160 as RIPEMD160
11 | except:
12 |     import ripemd_via_hashlib as RIPEMD160
13 | 
14 | # This function comes from bitcointools, bct-LICENSE.txt.
15 | def determine_db_dir():
16 |     import os
17 |     import os.path
18 |     import platform
19 |     if platform.system() == "Darwin":
20 |         return os.path.expanduser("~/Library/Application Support/Bitcoin/")
21 |     elif platform.system() == "Windows":
22 |         return os.path.join(os.environ['APPDATA'], "Bitcoin")
23 |     return os.path.expanduser("~/.bitcoin")
24 | 
25 | # This function comes from bitcointools, bct-LICENSE.txt.
26 | def long_hex(bytes):
27 |     return bytes.encode('hex_codec')
28 | 
29 | # This function comes from bitcointools, bct-LICENSE.txt.
30 | def short_hex(bytes):
31 |     t = bytes.encode('hex_codec')
32 |     if len(t) < 11:
33 |         return t
34 |     return t[0:4]+"..."+t[-4:]
35 | 
36 | def double_sha256(s):
37 |     return SHA256.new(SHA256.new(s).digest()).digest()
38 | 
39 | # Based on CBlock::BuildMerkleTree().
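# (When a level has an odd number of hashes, i2 = min(i + 1, size - 1)
# pairs the last hash with itself, matching Bitcoin's duplication rule;
# an empty input list falls through to [], so callers substitute
# NULL_HASH.)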
40 | def merkle(hashes): 41 | while len(hashes) > 1: 42 | size = len(hashes) 43 | out = [] 44 | for i in xrange(0, size, 2): 45 | i2 = min(i + 1, size - 1) 46 | out.append(double_sha256(hashes[i] + hashes[i2])) 47 | hashes = out 48 | return hashes and hashes[0] 49 | 50 | def block_hash(block): 51 | import BCDataStream 52 | ds = BCDataStream.BCDataStream() 53 | ds.write_int32(block['version']) 54 | ds.write(block['hashPrev']) 55 | ds.write(block['hashMerkleRoot']) 56 | ds.write_uint32(block['nTime']) 57 | ds.write_uint32(block['nBits']) 58 | ds.write_uint32(block['nNonce']) 59 | return double_sha256(ds.input) 60 | 61 | def pubkey_to_hash(pubkey): 62 | return RIPEMD160.new(SHA256.new(pubkey).digest()).digest() 63 | 64 | def calculate_target(nBits): 65 | return (nBits & 0xffffff) << (8 * ((nBits >> 24) - 3)) 66 | 67 | def target_to_difficulty(target): 68 | return ((1 << 224) - 1) * 1000 / (target + 1) / 1000.0 69 | 70 | def calculate_difficulty(nBits): 71 | return target_to_difficulty(calculate_target(nBits)) 72 | 73 | def work_to_difficulty(work): 74 | return work * ((1 << 224) - 1) * 1000 / (1 << 256) / 1000.0 75 | 76 | def target_to_work(target): 77 | # XXX will this round using the same rules as C++ Bitcoin? 78 | return int((1 << 256) / (target + 1)) 79 | 80 | def calculate_work(prev_work, nBits): 81 | if prev_work is None: 82 | return None 83 | return prev_work + target_to_work(calculate_target(nBits)) 84 | 85 | def work_to_target(work): 86 | return int((1 << 256) / work) - 1 87 | 88 | def get_search_height(n): 89 | if n < 2: 90 | return None 91 | if n & 1: 92 | return n >> 1 if n & 2 else n - (n >> 2) 93 | bit = 2 94 | while (n & bit) == 0: 95 | bit <<= 1 96 | return n - bit 97 | 98 | ADDRESS_RE = re.compile('[1-9A-HJ-NP-Za-km-z]{26,}\\Z') 99 | 100 | def possible_address(string): 101 | return ADDRESS_RE.match(string) 102 | 103 | def hash_to_address(version, hash): 104 | vh = version + hash 105 | return base58.b58encode(vh + double_sha256(vh)[:4]) 106 | 107 | def decode_check_address(address): 108 | if possible_address(address): 109 | version, hash = decode_address(address) 110 | if hash_to_address(version, hash) == address: 111 | return version, hash 112 | return None, None 113 | 114 | def decode_address(addr): 115 | bytes = base58.b58decode(addr, None) 116 | if len(bytes) < 25: 117 | bytes = ('\0' * (25 - len(bytes))) + bytes 118 | return bytes[:-24], bytes[-24:-4] 119 | 120 | class JsonrpcException(Exception): 121 | def __init__(ex, error, method, params): 122 | Exception.__init__(ex) 123 | ex.code = error['code'] 124 | ex.message = error['message'] 125 | ex.data = error.get('data') 126 | ex.method = method 127 | ex.params = params 128 | def __str__(ex): 129 | return ex.method + ": " + ex.message + " (code " + str(ex.code) + ")" 130 | 131 | class JsonrpcMethodNotFound(JsonrpcException): 132 | pass 133 | 134 | def jsonrpc(url, method, *params): 135 | import json, urllib 136 | postdata = json.dumps({"jsonrpc": "2.0", 137 | "method": method, "params": params, "id": "x"}) 138 | respdata = urllib.urlopen(url, postdata).read() 139 | resp = json.loads(respdata) 140 | if resp.get('error') is not None: 141 | if resp['error']['code'] == -32601: 142 | raise JsonrpcMethodNotFound(resp['error'], method, params) 143 | raise JsonrpcException(resp['error'], method, params) 144 | return resp['result'] 145 | 146 | def is_coinbase_tx(tx): 147 | return len(tx['txIn']) == 1 and tx['txIn'][0]['prevout_hash'] == \ 148 | "\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0" 149 | 
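A quick sanity check of the target helpers above (a sketch, not part of
util.py): the compact target 0x1d00ffff from Bitcoin's genesis block
expands to the maximum target, which corresponds to difficulty 1.0.

    import Abe.util as util

    nbits = 0x1d00ffff                        # genesis block nBits
    target = util.calculate_target(nbits)     # 0xffff << 208
    print util.calculate_difficulty(nbits)    # 1.0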
-------------------------------------------------------------------------------- /tools/namecoin_dump.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # Dump the Namecoin name data to standard output. 3 | 4 | # Copyright(C) 2011 by Abe developers. 5 | 6 | # This program is free software: you can redistribute it and/or modify 7 | # it under the terms of the GNU Affero General Public License as 8 | # published by the Free Software Foundation, either version 3 of the 9 | # License, or (at your option) any later version. 10 | # 11 | # This program is distributed in the hope that it will be useful, but 12 | # WITHOUT ANY WARRANTY; without even the implied warranty of 13 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 14 | # Affero General Public License for more details. 15 | # 16 | # You should have received a copy of the GNU Affero General Public 17 | # License along with this program. If not, see 18 | # . 19 | 20 | import sys 21 | import logging 22 | 23 | import Abe.DataStore 24 | import Abe.readconf 25 | from Abe.deserialize import script_GetOp, opcodes 26 | 27 | NAME_NEW = opcodes.OP_1 28 | NAME_FIRSTUPDATE = opcodes.OP_2 29 | NAME_UPDATE = opcodes.OP_3 30 | NAME_SCRIPT_MIN = '\x51' 31 | NAME_SCRIPT_MAX = '\x54' 32 | BLOCKS_TO_EXPIRE = 12000 33 | 34 | def iterate_name_updates(store, logger, chain_id): 35 | for height, tx_pos, txout_pos, script in store.selectall(""" 36 | SELECT cc.block_height, bt.tx_pos, txout.txout_pos, 37 | txout.txout_scriptPubKey 38 | FROM chain_candidate cc 39 | JOIN block_tx bt ON (cc.block_id = bt.block_id) 40 | JOIN txout ON (bt.tx_id = txout.tx_id) 41 | WHERE cc.chain_id = ? 42 | AND txout_scriptPubKey >= ? AND txout_scriptPubKey < ? 43 | ORDER BY cc.block_height, bt.tx_pos, txout.txout_pos""", 44 | (chain_id, store.binin(NAME_SCRIPT_MIN), 45 | store.binin(NAME_SCRIPT_MAX))): 46 | height = int(height) 47 | tx_pos = int(tx_pos) 48 | txout_pos = int(txout_pos) 49 | 50 | i = script_GetOp(store.binout(script)) 51 | try: 52 | name_op = i.next()[0] 53 | if name_op == NAME_NEW: 54 | continue # no effect on name map 55 | elif name_op == NAME_FIRSTUPDATE: 56 | 57 | is_first = True 58 | name = i.next()[1] 59 | newtx_hash = i.next()[1] 60 | #rand = i.next()[1] # XXX documented as optional; is it? 
61 | value = i.next()[1] 62 | elif name_op == NAME_UPDATE: 63 | is_first = False 64 | name = i.next()[1] 65 | value = i.next()[1] 66 | else: 67 | logger.warning("Unexpected first op: %s", repr(name_op)) 68 | continue 69 | except StopIteration: 70 | logger.warning("Strange script at %d:%d:%d", 71 | height, tx_pos, txout_pos) 72 | continue 73 | yield (height, tx_pos, txout_pos, is_first, name, value) 74 | 75 | def get_expiration_depth(height): 76 | if height < 24000: 77 | return 12000 78 | if height < 48000: 79 | return height - 12000 80 | return 36000 81 | 82 | def dump(store, logger, chain_id): 83 | from collections import deque 84 | top = store.get_block_number(chain_id) 85 | expires = {} 86 | expiry_queue = deque() # XXX unneeded synchronization 87 | 88 | for x in iterate_name_updates(store, logger, chain_id): 89 | height, tx_pos, txout_pos, is_first, name, value = x 90 | while expiry_queue and expiry_queue[0]['block_id'] < height: 91 | e = expiry_queue.popleft() 92 | dead = e['name'] 93 | if expires[dead] == e['block_id']: 94 | print repr((e['block_id'], 'Expired', dead, None)) 95 | if expires.get(name, height) < height: 96 | type = 'Resurrected' 97 | elif is_first: 98 | type = 'First' 99 | else: 100 | type = 'Renewed' 101 | print repr((height, type, name, value)) 102 | expiry = height + get_expiration_depth(height) 103 | expires[name] = expiry 104 | expiry_queue.append({'block_id': expiry, 'name': name, 'value': value}) 105 | 106 | for e in expiry_queue: 107 | if expires[e['name']] > e['block_id']: 108 | pass 109 | elif e['block_id'] <= top: 110 | print repr((e['block_id'], 'Expired', e['name'], None)) 111 | else: 112 | print repr((e['block_id'], 'Until', e['name'], e['value'])) 113 | 114 | def main(argv): 115 | logging.basicConfig(level=logging.DEBUG) 116 | conf = { 117 | 'chain_id': None, 118 | } 119 | conf.update(Abe.DataStore.CONFIG_DEFAULTS) 120 | args, argv = Abe.readconf.parse_argv(argv, conf, strict=False) 121 | 122 | if argv and argv[0] in ('-h', '--help'): 123 | print "Usage: namecoin_dump.py --dbtype=MODULE --connect-args=ARGS" 124 | return 0 125 | elif argv: 126 | sys.stderr.write( 127 | "Error: unknown option `%s'\n" 128 | "See `namecoin_dump.py --help' for more information.\n" 129 | % (argv[0],)) 130 | return 1 131 | 132 | store = Abe.DataStore.new(args) 133 | logger = logging.getLogger(__name__) 134 | if args.chain_id is None: 135 | row = store.selectrow( 136 | "SELECT chain_id FROM chain WHERE chain_name = 'Namecoin'") 137 | if row is None: 138 | raise Exception("Can not find Namecoin chain in database.") 139 | args.chain_id = row[0] 140 | 141 | dump(store, logger, args.chain_id) 142 | return 0 143 | 144 | if __name__ == '__main__': 145 | sys.exit(main(sys.argv[1:])) 146 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | Abe: a free block chain browser for Bitcoin-based currencies. 2 | https://github.com/bitcoin-abe/bitcoin-abe 3 | 4 | Copyright(C) 2011,2012,2013 by Abe developers. 5 | License: GNU Affero General Public License, see the file LICENSE.txt. 6 | Portions Copyright (c) 2010 Gavin Andresen, see bct-LICENSE.txt. 7 | 8 | Welcome to Abe! 9 | =============== 10 | 11 | This software reads the Bitcoin block file, transforms and loads the 12 | data into a database, and presents a web interface similar to Bitcoin 13 | Block Explorer, http://blockexplorer.com/. 
14 | 15 | Abe draws inspiration from Bitcoin Block Explorer (BBE) and seeks some 16 | level of compatibility with it but uses a completely new 17 | implementation. 18 | 19 | Installation 20 | ------------ 21 | 22 | Issue: 23 | 24 | python setup.py install 25 | 26 | or simply run Abe from the directory containing setup.py. 27 | 28 | Abe depends on Python 2.7 (or 2.6), the pycrypto package, and an SQL 29 | database supporting ROLLBACK. Abe runs on PostgreSQL, MySQL's InnoDB 30 | engine, and SQLite. Other SQL databases may work with minor changes. 31 | Abe formerly ran on some ODBC configurations, Oracle, and IBM DB2, but 32 | we have not tested to be sure it still works. See the comments in 33 | abe.conf about dbtype for configuration examples. 34 | 35 | Abe works with files created by the original (Satoshi) Bitcoin client. 36 | You will need a copy of the block files (blk0001.dat, blk0002.dat, 37 | etc. in your Bitcoin directory or its blocks/ subdirectory). You may 38 | let Abe read the block files while Bitcoin runs, assuming Bitcoin only 39 | appends to the file. Prior to Bitcoin v0.8, this assumption seemed 40 | safe. Abe may need some fixes to avoid skipping blocks while current 41 | and future Bitcoin versions run. 42 | 43 | License 44 | ------- 45 | 46 | The GNU Affero General Public License (LICENSE.txt) requires whoever 47 | modifies this code and runs it on a server to make the modified code 48 | available to users of the server. You may do this by forking the 49 | Github project (if you received this code from Github.com), keeping 50 | your modifications in the new project, and linking to it in the page 51 | template. Or you may wish to satisfy the requirement by simply 52 | passing "--auto-agpl" to "python -m Abe.abe". This option makes all 53 | files in the directory containing abe.py and its subdirectories 54 | available to clients. See the comments in abe.conf for more 55 | information. 56 | 57 | Database 58 | -------- 59 | 60 | For usage, run "python -m Abe.abe --help" and see the comments in 61 | abe.conf. 62 | 63 | You will have to specify a database driver and connection arguments 64 | (dbtype and connect-args in abe.conf). The dbtype is the name of a 65 | Python module that supports your database. Known to work are psycopg2 66 | (for PostgreSQL) and sqlite3. The value of connect-args depends on 67 | your database configuration; consult the module's documentation of the 68 | connect() method. 69 | 70 | You may specify connect-args in any of the following forms: 71 | 72 | * omit connect-args to call connect() with no arguments 73 | 74 | * named arguments as a JSON object, e.g.: 75 | connect-args = { "database": "abe", "password": "b1tc0!n" } 76 | 77 | * positional arguments as a JSON array, e.g.: 78 | connect-args = ["abe", "abe", "b1tc0!n"] 79 | 80 | * a single string argument on one line, e.g.: 81 | connect-args = /var/lib/abe/abe.sqlite 82 | 83 | For JSON syntax, see http://www.json.org. 84 | 85 | Slow startup 86 | ------------ 87 | 88 | Reading the block files takes much too long, several days or more for 89 | the main BTC block chain as of 2013. However, if you use a persistent 90 | database, Abe remembers where it stopped reading and starts more 91 | quickly the second time. 92 | 93 | Replacing the Block File 94 | ------------------------ 95 | 96 | Abe does not currently handle block file changes gracefully. If you 97 | replace your copy of the block chain, you must rebuild Abe's database 98 | or (quicker) force a rescan. 
To force a rescan of all data 99 | directories, run Abe once with the "--rescan" option. 100 | 101 | Web server 102 | ---------- 103 | 104 | By default, Abe expects to be run in a FastCGI environment. For an 105 | overview of FastCGI setup, see README-FASTCGI.txt. 106 | 107 | To run the built-in HTTP server instead of FastCGI, specify a TCP port 108 | and network interface in abe.conf, e.g.: 109 | 110 | port 2750 111 | host 127.0.0.1 # or a domain name 112 | 113 | Input 114 | ----- 115 | 116 | To display Namecoin or any block chain with data somewhere other than 117 | the default Bitcoin directory, specify "datadir" in abe.conf, e.g.: 118 | 119 | datadir = /home/bitcoin/.namecoin 120 | 121 | Note that the web interface is currently unaware of name transactions, 122 | but see namecoin_dump.py in the tools directory. 123 | 124 | The datadir directive can include a new chain's basic configuration, 125 | e.g.: 126 | 127 | datadir += [{ 128 | "dirname": "/home/weeds/testnet", 129 | "chain": "Weeds", 130 | "code3": "WDS", 131 | "address_version": "o" }] 132 | 133 | Note that "+=" adds to the existing datadir configuration, while "=" 134 | replaces it. For help with address_version, please open doc/FAQ.html 135 | in a web browser. 136 | 137 | More information 138 | ---------------- 139 | 140 | Please see TODO.txt for a list of what is not yet implemented but 141 | would like to be. 142 | 143 | Forum thread: https://bitcointalk.org/index.php?topic=22785.0 144 | Newbies: https://bitcointalk.org/index.php?topic=51139.0 145 | 146 | Donations appreciated: 1PWC7PNHL1SgvZaN7xEtygenKjWobWsCuf (BTC) 147 | NJ3MSELK1cWnqUa6xhF2wUYAnz3RSrWXcK (NMC) 148 | -------------------------------------------------------------------------------- /README-FIRSTBITS.txt: -------------------------------------------------------------------------------- 1 | FIRSTBITS NOTES 2 | 3 | Abe experimentally supports bidirectional translation between 4 | addresses and firstbits as on http://firstbits.com/. Abe will 5 | disagree with other firstbits implementations in some cases until the 6 | algorithm is better defined and all implementations start to use it. 7 | 8 | This disagreement has security implications. Do not rely on the 9 | firstbits address reported by Abe to match the one on firstbits.com or 10 | another site when sending bitcoins. See this forum thread, and note 11 | that Abe does not currently implement the algorithm proposed there: 12 | https://bitcointalk.org/index.php?topic=16217.msg960077#msg960077 13 | 14 | This feature is disabled by default due to performance impact. To 15 | enable it, add "use-firstbits" to the configuration *before* first 16 | running a version that supports it. 17 | 18 | If you run without use-firstbits, Abe will default it to false and 19 | will never create the table. The Abe.reconfigure module turns 20 | firstbits on and off once you have upgraded Abe's schema. Stop all 21 | processes using the database, change the use-firstbits setting in 22 | abe.conf, and run: 23 | 24 | python -m Abe.reconfigure --config abe.conf 25 | 26 | I have tried a few dozen addresses, and they match firstbits.com. 27 | Please report issues in the forum thread 28 | (https://bitcointalk.org/index.php?topic=22785.msg949105#msg949105) or 29 | by email, PM, or the github issue system, since I will not spend much 30 | time testing. 
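For orientation, an address-to-firstbits lookup reduces to a join over
the columns described below; a sketch using the store API seen
elsewhere in this distribution (not a query lifted from Abe's code):

    rows = store.selectall("""
        SELECT fb.firstbits
          FROM abe_firstbits fb
          JOIN chain_candidate cc ON (fb.block_id = cc.block_id)
         WHERE fb.pubkey_id = ?
           AND cc.chain_id = ?
           AND cc.in_longest = 1""", (pubkey_id, chain_id))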
31 | 32 | The new table has four columns: 33 | 34 | pubkey_id - identifies a public key hash in the pubkey table 35 | block_id - a block where this address first appeared in its chain 36 | address_version - second component of address, along with pubkey_hash 37 | firstbits - lowercase firstbits of the address in this chain 38 | 39 | Note that address_version for Bitcoin addresses is always "\0" (or 40 | "00" in hex). The field exists because Abe supports multiple 41 | currencies with different address versions, such as Bitcoin Testnet 42 | and Namecoin. 43 | 44 | To get from address to pubkey_hash and address_version, use, for 45 | example, /q/decode_address/ADDRESS. To get from pubkey_hash and 46 | address_version to address, use /q/hashtoaddress/HASH/VERSION. 47 | 48 | Note that the existence of an address in the table does not always 49 | imply that the address has the given firstbits. It will if the 50 | corresponding block is in the main chain. That is, if block_id 51 | matches a row in chain_candidate where in_longest=1 and chain_id=1 52 | (for Bitcoin, or the desired chain_id from the chain table). 53 | 54 | 55 | FIRSTBITS TECHNICAL DESIGN 56 | 57 | Maintenance of the abe_firstbits table imposes space and time costs on 58 | Abe instances. To keep things simple, Abe does not support firstbits 59 | calculation in only some chains and not others. If use_firstbits is 60 | in effect, a database invariant requires the table to contain all 61 | firstbits corresponding to chain_candidate rows where block_height is 62 | not null. If use_firstbits is false (the default) then Abe does not 63 | touch abe_firstbits. 64 | 65 | Finding firstbits requires a function that determines whether a given 66 | block is descended from another given block. Why? Because several 67 | firstbits records may collide with initial substrings of the new 68 | address, but only the ones in ancestral blocks can prevent it from 69 | receiving the firstbits. 70 | 71 | A naive implementation of is_descended_from(block, ancestor) would 72 | simply look up block's prev_block_id in the block table and repeat 73 | until it finds the block at ancestor's block_height. The result would 74 | be true iff that block is ancestor. But this would scale linearly 75 | with chain length, and I would like a faster function. 76 | 77 | A naive, fast implementation would introduce a block_ancestor table 78 | containing a row for each block pair whose first block is descended 79 | from its second block. But this table would grow as the square of the 80 | chain length, and that is too big. 81 | 82 | Abe's implementation (DataStore.is_descended_from) involves a new 83 | block table column, search_block_id. Like block.prev_block_id, 84 | search_block_id points to an earlier block in the chain, but the 85 | earlier block's height is found by a function other than 86 | block_height-1. The function depends only on block_height and allows 87 | is_descended_from to use a more-or-less binary search. A paper by 88 | Chris Okasaki describes a somewhat similar structure: "Purely 89 | Functional Random-Access Lists" 90 | http://cs.oberlin.edu/~jwalker/refs/fpca95.ps 91 | 92 | The get_search_height function in util.py computes the search_block_id 93 | block height. 
I am sure it could be improved: 94 | 95 | def get_search_height(n): 96 | if n < 2: 97 | return None 98 | if n & 1: 99 | return n >> 1 if n & 2 else n - (n >> 2) 100 | bit = 2 101 | while (n & bit) == 0: 102 | bit <<= 1 103 | return n - bit 104 | 105 | To find a block's ancestor at a given height, Abe tries the search 106 | block if it is not too far in the past. Otherwise, it tries the 107 | previous block. The pattern of height distances from block to search 108 | block should ensure reasonable worst-case performance, but I have not 109 | proven this. 110 | 111 | Given search_block_id, it should be possible to write 112 | is_descended_from as a stored procedure in databases that support it. 113 | This would be an optional performance and utility improvement, though. 114 | Abe would contain the same logic in generic Python code. 115 | 116 | An alternative table-based approach is libbitcoin's span_left and 117 | span_right. I have not got my head around the requirements for 118 | adjusting the span values when new side chains appear, though, and I 119 | think the more-or-less binary search suffices. 120 | 121 | John Tobey 122 | 2012-06-09 123 | -------------------------------------------------------------------------------- /Abe/htdocs/nethash.js: -------------------------------------------------------------------------------- 1 | // Copyright(C) 2013 by Abe developers. 2 | // 3 | // This program is free software: you can redistribute it and/or modify 4 | // it under the terms of the GNU Affero General Public License as 5 | // published by the Free Software Foundation, either version 3 of the 6 | // License, or (at your option) any later version. 7 | // 8 | // This program is distributed in the hope that it will be useful, but 9 | // WITHOUT ANY WARRANTY; without even the implied warranty of 10 | // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 11 | // Affero General Public License for more details. 12 | // 13 | // You should have received a copy of the GNU Affero General Public 14 | // License along with this program. If not, see 15 | // . 16 | 17 | "use strict"; 18 | 19 | var Abe = (function() { 20 | 21 | var SVG_NS = "http://www.w3.org/2000/svg"; 22 | var ABE_NS = "http://abe.bit/abe"; 23 | 24 | function draw(svg, interval) { 25 | var i, elts, node, windows, chart, lines, rows, work, first; 26 | var elapsed, worked, drawn, height, matrix; 27 | var hi = -Infinity, lo = Infinity; 28 | 29 | if (interval === undefined) { 30 | interval = 24*60*60; // 1 day 31 | } 32 | 33 | elts = svg.getElementsByTagNameNS(ABE_NS, "*"); 34 | 35 | // In inline SVG (FF 18.0) the above search returns empty. 36 | // Here is a workaround. 
37 | if (elts.length === 0) { 38 | elts = []; 39 | Array.prototype.forEach.call( 40 | svg.getElementsByTagName("*"), 41 | function(elt) { 42 | if (elt.localName.indexOf("abe:") === 0) 43 | elts.push(elt); 44 | }); 45 | } 46 | 47 | rows = []; 48 | 49 | for (i = 0; i < elts.length; i++) { 50 | node = elts[i]; 51 | switch (node.localName.replace("abe:", "")) { 52 | case "nethash": 53 | rows.push(nodeToRow(node)); 54 | break; 55 | } 56 | } 57 | 58 | if (rows.length < 2) { 59 | alert("Not enough data to chart!"); 60 | return; 61 | } 62 | 63 | rows[0].work = 0; // clobber bogus value 64 | 65 | for (i = 1, work = 0; i < rows.length; i++) { 66 | work += rows[i].work; 67 | 68 | if (rows[i].nTime > rows[0].nTime) { 69 | first = work / (rows[i].nTime - rows[0].nTime); 70 | break; 71 | } 72 | } 73 | 74 | if (first === undefined) { 75 | alert("Can not make chart: block times do not increase!"); 76 | return; 77 | } 78 | 79 | function make_point(x, value) { 80 | var point = svg.createSVGPoint(); 81 | point.x = x 82 | point.y = value; 83 | if (value < lo) lo = value; 84 | if (value > hi) hi = value; 85 | return point; 86 | } 87 | 88 | function parse_window(s) { 89 | var m = /^(\d*(?:\.\d+)?)(d|days?)$/i.exec(s); 90 | var n; 91 | 92 | if (m) { 93 | n = Number(m[1]); 94 | if (n > 0) { 95 | switch (m[2].toLowerCase()) { 96 | case "d": case "day": case "days": return n * 24*60*60; 97 | default: break; 98 | } 99 | } 100 | } 101 | 102 | throw "Can not parse interval: " + s; 103 | } 104 | 105 | function make_line(elt) { 106 | var line = { elt: elt }; 107 | elt.points.initialize(make_point(0, Math.log(first))); 108 | line.window = parse_window(elt.getAttributeNS(ABE_NS, "window")); 109 | line.rate = first; 110 | line.oldShare = 1 / Math.exp(interval / line.window); 111 | line.newShare = 1 - line.oldShare; 112 | return line; 113 | } 114 | 115 | chart = svg.getElementById("chart"); 116 | lines = Array.prototype.map.call(chart.getElementsByTagName("polyline"), 117 | make_line) 118 | rows.sort(function(a, b) { return a.nTime - b.nTime; }); 119 | elapsed = 0; 120 | worked = 0; 121 | drawn = 0; 122 | 123 | function extend_line(line) { 124 | line.rate *= line.oldShare; 125 | line.rate += line.newShare * worked / interval; 126 | if (line.rate > 0) 127 | line.elt.points.appendItem(make_point(drawn, 128 | Math.log(line.rate))); 129 | } 130 | 131 | function tick(seconds, work) { 132 | 133 | elapsed += seconds; 134 | 135 | while (elapsed >= interval) { 136 | drawn++; 137 | lines.forEach(extend_line); 138 | elapsed -= interval; 139 | worked = 0; 140 | } 141 | 142 | worked += work; 143 | } 144 | 145 | for (i = 1; i < rows.length; i++) { 146 | tick(rows[i].nTime - rows[i-1].nTime, rows[i].work); 147 | } 148 | 149 | matrix = svg.createSVGMatrix(); 150 | matrix.a = 1 / drawn; 151 | 152 | if (lo !== hi) { 153 | height = svg.viewBox.baseVal.height; 154 | matrix.d = height / 1.1 / (lo - hi); 155 | matrix.f = height / 1.05 - lo * matrix.d; 156 | //matrix.f = 1 + lo / (hi - lo); 157 | } 158 | 159 | chart.transform.baseVal.initialize( 160 | chart.transform.baseVal.createSVGTransformFromMatrix(matrix)); 161 | } 162 | 163 | function nodeToRow(node) { 164 | return { 165 | nTime: Number(node.getAttributeNS(null, "t")), 166 | difficulty: Number(node.getAttributeNS(null, "d")), 167 | work: Number(node.getAttributeNS(null, "w")) 168 | }; 169 | } 170 | 171 | return { draw: draw }; 172 | })(); 173 | -------------------------------------------------------------------------------- /Abe/mixup.py: 
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | 
3 | # Copyright(C) 2012 by Abe developers.
4 | 
5 | # This program is free software: you can redistribute it and/or modify
6 | # it under the terms of the GNU Affero General Public License as
7 | # published by the Free Software Foundation, either version 3 of the
8 | # License, or (at your option) any later version.
9 | #
10 | # This program is distributed in the hope that it will be useful, but
11 | # WITHOUT ANY WARRANTY; without even the implied warranty of
12 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13 | # Affero General Public License for more details.
14 | #
15 | # You should have received a copy of the GNU Affero General Public
16 | # License along with this program.  If not, see
17 | # <http://www.gnu.org/licenses/agpl.html>.
18 | 
19 | """Load blocks in different order for testing."""
20 | 
21 | import sys
22 | import logging
23 | 
24 | import DataStore
25 | import readconf
26 | import BCDataStream, util
27 | 
28 | def mixup_blocks(store, ds, count, datadir_chain_id = None, seed = None):
29 |     bytes_done = 0
30 |     offsets = []
31 | 
32 |     for i in xrange(count):
33 |         if ds.read_cursor + 8 <= len(ds.input):
34 |             offsets.append(ds.read_cursor)
35 |             magic = ds.read_bytes(4)
36 |             length = ds.read_int32()
37 |             ds.read_cursor += length
38 |             if ds.read_cursor <= len(ds.input):
39 |                 continue
40 |         raise IOError("End of input after %d blocks" % i)
41 | 
42 |     if seed > 1 and seed <= count:
43 |         for i in xrange(0, seed * int(count/seed), seed):
44 |             offsets[i : i + seed] = offsets[i : i + seed][::-1]
45 |     elif seed == -3:
46 |         for i in xrange(0, 3 * int(count/3), 3):
47 |             offsets[i : i + 3] = offsets[i+1 : i + 3] + [offsets[i]]
48 |         print offsets
49 |     elif seed:
50 |         offsets = offsets[::-1]  # XXX want random
51 | 
52 |     for offset in offsets:
53 |         ds.read_cursor = offset
54 |         magic = ds.read_bytes(4)
55 |         length = ds.read_int32()
56 | 
57 |         # Assume blocks obey the respective policy if they get here.
58 |         chain_id = datadir_chain_id
59 |         if chain_id is None:
60 |             rows = store.selectall("""
61 |                 SELECT chain.chain_id
62 |                   FROM chain
63 |                   JOIN magic ON (chain.magic_id = magic.magic_id)
64 |                  WHERE magic.magic = ?""",
65 |                                    (store.binin(magic),))
66 |             if len(rows) == 1:
67 |                 chain_id = rows[0][0]
68 |         if chain_id is None:
69 |             ds.read_cursor = offset
70 |             raise ValueError(
71 |                 "Chain not found for magic number %s in block file at"
72 |                 " offset %d." % (repr(magic), offset))
73 | 
74 | 
75 |         # XXX pasted out of DataStore.import_blkdat
76 |         end = ds.read_cursor + length
77 | 
78 |         hash = util.double_sha256(
79 |             ds.input[ds.read_cursor : ds.read_cursor + 80])
80 |         # XXX should decode target and check hash against it to
81 |         # avoid loading garbage data.  But not for merged-mined or
82 |         # CPU-mined chains that use different proof-of-work
83 |         # algorithms.  Time to resurrect policy_id?
84 | 
85 |         block_row = store.selectrow("""
86 |             SELECT block_id, block_height, block_chain_work,
87 |                    block_nTime, block_total_seconds,
88 |                    block_total_satoshis, block_satoshi_seconds
89 |               FROM block
90 |              WHERE block_hash = ?
91 |         """, (store.hashin(hash),))
92 | 
93 |         if block_row:
94 |             # Block header already seen.  Don't import the block,
95 |             # but try to add it to the chain.
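            # The columns selected above give everything the re-linking
            # path needs.  Rebuild the block dict from the row and,
            # unless chain_candidate already links this block to the
            # chain, offer it so the chain tables gain the missing row
            # (see DataStore.offer_block_to_chains).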
96 |             if chain_id is not None:
97 |                 b = {
98 |                     "block_id": block_row[0],
99 |                     "height": block_row[1],
100 |                     "chain_work": store.binout_int(block_row[2]),
101 |                     "nTime": block_row[3],
102 |                     "seconds": block_row[4],
103 |                     "satoshis": block_row[5],
104 |                     "ss": block_row[6]}
105 |                 if store.selectrow("""
106 |                     SELECT 1
107 |                       FROM chain_candidate
108 |                      WHERE block_id = ?
109 |                        AND chain_id = ?""",
110 |                                    (b['block_id'], chain_id)):
111 |                     store.log.info("block %d already in chain %d",
112 |                                    b['block_id'], chain_id)
113 |                     b = None
114 |                 else:
115 |                     if b['height'] == 0:
116 |                         b['hashPrev'] = util.GENESIS_HASH_PREV
117 |                     else:
118 |                         b['hashPrev'] = 'dummy'  # Fool adopt_orphans.
119 |                     store.offer_block_to_chains(b, frozenset([chain_id]))
120 |         else:
121 |             b = store.parse_block(ds, chain_id, magic, length)
122 |             b["hash"] = hash
123 |             chain_ids = frozenset([] if chain_id is None else [chain_id])
124 |             store.import_block(b, chain_ids = chain_ids)
125 |             if ds.read_cursor != end:
126 |                 store.log.debug("Skipped %d bytes at block end",
127 |                                 end - ds.read_cursor)
128 | 
129 |         bytes_done += length
130 |         if bytes_done >= store.commit_bytes:
131 |             store.log.debug("commit")
132 |             store.commit()
133 |             bytes_done = 0
134 | 
135 |     if bytes_done > 0:
136 |         store.commit()
137 | 
138 | def main(argv):
139 |     conf = {
140 |         "debug": None,
141 |         "logging": None,
142 |         "count": 200,
143 |         "seed": 1,
144 |         "blkfile": None,
145 |         }
146 |     conf.update(DataStore.CONFIG_DEFAULTS)
147 | 
148 |     args, argv = readconf.parse_argv(argv, conf,
149 |                                      strict=False)
150 |     if argv and argv[0] in ('-h', '--help'):
151 |         print ("""Usage: python -m Abe.mixup [-h] [--config=FILE] [--CONFIGVAR=VALUE]...
152 | 
153 | Load blocks out of order.
154 | 
155 |   --help                    Show this help message and exit.
156 |   --config FILE             Read options from FILE.
157 |   --count NUMBER            Load COUNT blocks.
158 |   --blkfile FILE            Load the first COUNT blocks from FILE.
159 |   --seed NUMBER             Random seed (not implemented; 0=file order).
160 | 
161 | All configuration variables may be given as command arguments.""")
162 |         return 0
163 | 
164 |     if args.blkfile is None:
165 |         raise ValueError("--blkfile is required.")
166 | 
167 |     logging.basicConfig(
168 |         stream=sys.stdout,
169 |         level=logging.DEBUG,
170 |         format="%(message)s")
171 |     if args.logging is not None:
172 |         import logging.config as logging_config
173 |         logging_config.dictConfig(args.logging)
174 | 
175 |     store = DataStore.new(args)
176 |     ds = BCDataStream.BCDataStream()
177 |     file = open(args.blkfile, "rb")
178 |     ds.map_file(file, 0)
179 |     file.close()
180 |     mixup_blocks(store, ds, int(args.count), None, int(args.seed or 0))
181 |     return 0
182 | 
183 | if __name__ == '__main__':
184 |     sys.exit(main(sys.argv[1:]))
185 | 
--------------------------------------------------------------------------------
/TODO.txt:
--------------------------------------------------------------------------------
1 | * MerkleRootMismatch reported when a non-final blocks/blk*.dat ends in
2 |   a NUL span.  Ignore the span.
3 | 
4 | * datadir "loader" option seems not to override default-loader.
5 | 
6 | * Block/tx blacklist.
7 | 
8 | * Consider allowing unlimited script length.
9 | 
10 | * UnicodeEncodeError on non-ASCII MySQLdb connect params.
11 | 
12 | * Bugs affecting bytea hashin?
13 | File "Abe/DataStore.py", line 1539, in import_block 14 | tx['tx_id'] = store.import_tx(tx, pos == 0) 15 | File "Abe/DataStore.py", line 1875, in import_tx 16 | store.intin(tx['lockTime']), len(tx['tx']))) 17 | File "Abe/DataStore.py", line 418, in sql 18 | store.cursor.execute(cached, params) 19 | IntegrityError: duplicate key value violates unique constraint "tx_tx_hash_key" 20 | 21 | Failed to catch up {'blkfile_number': 1, 'dirname': '/home/bitcoin/.bitcoin', 'c 22 | hain_id': None, 'id': Decimal('1'), 'blkfile_offset': 434255480} 23 | Traceback (most recent call last): 24 | File "Abe/DataStore.py", line 2220, in catch_up 25 | try: 26 | File "Abe/DataStore.py", line 2243, in catch_up_dir 27 | while (True): 28 | File "Abe/DataStore.py", line 2359, in import_blkdat 29 | chain_ids = frozenset([] if chain_id is None else [chain_id]) 30 | File "Abe/DataStore.py", line 1655, in import_block 31 | (block_id, txin_id, oblock_id)) 32 | File "Abe/DataStore.py", line 418, in sql 33 | store.cursor.execute(cached, params) 34 | TransactionRollbackError: deadlock detected 35 | DETAIL: Process 8102 waits for ShareLock on transaction 3744470; blocked by pro 36 | cess 20041. 37 | Process 20041 waits for ShareLock on transaction 3744468; blocked by process 8102. 38 | HINT: See server log for query details. 39 | CONTEXT: SQL statement "SELECT 1 FROM ONLY "public"."block" x WHERE "block_id" OPERATOR(pg_catalog.=) $1 FOR SHARE OF x" 40 | 41 | * Compress standard txout scripts by representing them as a pubkey_id 42 | and script type. Multi-signature outputs require thought; leave 43 | pubkey_id null and retain the script for starters. 44 | 45 | * Support new script types. Fix the firstbits table on upgrade. 46 | Recent non-zero outputs showing as Unknown: 47 | tx_hash=b728387a3cf1dfcff1eef13706816327907f79f9366a7098ee48fc0c00ad2726, 48 | Bitcoin 140921, 64:f816...d335 CHECKSIG. 49 | tx_hash=9740e7d646f5278603c04706a366716e5e87212c57395e0d24761c0ae784b2c6, 50 | Bitcoin 141460, 76:4c55...652e CHECKSIG. Various in Bitcoin 150951: 51 | DUP HASH160 0: EQUALVERIFY CHECKSIG. 52 | tx_hash=9969603dca74d14d29d1d5f56b94c7872551607f8c2d6837ab9715c60721b50e, 53 | Bitcoin 154012, 8:04678afd04678afd DROP SHA256 32:894e...e95c EQUAL, 54 | redeemed with 7:04678afd0467. 55 | tx_hash=b8fd633e7713a43d5ac87266adc78444669b987a56b3a65fb92d58c2c4b0e84d, 56 | Bitcoin 163685, raw 142a9bc5447d664c1d0141392a842d23dba45c4f13b175. 57 | tx_hash=60a20bd93aa49ab4b28d514ec10b06e1829ce6818ec06cd3aabd013ebcdc4bb1, 58 | Bitcoin 164467, 1 65:04cc...8ac4 65:0461...42af 2 CHECKMULTISIG, 59 | multisig script, redeemed with 0: 71:3044...2b01. 60 | tx_hash=f003f0c1193019db2497a675fd05d9f2edddf9b67c59e677c48d3dbd4ed5f00b, 61 | Bitcoin 165116, raw 62 | 76a91407e761706c63b36e5a328fab1d94e9397f40704d88b0. 63 | tx_hash=fa735229f650a8a12bcf2f14cca5a8593513f0aabc52f8687ee148c9f9ab6665, 64 | Bitcoin 166533, IFDUP IF 2SWAP VERIFY 2OVER DEPTH. 65 | tx_hash=b38bb421d9a54c58ea331c4b4823dd498f1e42e25ac96d3db643308fcc70503e, 66 | Bitcoin 168910, DUP DUP DUP. 67 | tx_hash=9c08a4d78931342b37fd5f72900fb9983087e6f46c4a097d8a1f52c74e28eaf6, 68 | Bitcoin 170052, HASH160 20:19a7...960e EQUAL, pay-to-script-hash, 69 | redeemed with 37:5121...51ae. 70 | tx_hash=c0b69d1e5ed13732dbd704604f7c08bc96549cc556c464aa42cc7525b3897987, 71 | Bitcoin 170766, 3 DROP DROP 1, redeemed with 1. 72 | tx_hash=aea682d68a3ea5e3583e088dcbd699a5d44d4b083f02ad0aaf2598fe1fa4dfd4, 73 | Bitcoin 170877, MIN 3 EQUAL, redeemed with 1:03 1:03. 74 | Cf. 
http://blockchain.info/strange-transactions
75 | 
76 | * Highlight any objects that are not in a main chain as such.  In
77 |   particular, don't imply that an output was "redeemed" when the
78 |   redeeming transaction is not in the chain.
79 | 
80 | * Write a test suite.  Include reorgs, duplicate coinbases, and upgrades.
81 | 
82 | * JSON/JSONP support in /q functions.
83 | 
84 | * Search by firstbits.
85 | 
86 | * Show firstbits on address history page.
87 | 
88 | * Consider adjusting statistics for duplicate coinbase transactions.
89 | 
90 | * Avoid duplicate in/out points on block pages by using DISTINCT or
91 |   equivalent.  This affects blocks common to more than one chain.
92 | 
93 | * Consider fixing: /block/HASH returns "Block not found" for block not
94 |   in any chain.
95 | 
96 | * Defunct abe.fcgi processes not letting abe.py exit.  Unseen in a while.
97 | 
98 | * Bitcoin 140176 failed to catch up: str(exception) == "1".  Should
99 |   probably limit the kinds of exceptions that catch_up converts to
100 |   warnings.
101 | 
102 | * Test for a portable "begin transaction" and use it.
103 | 
104 | * Consider retesting SQL flavour every time on startup.
105 | 
106 | * Add search by date/time within chain.
107 | 
108 | * Show auxiliary proof-of-work data.
109 | 
110 | * Consider porting to libbitcoin.
111 | 
112 | * Use explicit constraint names.
113 | 
114 | * Speed up initial load by disabling unneeded constraints temporarily.
115 | 
116 | * Consider showing how long ago events occurred along with or instead
117 |   of absolute times.
118 | 
119 | * Separate HTML from data access.
120 | 
121 | * Consider denormalization for performance: prevout_hash in txin,
122 |   pubkey_hash in txout, etc.
123 | 
124 | * Factor the coin-days destroyed feature as an optional add-on.  Work
125 |   begun in branch no-statistics.
126 | 
127 | * Slight variation in CoinDD et al. among sites indicates a bug.
128 |   Possibly fixed and due to corrupt data.
129 | 
130 | * Clean up and document limits on search result size.
131 | 
132 | * Admin interface to delete chains, etc.
133 | 
134 | * Add a tool to upload transactions if bitcoind supports it.
135 | 
136 | * Perhaps write a validation module to check db data.  Begun as verify.py.
137 | 
138 | * Abstract SQL into the DataStore class.
139 | 
140 | * Report block size, raw blocks, average transactions per block, fees
141 |   per block, ...
142 | 
143 | * Report the Merkle branch of a particular transaction to support SPV.
144 | 
145 | * Test as multithreaded or forking server.
146 | 
147 | * Perhaps support searching by hash/address non-initial substring.
148 | 
149 | * /q functions:
150 |   latesthash hextarget
151 |   decimaltarget probability hashestowin
152 |   nextretarget estimate
153 |   avgtxsize avgtxvalue avgblocksize
154 |   interval eta avgtxnumber
155 |   addressfirstseen mytransactions
156 |   reorglog
157 | 
158 | * BTC-specific /q functions:
159 |   bcperblock changeparams
160 |   totalbc(future block numbers)
161 | 
162 | * Perhaps create a BlockExplorer.com compatibility mode.
163 | 
164 | * Perhaps show nethash interval statistics on the chain summary page.
165 | 
166 | * Perhaps create an "SQL script" DataStore subclass that outputs flat
167 |   SQL INSERTs.
168 | 
169 | * Context-sensitive help as on http://blockexplorer.com.
170 | 
171 | * Simplify the process of adding new chains.
172 | 
173 | * Present Namecoin name operations as an optional add-on.
174 | 
175 | * Extract info from MultiCoin config files.
176 | 
177 | * Perhaps add coin tracking features: allow the administrator to
178 |   specify "interesting" (e.g., suspected fraudulent) transactions and
179 |   addresses, then add a hyperlink from objects linked to them.
180 | 
181 | * Perhaps track and report coin difficulty as proposed by casascius:
182 |   http://forum.bitcoin.org/index.php?topic=10755.0
183 | 
184 | * Perhaps track and report collectible transaction bytes: size of
185 |   transactions whose outputs are all spent, minus Merkle shrubbery
186 |   needed to verify the rest.
187 | 
188 | * Perhaps support email notification about address activity.
189 | 
--------------------------------------------------------------------------------
/Abe/readconf.py:
--------------------------------------------------------------------------------
1 | # Copyright(C) 2011,2012,2013 by Abe developers.
2 | 
3 | # This program is free software: you can redistribute it and/or modify
4 | # it under the terms of the GNU General Public License as published by
5 | # the Free Software Foundation, either version 3 of the License, or
6 | # (at your option) any later version.
7 | #
8 | # This program is distributed in the hope that it will be useful, but
9 | # WITHOUT ANY WARRANTY; without even the implied warranty of
10 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
11 | # Affero General Public License for more details.
12 | #
13 | # You should have received a copy of the GNU General Public License
14 | # along with this program.  If not, see
15 | # <http://www.gnu.org/licenses/>.
16 | 
17 | def looks_like_json(val):
18 |     return val[:1] in ('"', '[', '{') or val in ('true', 'false', 'null')
19 | 
20 | def parse_argv(argv, conf={}, config_name='config', strict=False):
21 |     arg_dict = conf.copy()
22 |     args = lambda var: arg_dict[var]
23 |     args.func_dict = arg_dict
24 | 
25 |     i = 0
26 |     while i < len(argv):
27 |         arg = argv[i]
28 | 
29 |         if arg == '--':
30 |             i += 1
31 |             break
32 |         if arg[:2] != '--':
33 |             break
34 | 
35 |         # Strip leading "--" to form a config variable.
36 |         # --var=val and --var val are the same.  --var+=val is different.
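        # Examples (hypothetical values):
        #   --port 2750   and   --port=2750
        #       both set arg_dict['port'] = '2750' (a string; a value is
        #       only parsed as JSON when it looks like JSON).
        #   --upgrade   with no value following sets arg_dict['upgrade'] = True.
        #   --datadir+=["/tmp/blocks"]   parses the JSON list and appends
        #       it to any existing datadir value via add().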
37 |         split = arg[2:].split('=', 1)
38 |         adding = False
39 |         if len(split) == 1:
40 |             var = split[0]
41 |             if i + 1 < len(argv) and argv[i + 1][:2] != '--':
42 |                 i += 1
43 |                 val = argv[i]
44 |             else:
45 |                 val = True
46 |         else:
47 |             var, val = split
48 |             if var[-1:] == '+':
49 |                 var = var[:-1]
50 |                 adding = True
51 | 
52 |         if val is not True and looks_like_json(val):
53 |             val = parse_json(val)
54 | 
55 |         var = var.replace('-', '_')
56 |         if var == config_name:
57 |             _include(set(), val, arg_dict, config_name, strict)
58 |         elif var not in conf:
59 |             break
60 |         elif adding:
61 |             add(arg_dict, var, val)
62 |         else:
63 |             arg_dict[var] = val
64 |         i += 1
65 | 
66 |     return args, argv[i:]
67 | 
68 | def include(filename, conf={}, config_name='config', strict=False):
69 |     _include(set(), filename, conf, config_name, strict)
70 |     return conf
71 | 
72 | class _Reader:
73 |     __slots__ = ['fp', 'lineno', 'line']
74 |     def __init__(rdr, fp):
75 |         rdr.fp = fp
76 |         rdr.lineno = 1
77 |         rdr.line = rdr.fp.read(1)
78 |     def eof(rdr):
79 |         return rdr.line == ''
80 |     def getc(rdr):
81 |         if rdr.eof():
82 |             return ''
83 |         ret = rdr.line[-1]
84 |         if ret == '\n':
85 |             rdr.lineno += 1
86 |             rdr.line = ''
87 |         c = rdr.fp.read(1)
88 |         if c == '':
89 |             rdr.line = ''
90 |         rdr.line += c
91 |         return ret
92 |     def peek(rdr):
93 |         if rdr.eof():
94 |             return ''
95 |         return rdr.line[-1]
96 |     def _readline(rdr):
97 |         ret = rdr.fp.readline()
98 |         rdr.line += ret
99 |         return ret
100 |     def readline(rdr):
101 |         ret = rdr.peek() + rdr._readline()
102 |         rdr.getc()  # Consume the newline if not at EOF.
103 |         return ret
104 |     def get_error_context(rdr, e):
105 |         e.lineno = rdr.lineno
106 |         if not rdr.eof():
107 |             e.offset = len(rdr.line)
108 |             if rdr.peek() != '\n':
109 |                 rdr._readline()
110 |             e.text = rdr.line
111 | 
112 | def _include(seen, filename, conf, config_name, strict):
113 |     if filename in seen:
114 |         raise Exception('Config file recursion')
115 | 
116 |     with open(filename) as fp:
117 |         rdr = _Reader(fp)
118 |         try:
119 |             entries = read(rdr)
120 |         except SyntaxError, e:
121 |             if e.filename is None:
122 |                 e.filename = filename
123 |             if e.lineno is None:
124 |                 rdr.get_error_context(e)
125 |             raise
126 |     for var, val, additive in entries:
127 |         var = var.replace('-', '_')
128 |         if var == config_name:
129 |             import os
130 |             _include(seen | set([filename]),
131 |                      os.path.join(os.path.dirname(filename), val), conf,
132 |                      config_name, strict)
133 |         elif var not in conf:
134 |             if strict:
135 |                 raise ValueError(
136 |                     "Unknown parameter `%s' in %s" % (var, filename))
137 |         elif additive and conf[var] is not None:
138 |             add(conf, var, val)
139 |         else:
140 |             conf[var] = val
141 |     return
142 | 
143 | def read(rdr):
144 |     """
145 |     Read name-value pairs from file and return the results as a list
146 |     of triples: (name, value, additive) where "additive" is true if
147 |     "+=" occurred between name and value.
148 | 
149 |     "NAME=VALUE" and "NAME VALUE" are equivalent.  Whitespace around
150 |     names and values is ignored, as are lines starting with '#' and
151 |     empty lines.  Values may be JSON strings, arrays, or objects.  A
152 |     value that does not start with '"' or '{' or '[' and is not a
153 |     boolean is read as a one-line string.  A line with just "NAME"
154 |     stores True as the value.
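    For example, a file containing (hypothetical values):

        # a comment
        port 2750
        connect-args abe.sqlite
        datadir += ["/home/bitcoin/.bitcoin"]

    yields [('port', '2750', False), ('connect-args', 'abe.sqlite',
    False), ('datadir', ['/home/bitcoin/.bitcoin'], True)].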
155 | """ 156 | entries = [] 157 | def store(name, value, additive): 158 | entries.append((name, value, additive)) 159 | 160 | def skipspace(rdr): 161 | while rdr.peek() in (' ', '\t', '\r'): 162 | rdr.getc() 163 | 164 | while True: 165 | skipspace(rdr) 166 | if rdr.eof(): 167 | break 168 | if rdr.peek() == '\n': 169 | rdr.getc() 170 | continue 171 | if rdr.peek() == '#': 172 | rdr.readline() 173 | continue 174 | 175 | name = '' 176 | while rdr.peek() not in (' ', '\t', '\r', '\n', '=', '+', ''): 177 | name += rdr.getc() 178 | 179 | if rdr.peek() not in ('=', '+'): 180 | skipspace(rdr) 181 | 182 | if rdr.peek() in ('\n', ''): 183 | store(name, True, False) 184 | continue 185 | 186 | additive = False 187 | 188 | if rdr.peek() in ('=', '+'): 189 | if rdr.peek() == '+': 190 | rdr.getc() 191 | if rdr.peek() != '=': 192 | raise SyntaxError("'+' without '='") 193 | additive = True 194 | rdr.getc() 195 | skipspace(rdr) 196 | 197 | if rdr.peek() in ('"', '[', '{'): 198 | js = scan_json(rdr) 199 | try: 200 | store(name, parse_json(js), additive) 201 | except ValueError, e: 202 | raise wrap_json_error(rdr, js, e) 203 | continue 204 | 205 | # Unquoted, one-line string. 206 | value = '' 207 | while rdr.peek() not in ('\n', ''): 208 | value += rdr.getc() 209 | value = value.strip() 210 | 211 | # Booleans and null. 212 | if value == 'true': 213 | value = True 214 | elif value == 'false': 215 | value = False 216 | elif value == 'null': 217 | value = None 218 | 219 | store(name, value, additive) 220 | 221 | return entries 222 | 223 | def add(conf, var, val): 224 | if var not in conf: 225 | conf[var] = val 226 | return 227 | 228 | if isinstance(val, dict) and isinstance(conf[var], dict): 229 | conf[var].update(val) 230 | return 231 | 232 | if not isinstance(conf[var], list): 233 | conf[var] = [conf[var]] 234 | if isinstance(val, list): 235 | conf[var] += val 236 | else: 237 | conf[var].append(val) 238 | 239 | # Scan to end of JSON object. Grrr, why can't json.py do this without 240 | # reading all of fp? 241 | 242 | def _scan_json_string(rdr): 243 | ret = rdr.getc() # '"' 244 | while True: 245 | c = rdr.getc() 246 | if c == '': 247 | raise SyntaxError('End of file in JSON string') 248 | 249 | # Accept raw control characters for readability. 250 | if c == '\n': 251 | c = '\\n' 252 | if c == '\r': 253 | c = '\\r' 254 | if c == '\t': 255 | c = '\\t' 256 | 257 | ret += c 258 | if c == '"': 259 | return ret 260 | if c == '\\': 261 | ret += rdr.getc() 262 | 263 | def _scan_json_nonstring(rdr): 264 | # Assume we are at a number or true|false|null. 265 | # Scan the token. 266 | ret = '' 267 | while rdr.peek() != '' and rdr.peek() in '-+0123456789.eEtrufalsn': 268 | ret += rdr.getc() 269 | return ret 270 | 271 | def _scan_json_space(rdr): 272 | # Scan whitespace including "," and ":". Strip comments for good measure. 273 | ret = '' 274 | while not rdr.eof() and rdr.peek() in ' \t\r\n,:#': 275 | c = rdr.getc() 276 | if c == '#': 277 | c = rdr.readline() and '\n' 278 | ret += c 279 | return ret 280 | 281 | def _scan_json_compound(rdr): 282 | # Scan a JSON array or object. 283 | ret = rdr.getc() 284 | if ret == '{': end = '}' 285 | if ret == '[': end = ']' 286 | ret += _scan_json_space(rdr) 287 | if rdr.peek() == end: 288 | return ret + rdr.getc() 289 | while True: 290 | if rdr.eof(): 291 | raise SyntaxError('End of file in JSON value') 292 | ret += scan_json(rdr) 293 | ret += _scan_json_space(rdr) 294 | if rdr.peek() == end: 295 | return ret + rdr.getc() 296 | 297 | def scan_json(rdr): 298 | # Scan a JSON value. 
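    # Dispatch on the first character: '"' starts a string, '[' or '{'
    # starts an array or object, and anything else must scan as a
    # number or a true/false/null token.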
299 |     c = rdr.peek()
300 |     if c == '"':
301 |         return _scan_json_string(rdr)
302 |     if c in ('[', '{'):
303 |         return _scan_json_compound(rdr)
304 |     ret = _scan_json_nonstring(rdr)
305 |     if ret == '':
306 |         raise SyntaxError('Invalid JSON')
307 |     return ret
308 | 
309 | def parse_json(js):
310 |     import json
311 |     return json.loads(js)
312 | 
313 | def wrap_json_error(rdr, js, e):
314 |     import re
315 |     match = re.search(r'(.*): line (\d+) column (\d+)', e.message, re.DOTALL)
316 |     if match:
317 |         e = SyntaxError(match.group(1))
318 |         json_lineno = int(match.group(2))
319 |         e.lineno = rdr.lineno - js.count('\n') + json_lineno - 1
320 |         e.text = js.split('\n')[json_lineno - 1]
321 |         e.offset = int(match.group(3))
322 |         if json_lineno == 1 and json_line1_column_bug():
323 |             e.offset += 1
324 |     return e
325 | 
326 | def json_line1_column_bug():
327 |     ret = False
328 |     try:
329 |         parse_json("{:")
330 |     except ValueError, e:
331 |         if "column 1" in e.message:
332 |             ret = True
333 |     finally:
334 |         return ret
335 | 
--------------------------------------------------------------------------------
/Abe/admin.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # Copyright(C) 2012,2013 by Abe developers.
3 | 
4 | # This program is free software: you can redistribute it and/or modify
5 | # it under the terms of the GNU Affero General Public License as
6 | # published by the Free Software Foundation, either version 3 of the
7 | # License, or (at your option) any later version.
8 | #
9 | # This program is distributed in the hope that it will be useful, but
10 | # WITHOUT ANY WARRANTY; without even the implied warranty of
11 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
12 | # Affero General Public License for more details.
13 | #
14 | # You should have received a copy of the GNU Affero General Public
15 | # License along with this program.  If not, see
16 | # <http://www.gnu.org/licenses/agpl.html>.
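# Example invocations (hypothetical config file and chain name; see the
# command list in main's help text below):
#   python -m Abe.admin --config abe.conf delete-chain-blocks Testnet
#   python -m Abe.admin --config abe.conf link-txin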
17 | 18 | """Delete a chain from the database, etc.""" 19 | 20 | import sys 21 | import logging 22 | 23 | import DataStore 24 | import readconf 25 | 26 | def commit(store): 27 | store.commit() 28 | store.log.info("Commit.") 29 | 30 | def log_rowcount(store, msg): 31 | store.log.info(msg, store.cursor.rowcount) 32 | 33 | def link_txin(store): 34 | store.log.info( 35 | "Linking missed transaction inputs to their previous outputs.") 36 | 37 | store.sql(""" 38 | UPDATE txin SET txout_id = ( 39 | SELECT txout_id 40 | FROM unlinked_txin JOIN txout JOIN tx ON (txout.tx_id = tx.tx_id) 41 | WHERE txin.txin_id = unlinked_txin.txin_id 42 | AND tx.tx_hash = unlinked_txin.txout_tx_hash 43 | AND txout.txout_pos = unlinked_txin.txout_pos) 44 | WHERE txout_id IS NULL""") 45 | log_rowcount(store, "Updated %d txout_id.") 46 | commit(store) 47 | 48 | store.sql(""" 49 | DELETE FROM unlinked_txin 50 | WHERE (SELECT txout_id FROM txin 51 | WHERE txin.txin_id = unlinked_txin.txin_id) IS NOT NULL""") 52 | log_rowcount(store, "Deleted %d unlinked_txin.") 53 | commit(store) 54 | 55 | def delete_tx(store, id_or_hash): 56 | try: 57 | tx_id = int(id_or_hash) 58 | except ValueError: 59 | (tx_id,) = store.selectrow( 60 | "SELECT tx_id FROM tx WHERE tx_hash = ?", 61 | (store.hashin_hex(id_or_hash),)) 62 | store.log.info("Deleting transaction with tx_id=%d", tx_id) 63 | 64 | store.sql(""" 65 | DELETE FROM unlinked_txin WHERE txin_id IN ( 66 | SELECT txin_id FROM txin WHERE tx_id = ?)""", 67 | (tx_id,)) 68 | log_rowcount(store, "Deleted %d from unlinked_txin.") 69 | 70 | store.sql("DELETE FROM txin WHERE tx_id = ?", (tx_id,)) 71 | log_rowcount(store, "Deleted %d from txin.") 72 | 73 | store.sql("DELETE FROM txout WHERE tx_id = ?", (tx_id,)) 74 | log_rowcount(store, "Deleted %d from txout.") 75 | 76 | store.sql("DELETE FROM tx WHERE tx_id = ?", (tx_id,)) 77 | log_rowcount(store, "Deleted %d from tx.") 78 | 79 | commit(store) 80 | 81 | def rewind_datadir(store, dirname): 82 | store.sql(""" 83 | UPDATE datadir 84 | SET blkfile_number = 1, blkfile_offset = 0 85 | WHERE dirname = ? 86 | AND (blkfile_number > 1 OR blkfile_offset > 0)""", 87 | (dirname,)) 88 | log_rowcount(store, "Datadir blockfile pointers rewound: %d") 89 | commit(store) 90 | 91 | def rewind_chain_blockfile(store, name, chain_id): 92 | store.sql(""" 93 | UPDATE datadir 94 | SET blkfile_number = 1, blkfile_offset = 0 95 | WHERE chain_id = ? 
96 | AND (blkfile_number > 1 OR blkfile_offset > 0)""", 97 | (chain_id,)) 98 | log_rowcount(store, "Datadir blockfile pointers rewound: %d") 99 | 100 | def chain_name_to_id(store, name): 101 | (chain_id,) = store.selectrow( 102 | "SELECT chain_id FROM chain WHERE chain_name = ?", (name,)) 103 | return chain_id 104 | 105 | def del_chain_blocks_1(store, name, chain_id): 106 | store.sql("UPDATE chain SET chain_last_block_id = NULL WHERE chain_id = ?", 107 | (chain_id,)) 108 | store.log.info("Nulled %s chain_last_block_id.", name) 109 | 110 | store.sql(""" 111 | UPDATE block 112 | SET prev_block_id = NULL, 113 | search_block_id = NULL 114 | WHERE block_id IN ( 115 | SELECT block_id FROM chain_candidate WHERE chain_id = ?)""", 116 | (chain_id,)) 117 | log_rowcount(store, "Disconnected %d blocks from chain.") 118 | commit(store) 119 | 120 | store.sql(""" 121 | DELETE FROM orphan_block WHERE block_id IN ( 122 | SELECT block_id FROM chain_candidate WHERE chain_id = ?)""", 123 | (chain_id,)) 124 | log_rowcount(store, "Deleted %d from orphan_block.") 125 | commit(store) 126 | 127 | store.sql(""" 128 | DELETE FROM block_next WHERE block_id IN ( 129 | SELECT block_id FROM chain_candidate WHERE chain_id = ?)""", 130 | (chain_id,)) 131 | log_rowcount(store, "Deleted %d from block_next.") 132 | commit(store) 133 | 134 | store.sql(""" 135 | DELETE FROM block_txin WHERE block_id IN ( 136 | SELECT block_id FROM chain_candidate WHERE chain_id = ?)""", 137 | (chain_id,)) 138 | log_rowcount(store, "Deleted %d from block_txin.") 139 | commit(store) 140 | 141 | if store.use_firstbits: 142 | store.sql(""" 143 | DELETE FROM abe_firstbits WHERE block_id IN ( 144 | SELECT block_id FROM chain_candidate WHERE chain_id = ?)""", 145 | (chain_id,)) 146 | log_rowcount(store, "Deleted %d from abe_firstbits.") 147 | commit(store) 148 | 149 | def del_chain_block_tx(store, name, chain_id): 150 | store.sql(""" 151 | DELETE FROM block_tx WHERE block_id IN ( 152 | SELECT block_id FROM chain_candidate WHERE chain_id = ?)""", 153 | (chain_id,)) 154 | log_rowcount(store, "Deleted %d from block_tx.") 155 | commit(store) 156 | 157 | def delete_chain_blocks(store, name, chain_id = None): 158 | if chain_id is None: 159 | chain_id = chain_name_to_id(store, name) 160 | 161 | store.log.info("Deleting blocks in chain %s", name) 162 | del_chain_blocks_1(store, name, chain_id) 163 | del_chain_block_tx(store, name, chain_id) 164 | del_chain_blocks_2(store, name, chain_id) 165 | 166 | def delete_chain_transactions(store, name, chain_id = None): 167 | if chain_id is None: 168 | chain_id = chain_name_to_id(store, name) 169 | 170 | store.log.info("Deleting transactions and blocks in chain %s", name) 171 | del_chain_blocks_1(store, name, chain_id) 172 | 173 | store.sql(""" 174 | DELETE FROM unlinked_txin WHERE txin_id IN ( 175 | SELECT txin.txin_id 176 | FROM chain_candidate cc 177 | JOIN block_tx bt ON (cc.block_id = bt.block_id) 178 | JOIN txin ON (bt.tx_id = txin.tx_id) 179 | WHERE cc.chain_id = ?)""", (chain_id,)) 180 | log_rowcount(store, "Deleted %d from unlinked_txin.") 181 | 182 | store.sql(""" 183 | DELETE FROM txin WHERE tx_id IN ( 184 | SELECT bt.tx_id 185 | FROM chain_candidate cc 186 | JOIN block_tx bt ON (cc.block_id = bt.block_id) 187 | WHERE cc.chain_id = ?)""", (chain_id,)) 188 | log_rowcount(store, "Deleted %d from txin.") 189 | commit(store) 190 | 191 | store.sql(""" 192 | DELETE FROM txout WHERE tx_id IN ( 193 | SELECT bt.tx_id 194 | FROM chain_candidate cc 195 | JOIN block_tx bt ON (cc.block_id = bt.block_id) 196 | WHERE 
cc.chain_id = ?)""", (chain_id,)) 197 | log_rowcount(store, "Deleted %d from txout.") 198 | commit(store) 199 | 200 | tx_ids = [] 201 | for row in store.selectall(""" 202 | SELECT tx_id 203 | FROM chain_candidate cc 204 | JOIN block_tx bt ON (cc.block_id = bt.block_id) 205 | WHERE cc.chain_id = ?""", (chain_id,)): 206 | tx_ids.append(int(row[0])) 207 | 208 | del_chain_block_tx(store, name, chain_id) 209 | 210 | deleted = 0 211 | store.log.info("Deleting from tx...") 212 | 213 | for tx_id in tx_ids: 214 | store.sql("DELETE FROM tx WHERE tx_id = ?", (tx_id,)) 215 | cnt = store.cursor.rowcount 216 | 217 | if cnt > 0: 218 | deleted += 1 219 | if deleted % 10000 == 0: 220 | store.log.info("Deleting tx: %d", deleted) 221 | commit(store) 222 | 223 | store.log.info("Deleted %d from tx.", deleted) 224 | commit(store) 225 | 226 | del_chain_blocks_2(store, name, chain_id) 227 | 228 | def del_chain_blocks_2(store, name, chain_id): 229 | block_ids = [] 230 | for row in store.selectall( 231 | "SELECT block_id FROM chain_candidate WHERE chain_id = ?", (chain_id,)): 232 | block_ids.append(int(row[0])) 233 | 234 | store.sql(""" 235 | DELETE FROM chain_candidate WHERE chain_id = ?""", 236 | (chain_id,)) 237 | log_rowcount(store, "Deleted %d from chain_candidate.") 238 | 239 | deleted = 0 240 | for block_id in block_ids: 241 | store.sql("DELETE FROM block WHERE block_id = ?", (block_id,)) 242 | deleted += store.cursor.rowcount 243 | store.log.info("Deleted %d from block.", deleted) 244 | 245 | rewind_chain_blockfile(store, name, chain_id) 246 | commit(store) 247 | 248 | def main(argv): 249 | conf = { 250 | "debug": None, 251 | "logging": None, 252 | } 253 | conf.update(DataStore.CONFIG_DEFAULTS) 254 | 255 | args, argv = readconf.parse_argv(argv, conf, 256 | strict=False) 257 | if argv and argv[0] in ('-h', '--help'): 258 | print ("""Usage: python -m Abe.admin [-h] [--config=FILE] COMMAND... 259 | 260 | Options: 261 | 262 | --help Show this help message and exit. 263 | --config FILE Abe configuration file. 264 | 265 | Commands: 266 | 267 | delete-chain-blocks NAME Delete all blocks in the specified chain 268 | from the database. 269 | 270 | delete-chain-transactions NAME Delete all blocks and transactions in 271 | the specified chain. 272 | 273 | delete-tx TX_ID Delete the specified transaction. 274 | delete-tx TX_HASH 275 | 276 | link-txin Link transaction inputs to previous outputs. 
277 | 278 | rewind-datadir DIRNAME Reset the pointer to force a rescan of 279 | blockfiles in DIRNAME.""") 280 | return 0 281 | 282 | logging.basicConfig( 283 | stream=sys.stdout, 284 | level=logging.DEBUG, 285 | format="%(message)s") 286 | if args.logging is not None: 287 | import logging.config as logging_config 288 | logging_config.dictConfig(args.logging) 289 | 290 | store = DataStore.new(args) 291 | 292 | while len(argv) != 0: 293 | command = argv.pop(0) 294 | if command == 'delete-chain-blocks': 295 | delete_chain_blocks(store, argv.pop(0)) 296 | elif command == 'delete-chain-transactions': 297 | delete_chain_transactions(store, argv.pop(0)) 298 | elif command == 'delete-tx': 299 | delete_tx(store, argv.pop(0)) 300 | elif command == 'rewind-datadir': 301 | rewind_datadir(store, argv.pop(0)) 302 | elif command == 'link-txin': 303 | link_txin(store) 304 | else: 305 | raise ValueError("Unknown command: " + command) 306 | 307 | return 0 308 | 309 | if __name__ == '__main__': 310 | sys.exit(main(sys.argv[1:])) 311 | -------------------------------------------------------------------------------- /abe.conf: -------------------------------------------------------------------------------- 1 | # Config file for Abe. 2 | 3 | # dbtype and connect-args are required. 4 | # If not configured to run under FastCGI, host OR port is required. 5 | 6 | # "NAME=VALUE" and "NAME VALUE" are equivalent. Just "NAME" sets 7 | # value to True. Values of the form "..." or [...] or {...} are 8 | # parsed as extended JSON, as are the values true, false, and null. 9 | # Abe extends JSON syntax to allow strings to contain control 10 | # characters (tab, LF, and CR) and to allow comments (from # to end of 11 | # line) between tokens. 12 | 13 | # "NAME += VALUE" appends VALUE to an array or updates an object 14 | # defined by NAME. Hyphen in NAME is read as underscore, so 15 | # "connect_args" and "connect-args" are synonymous. Later values 16 | # override earlier ones (unless using "+="). 17 | 18 | # All values may be passed as command-line arguments by preceding NAME 19 | # with "--". Example: "python -m Abe.abe --upgrade --dbtype=sqlite3 20 | # --connect-args=abe.sqlite --port 2750". 21 | 22 | # dbtype is the name of a Python module. connect-args are arguments 23 | # to pass to the module's "connect" function. Consult the module's 24 | # documentation for values. The user needs permission to create 25 | # tables. Consult your database documentation on how to configure a 26 | # database. See also README.txt. 27 | 28 | # PostgreSQL example; see also README-POSTGRES.txt: 29 | #dbtype = psycopg2 30 | #connect-args = { "database": "abe" } 31 | 32 | # MySQL example; see also README-MYSQL.txt: 33 | #dbtype MySQLdb 34 | #connect-args {"user":"root","db":"abe"} 35 | 36 | # SQLite3 example: 37 | #dbtype = sqlite3 38 | #connect-args = abe.sqlite 39 | 40 | # Oracle example: 41 | #dbtype=cx_Oracle 42 | #connect-args=user/pass@SID 43 | 44 | # ODBC examples. Don't use the same schema via ODBC and non-ODBC! 
45 | #dbtype = pyodbc 46 | #connect-args = DSN=Abe;PWD=B!tc0iN; 47 | #connect-args = DRIVER={ORACLE};DB=ORCL;UID=abe;PWD=Bitcoin; 48 | #connect-args = DRIVER={PostgreSQL ANSI};Database=abe; 49 | 50 | # Workaround for ODBC drivers that begin in autocommit mode: 51 | #connect-args {"":"DRIVER={PostgreSQL ANSI};Database=abe;","autocommit":false} 52 | 53 | # IBM DB2 example: 54 | #dbtype = ibm_db_dbi 55 | #connect-args {"dsn":"DATABASE=abe;UID=db2inst1;PWD=B!tCo1N","conn_options":{"102":0}} 56 | 57 | # Specify port and/or host to serve HTTP instead of FastCGI: 58 | #port 2750 59 | #host localhost 60 | 61 | # Specify no-serve to exit immediately after importing block files: 62 | #no-serve 63 | 64 | # Specify no-load to start abe server without ever loading the 65 | # blockchain - this is useful if you have a dedicated instance loading 66 | # blocks into your Abe database. 67 | #no-load 68 | 69 | # "upgrade" tells Abe to upgrade database objects automatically after 70 | # code updates: 71 | #upgrade 72 | # 73 | # Sometimes upgrades fail and leave the database unusable. Sometimes 74 | # upgrades take several minutes or longer. You may get an idea of the 75 | # time required by looking at comments near the bottom of 76 | # Abe/upgrade.py. For example, if your schema version is Abe18 and 77 | # you are upgrading to schema version Abe19, the relevant lines are: 78 | # 79 | # ('Abe18', add_block_num_tx), # Seconds 80 | # ('Abe18.1', add_block_ss_destroyed), # Seconds 81 | # ('Abe18.2', init_block_tx_sums), # 5 minutes 82 | # ('Abe18.3', replace_chain_summary), # Fast 83 | # 84 | # In this case, you could expect a 5-minute upgrade. The following 85 | # SQL shows the current schema version, provided that it is at least 86 | # Abe13: 87 | # 88 | # SELECT configvar_value FROM configvar 89 | # WHERE configvar_name = 'schema_version' 90 | # 91 | # "python -m Abe.abe -v" shows the schema version that the software 92 | # requires. Sometimes you can upgrade a live database by running 93 | # "python -m Abe.abe --config YOUR_ABE_CONFIG --no-serve --upgrade" 94 | # but this often results in server errors and may sometimes corrupt 95 | # the data. It is best if the upgrade process has exclusive access to 96 | # the database. 97 | 98 | # Include another configuration file, relative to this one's directory. 99 | #config ../secret/abe.conf 100 | 101 | # Add or replace object fields. This example assumes connect-args 102 | # previously held an object such as { "user": "abe" }. 103 | #connect-args += { "password": "B!tc0iN" } 104 | 105 | # datadir lists directories to scan for block files. 106 | #datadir += /home/bitcoin/.bitcoin/testnet 107 | #datadir += /home/bitcoin/.namecoin 108 | 109 | #datadir += [{ 110 | # "dirname": "/home/bitcoin/.bitcoin", 111 | # "loader": "rpc" # See the comments for default-loader below. 112 | # }] 113 | 114 | # datadir can supply information about new currencies. 115 | # Note that "address_version" is a byte string: the byte (or, perhaps 116 | # someday, several bytes) preceding the public key hash before base-58 117 | # conversion. 
Example:
118 | #
119 | #datadir += [{
120 | #    "dirname": "/home/weeds/testnet",
121 | #    "chain": "Weeds",
122 | #    "code3": "WDS",
123 | #    "address_version": "\u00f3" }]
124 | #datadir += [{
125 | #    "dirname":"/home/namecoin/.namecoin/testnet",
126 | #    "chain":"NCTestnet",
127 | #    "code3":"NC0",
128 | #    "address_version":"\u006f"}]
129 | 
130 | # ignore-bit8-chains: list of chains for which block version bit 8 does
131 | # NOT indicate a Namecoin-style merge-mined auxiliary proof-of-work.
132 | #ignore-bit8-chains = ["Bitcoin", "Testnet"]
133 | 
134 | # Set binary-type=hex if you want to store binary data (hashes,
135 | # scripts, keys, etc.) as hexadecimal strings regardless of the
136 | # database's binary data support.
137 | #
138 | # To use experimental support for PostgreSQL BYTEA storage, set
139 | # binary-type=pg-bytea.  This may become the default on databases that
140 | # support it.
141 | #
142 | # To use MySQL binary types, set binary-type=binary.  This may become the
143 | # default on databases that support it.
144 | #
145 | # The binary-type setting affects only the first run, where Abe
146 | # creates its tables.
147 | #binary-type hex
148 | 
149 | # Some databases have trouble with the large integers that Abe uses
150 | # for statistics.  Setting int-type=str causes Abe to pass certain
151 | # integers to the database as strings and cast them to floating point
152 | # in SQL expressions that calculate statistics.  Try this if SQLite
153 | # complains "OverflowError: long too big to convert".
154 | #int-type str
155 | 
156 | # URL component for static content: logos, style sheets.
157 | #static-path =
158 | 
159 | # Filesystem location of static content, if served by Abe.
160 | #document-root = Abe/htdocs
161 | 
162 | # Uncomment "auto-agpl" to add a "Source" link to each page pointing
163 | # to a "/download" URL that streams the directory containing abe.py
164 | # and all subdirectories as a compressed TAR archive.  This exposes
165 | # files outside of the htdocs directory to the client, so use it with
166 | # caution.
167 | #auto-agpl
168 | 
169 | # Directory name and tarfile name prefix for auto-agpl source
170 | # download.
171 | #download-name = abe
172 | 
173 | # Primitive template system until I separate view from controller.
174 | #template_vars += {
175 | #    "CONTENT_TYPE": "text/html; charset=utf-8",
176 | #    "APPNAME": "Abe",
177 | #    "DONATIONS_BTC": "1PWC7PNHL1SgvZaN7xEtygenKjWobWsCuf",
178 | #    "DONATIONS_NMC": "NJ3MSELK1cWnqUa6xhF2wUYAnz3RSrWXcK"
179 | #}
180 | #template_vars += { "DONATIONS_BTC": "1YourBitcoinAddressHere" }
181 | #template_vars += { "HOMEPAGE": "chain/Bitcoin" }
182 | #template = "<?xml version=\"1.0\" encoding=\"UTF-8\"?>
183 | # <!DOCTYPE html PUBLIC \"-//W3C//DTD XHTML 1.0 Strict//EN\"
184 | #  \"http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd\">
185 | # <html xmlns=\"http://www.w3.org/1999/xhtml\" xml:lang=\"en\" lang=\"en\">
186 | # <head>
187 | #  <link rel=\"stylesheet\" type=\"text/css\"
188 | #   href=\"%(dotdot)s%(STATIC_PATH)sabe.css\" />
189 | #  <title>%(title)s</title>
190 | # </head>
191 | # <body>
192 | #  <h1>
193 | #   <a href=\"%(dotdot)schains\"><img src=\"%(dotdot)s%(STATIC_PATH)slogo32.png\" alt=\"Abe logo\" /></a> %(h1)s
194 | #  </h1>
195 | #  %(body)s
196 | #  <p style=\"font-size: smaller\">
197 | #   <span style=\"font-style: italic\">
198 | #    Powered by <a href=\"%(ABE_URL)s\">%(APPNAME)s</a>
199 | #   </span>
200 | #   Tips appreciated!
201 | #   <a href=\"%(dotdot)saddress/%(DONATIONS_BTC)s\">BTC</a>
202 | #   <a href=\"%(dotdot)saddress/%(DONATIONS_NMC)s\">NMC</a>
203 | #  </p>
204 | # </body>
205 | # </html>
206 | #"
207 | 
208 | # Save blocks to the database after reading this many bytes from a
209 | # file.  Abe may run faster with a higher value, for example 10000,
210 | # while loading lots of data with a single process.  Non-zero values
211 | # can lead to errors when two processes load transaction data
212 | # simultaneously.
213 | #commit-bytes = 0
214 | 
215 | # "rescan" causes Abe to search all block files for new blocks.  This
216 | # can take several minutes on a large chain, longer if many of the
217 | # blocks are not already in Abe's database.  You might want to do this
218 | # if Abe becomes stuck at an old block number: new blocks are loaded
219 | # but not attached to the chain.  You must run once with "rescan"
220 | # after switching to another copy of the block files.
221 | #rescan
222 | 
223 | # For displaying short links, base-url overrides the first part of the
224 | # URL.  This does not affect the link target, only the visible text.
225 | # The value must include the trailing slash (/) if applicable.
226 | #base-url = http://abe.example.org/
227 | 
228 | # History pages of addresses with more than this many receipts will
229 | # not be shown.  This protects against denial of service.  Use -1 for
230 | # no limit.  This also limits the total inputs shown by
231 | # /unspent/ADDR|ADDR|...
232 | #address-history-rows-max 1000
233 | 
234 | # Argument to logging.config.dictConfig.  Requires Python 2.7 or later.
235 | # http://docs.python.org/library/logging.config.html#logging-config-dictschema
236 | #logging = {
237 | #    "version": 1,
238 | #    "handlers": {
239 | #        "console": {
240 | #            "class": "logging.StreamHandler",
241 | #            "formatter": "full",
242 | #            "level": "DEBUG"}},
243 | #    "formatters": {
244 | #        "full": {
245 | #            "format": "%(asctime)s [%(process)d:%(threadName)s] %(name)s %(levelname)s - %(message)s"}},
246 | #    "root": {
247 | #        "handlers": ["console"],
248 | #        "level": "DEBUG"}}
249 | 
250 | # --log-sql logs all queries.  This is quite verbose, so it is disabled
251 | # by default.
252 | #log-sql
253 | 
254 | # Create and use the abe_firstbits table.  This affects only the first
255 | # run, where Abe creates its tables, or the first run after an upgrade
256 | # to firstbits-enabled Abe.  This is disabled by default.
257 | #use-firstbits
258 | 
259 | # shortlink-type=firstbits constructs address short links using
260 | # firstbits.  This is the default when use-firstbits is true.
261 | # Otherwise, shortlink-type is a number of address characters to use.
262 | # Fewer characters result in more collisions, where the page shows a
263 | # list of matches instead of redirecting to the address history.
264 | #shortlink-type 10
265 | 
266 | # keep-scriptsig=false prevents storage and display of transaction
267 | # input signature scripts (scriptSig) and sequence numbers.  This
268 | # reduces the database size by about 20%.
269 | #keep-scriptsig
270 | 
271 | # Add transactions to the database.  The genesis transaction is
272 | # unavailable via RPC and must be specified to enable full loading
273 | # over RPC.  See Abe/genesis_tx.py.
274 | #import-tx += 01000000010000000000000000000000000000000000000000000000000000000000000000ffffffff4d04ffff001d0104455468652054696d65732030332f4a616e2f32303039204368616e63656c6c6f72206f6e206272696e6b206f66207365636f6e64206261696c6f757420666f722062616e6b73ffffffff0100f2052a01000000434104678afdb0fe5548271967f1a67130b7105cd6a828e03909a67962e0ea1f61deb649f6bc3f4cef38c4f35504e51ec112de5c384df7ba0b8d578a4c702b6bf11d5fac00000000
275 | 
276 | # Use default-loader=blkfile to force Abe to load data by scanning
277 | # blockfiles.  Use default-loader=rpc to force the use of RPC
278 | # with a running bitcoind (or *coin daemon).  This requires:
279 | #
280 | #     * A readable bitcoin.conf with the RPC username and password
281 | #
282 | #     * HTTP access to bitcoind (SSL is not yet supported.)
283 | #
284 | #     * Bitcoin 0.8 or newer with the -txindex option.  (Run bitcoind
285 | #       once with -reindex if you previously ran it without -txindex.)
286 | #
287 | #     * A "datadir" option with "chain":"Bitcoin" (or other chain
288 | #       name)
289 | #
290 | #     * The "import-tx" option specifying the genesis transaction, if
291 | #       "bitcoind getrawtransaction" does not return it and it is not
292 | #       in Abe/genesis_tx.py.  (Bitcoin's genesis transaction is in
293 | #       Abe/genesis_tx.py; other chains may require manually parsing
294 | #       the first blockfile.)
295 | #
296 | # By default, Abe tries RPC and falls back to blkfile if that fails.
297 | # Use "rpc,blkfile" to specify this behaviour across upgrades, or
298 | # "default" to let Abe choose.  The "loader" specified in a "datadir"
299 | # entry takes precedence over "default-loader".
300 | #
301 | #default-loader = default
302 | 
--------------------------------------------------------------------------------
/Abe/deserialize.py:
--------------------------------------------------------------------------------
1 | #
2 | #
3 | #
4 | 
5 | from BCDataStream import *
6 | from enumeration import Enumeration
7 | from base58 import public_key_to_bc_address, hash_160_to_bc_address
8 | import logging
9 | import socket
10 | import time
11 | from util import short_hex, long_hex
12 | import struct
13 | 
14 | def parse_CAddress(vds):
15 |     d = {}
16 |     d['nVersion'] = vds.read_int32()
17 |     d['nTime'] = vds.read_uint32()
18 |     d['nServices'] = vds.read_uint64()
19 |     d['pchReserved'] = vds.read_bytes(12)
20 |     d['ip'] = socket.inet_ntoa(vds.read_bytes(4))
21 |     d['port'] = socket.htons(vds.read_uint16())
22 |     return d
23 | 
24 | def deserialize_CAddress(d):
25 |     return d['ip']+":"+str(d['port'])+" (lastseen: %s)"%(time.ctime(d['nTime']),)
26 | 
27 | def parse_setting(setting, vds):
28 |     if setting[0] == "f":  # flag (boolean) settings
29 |         return str(vds.read_boolean())
30 |     elif setting == "addrIncoming":
31 |         return ""  # bitcoin 0.4 purposely breaks addrIncoming setting in encrypted wallets.
32 | elif setting[0:4] == "addr": # CAddress 33 | d = parse_CAddress(vds) 34 | return deserialize_CAddress(d) 35 | elif setting == "nTransactionFee": 36 | return vds.read_int64() 37 | elif setting == "nLimitProcessors": 38 | return vds.read_int32() 39 | return 'unknown setting' 40 | 41 | def parse_TxIn(vds): 42 | d = {} 43 | d['prevout_hash'] = vds.read_bytes(32) 44 | d['prevout_n'] = vds.read_uint32() 45 | d['scriptSig'] = vds.read_bytes(vds.read_compact_size()) 46 | d['sequence'] = vds.read_uint32() 47 | return d 48 | 49 | def deserialize_TxIn(d, transaction_index=None, owner_keys=None): 50 | if d['prevout_hash'] == "\x00"*32: 51 | result = "TxIn: COIN GENERATED" 52 | result += " coinbase:"+d['scriptSig'].encode('hex_codec') 53 | elif transaction_index is not None and d['prevout_hash'] in transaction_index: 54 | p = transaction_index[d['prevout_hash']]['txOut'][d['prevout_n']] 55 | result = "TxIn: value: %f"%(p['value']/1.0e8,) 56 | result += " prev("+long_hex(d['prevout_hash'][::-1])+":"+str(d['prevout_n'])+")" 57 | else: 58 | result = "TxIn: prev("+long_hex(d['prevout_hash'][::-1])+":"+str(d['prevout_n'])+")" 59 | pk = extract_public_key(d['scriptSig']) 60 | result += " pubkey: "+pk 61 | result += " sig: "+decode_script(d['scriptSig']) 62 | if d['sequence'] < 0xffffffff: result += " sequence: "+hex(d['sequence']) 63 | return result 64 | 65 | def parse_TxOut(vds): 66 | d = {} 67 | d['value'] = vds.read_int64() 68 | d['scriptPubKey'] = vds.read_bytes(vds.read_compact_size()) 69 | return d 70 | 71 | def deserialize_TxOut(d, owner_keys=None): 72 | result = "TxOut: value: %f"%(d['value']/1.0e8,) 73 | pk = extract_public_key(d['scriptPubKey']) 74 | result += " pubkey: "+pk 75 | result += " Script: "+decode_script(d['scriptPubKey']) 76 | if owner_keys is not None: 77 | if pk in owner_keys: result += " Own: True" 78 | else: result += " Own: False" 79 | return result 80 | 81 | def parse_Transaction(vds): 82 | d = {} 83 | start_pos = vds.read_cursor 84 | d['version'] = vds.read_int32() 85 | n_vin = vds.read_compact_size() 86 | d['txIn'] = [] 87 | for i in xrange(n_vin): 88 | d['txIn'].append(parse_TxIn(vds)) 89 | n_vout = vds.read_compact_size() 90 | d['txOut'] = [] 91 | for i in xrange(n_vout): 92 | d['txOut'].append(parse_TxOut(vds)) 93 | d['lockTime'] = vds.read_uint32() 94 | d['__data__'] = vds.input[start_pos:vds.read_cursor] 95 | return d 96 | 97 | def deserialize_Transaction(d, transaction_index=None, owner_keys=None, print_raw_tx=False): 98 | result = "%d tx in, %d out\n"%(len(d['txIn']), len(d['txOut'])) 99 | for txIn in d['txIn']: 100 | result += deserialize_TxIn(txIn, transaction_index) + "\n" 101 | for txOut in d['txOut']: 102 | result += deserialize_TxOut(txOut, owner_keys) + "\n" 103 | if print_raw_tx == True: 104 | result += "Transaction hex value: " + d['__data__'].encode('hex') + "\n" 105 | 106 | return result 107 | 108 | def parse_MerkleTx(vds): 109 | d = parse_Transaction(vds) 110 | d['hashBlock'] = vds.read_bytes(32) 111 | n_merkleBranch = vds.read_compact_size() 112 | d['merkleBranch'] = vds.read_bytes(32*n_merkleBranch) 113 | d['nIndex'] = vds.read_int32() 114 | return d 115 | 116 | def deserialize_MerkleTx(d, transaction_index=None, owner_keys=None): 117 | tx = deserialize_Transaction(d, transaction_index, owner_keys) 118 | result = "block: "+(d['hashBlock'][::-1]).encode('hex_codec') 119 | result += " %d hashes in merkle branch\n"%(len(d['merkleBranch'])/32,) 120 | return result+tx 121 | 122 | def parse_WalletTx(vds): 123 | d = parse_MerkleTx(vds) 124 | n_vtxPrev = 
vds.read_compact_size() 125 | d['vtxPrev'] = [] 126 | for i in xrange(n_vtxPrev): 127 | d['vtxPrev'].append(parse_MerkleTx(vds)) 128 | 129 | d['mapValue'] = {} 130 | n_mapValue = vds.read_compact_size() 131 | for i in xrange(n_mapValue): 132 | key = vds.read_string() 133 | value = vds.read_string() 134 | d['mapValue'][key] = value 135 | n_orderForm = vds.read_compact_size() 136 | d['orderForm'] = [] 137 | for i in xrange(n_orderForm): 138 | first = vds.read_string() 139 | second = vds.read_string() 140 | d['orderForm'].append( (first, second) ) 141 | d['fTimeReceivedIsTxTime'] = vds.read_uint32() 142 | d['timeReceived'] = vds.read_uint32() 143 | d['fromMe'] = vds.read_boolean() 144 | d['spent'] = vds.read_boolean() 145 | 146 | return d 147 | 148 | def deserialize_WalletTx(d, transaction_index=None, owner_keys=None): 149 | result = deserialize_MerkleTx(d, transaction_index, owner_keys) 150 | result += "%d vtxPrev txns\n"%(len(d['vtxPrev']),) 151 | result += "mapValue:"+str(d['mapValue']) 152 | if len(d['orderForm']) > 0: 153 | result += "\n"+" orderForm:"+str(d['orderForm']) 154 | result += "\n"+"timeReceived:"+time.ctime(d['timeReceived']) 155 | result += " fromMe:"+str(d['fromMe'])+" spent:"+str(d['spent']) 156 | return result 157 | 158 | # The CAuxPow (auxiliary proof of work) structure supports merged mining. 159 | # A flag in the block version field indicates the structure's presence. 160 | # As of 8/2011, the Original Bitcoin Client does not use it. CAuxPow 161 | # originated in Namecoin; see 162 | # https://github.com/vinced/namecoin/blob/mergedmine/doc/README_merged-mining.md. 163 | def parse_AuxPow(vds): 164 | d = parse_MerkleTx(vds) 165 | n_chainMerkleBranch = vds.read_compact_size() 166 | d['chainMerkleBranch'] = vds.read_bytes(32*n_chainMerkleBranch) 167 | d['chainIndex'] = vds.read_int32() 168 | d['parentBlock'] = parse_BlockHeader(vds) 169 | return d 170 | 171 | def parse_BlockHeader(vds): 172 | d = {} 173 | header_start = vds.read_cursor 174 | d['version'] = vds.read_int32() 175 | d['hashPrev'] = vds.read_bytes(32) 176 | d['hashMerkleRoot'] = vds.read_bytes(32) 177 | d['nTime'] = vds.read_uint32() 178 | d['nBits'] = vds.read_uint32() 179 | d['nNonce'] = vds.read_uint32() 180 | header_end = vds.read_cursor 181 | d['__header__'] = vds.input[header_start:header_end] 182 | return d 183 | 184 | def parse_Block(vds): 185 | d = parse_BlockHeader(vds) 186 | d['transactions'] = [] 187 | # if d['version'] & (1 << 8): 188 | # d['auxpow'] = parse_AuxPow(vds) 189 | nTransactions = vds.read_compact_size() 190 | for i in xrange(nTransactions): 191 | d['transactions'].append(parse_Transaction(vds)) 192 | 193 | return d 194 | 195 | def deserialize_Block(d, print_raw_tx=False): 196 | result = "Time: "+time.ctime(d['nTime'])+" Nonce: "+str(d['nNonce']) 197 | result += "\nnBits: 0x"+hex(d['nBits']) 198 | result += "\nhashMerkleRoot: 0x"+d['hashMerkleRoot'][::-1].encode('hex_codec') 199 | result += "\nPrevious block: "+d['hashPrev'][::-1].encode('hex_codec') 200 | result += "\n%d transactions:\n"%len(d['transactions']) 201 | for t in d['transactions']: 202 | result += deserialize_Transaction(t, print_raw_tx=print_raw_tx)+"\n" 203 | result += "\nRaw block header: "+d['__header__'].encode('hex_codec') 204 | return result 205 | 206 | def parse_BlockLocator(vds): 207 | d = { 'hashes' : [] } 208 | nHashes = vds.read_compact_size() 209 | for i in xrange(nHashes): 210 | d['hashes'].append(vds.read_bytes(32)) 211 | return d 212 | 213 | def deserialize_BlockLocator(d): 214 | result = "Block Locator top: 
"+d['hashes'][0][::-1].encode('hex_codec') 215 | return result 216 | 217 | opcodes = Enumeration("Opcodes", [ 218 | ("OP_0", 0), ("OP_PUSHDATA1",76), "OP_PUSHDATA2", "OP_PUSHDATA4", "OP_1NEGATE", "OP_RESERVED", 219 | "OP_1", "OP_2", "OP_3", "OP_4", "OP_5", "OP_6", "OP_7", 220 | "OP_8", "OP_9", "OP_10", "OP_11", "OP_12", "OP_13", "OP_14", "OP_15", "OP_16", 221 | "OP_NOP", "OP_VER", "OP_IF", "OP_NOTIF", "OP_VERIF", "OP_VERNOTIF", "OP_ELSE", "OP_ENDIF", "OP_VERIFY", 222 | "OP_RETURN", "OP_TOALTSTACK", "OP_FROMALTSTACK", "OP_2DROP", "OP_2DUP", "OP_3DUP", "OP_2OVER", "OP_2ROT", "OP_2SWAP", 223 | "OP_IFDUP", "OP_DEPTH", "OP_DROP", "OP_DUP", "OP_NIP", "OP_OVER", "OP_PICK", "OP_ROLL", "OP_ROT", 224 | "OP_SWAP", "OP_TUCK", "OP_CAT", "OP_SUBSTR", "OP_LEFT", "OP_RIGHT", "OP_SIZE", "OP_INVERT", "OP_AND", 225 | "OP_OR", "OP_XOR", "OP_EQUAL", "OP_EQUALVERIFY", "OP_RESERVED1", "OP_RESERVED2", "OP_1ADD", "OP_1SUB", "OP_2MUL", 226 | "OP_2DIV", "OP_NEGATE", "OP_ABS", "OP_NOT", "OP_0NOTEQUAL", "OP_ADD", "OP_SUB", "OP_MUL", "OP_DIV", 227 | "OP_MOD", "OP_LSHIFT", "OP_RSHIFT", "OP_BOOLAND", "OP_BOOLOR", 228 | "OP_NUMEQUAL", "OP_NUMEQUALVERIFY", "OP_NUMNOTEQUAL", "OP_LESSTHAN", 229 | "OP_GREATERTHAN", "OP_LESSTHANOREQUAL", "OP_GREATERTHANOREQUAL", "OP_MIN", "OP_MAX", 230 | "OP_WITHIN", "OP_RIPEMD160", "OP_SHA1", "OP_SHA256", "OP_HASH160", 231 | "OP_HASH256", "OP_CODESEPARATOR", "OP_CHECKSIG", "OP_CHECKSIGVERIFY", "OP_CHECKMULTISIG", 232 | "OP_CHECKMULTISIGVERIFY", 233 | "OP_NOP1", "OP_NOP2", "OP_NOP3", "OP_NOP4", "OP_NOP5", "OP_NOP6", "OP_NOP7", "OP_NOP8", "OP_NOP9", "OP_NOP10", 234 | ("OP_INVALIDOPCODE", 0xFF), 235 | ]) 236 | 237 | def script_GetOp(bytes): 238 | i = 0 239 | while i < len(bytes): 240 | vch = None 241 | opcode = ord(bytes[i]) 242 | i += 1 243 | 244 | if opcode <= opcodes.OP_PUSHDATA4: 245 | nSize = opcode 246 | if opcode == opcodes.OP_PUSHDATA1: 247 | nSize = ord(bytes[i]) 248 | i += 1 249 | elif opcode == opcodes.OP_PUSHDATA2: 250 | (nSize,) = struct.unpack_from(' len(bytes): 256 | vch = "_INVALID_"+bytes[i:] 257 | i = len(bytes) 258 | else: 259 | vch = bytes[i:i+nSize] 260 | i += nSize 261 | 262 | yield (opcode, vch, i) 263 | 264 | def script_GetOpName(opcode): 265 | try: 266 | return (opcodes.whatis(opcode)).replace("OP_", "") 267 | except KeyError: 268 | return "InvalidOp_"+str(opcode) 269 | 270 | def decode_script(bytes): 271 | result = '' 272 | for (opcode, vch, i) in script_GetOp(bytes): 273 | if len(result) > 0: result += " " 274 | if opcode <= opcodes.OP_PUSHDATA4: 275 | result += "%d:"%(opcode,) 276 | result += short_hex(vch) 277 | else: 278 | result += script_GetOpName(opcode) 279 | return result 280 | 281 | def match_decoded(decoded, to_match): 282 | if len(decoded) != len(to_match): 283 | return False; 284 | for i in range(len(decoded)): 285 | if to_match[i] == opcodes.OP_PUSHDATA4 and decoded[i][0] <= opcodes.OP_PUSHDATA4: 286 | continue # Opcodes below OP_PUSHDATA4 all just push data onto stack, and are equivalent. 
287 |         if to_match[i] != decoded[i][0]:
288 |             return False
289 |     return True
290 | 
291 | def extract_public_key(bytes, version='\x00'):
292 |     try:
293 |         decoded = [ x for x in script_GetOp(bytes) ]
294 |     except struct.error:
295 |         return "(None)"
296 | 
297 |     # non-generated TxIn transactions push a signature
298 |     # (seventy-something bytes) and then their public key
299 |     # (33 or 65 bytes) onto the stack:
300 |     match = [ opcodes.OP_PUSHDATA4, opcodes.OP_PUSHDATA4 ]
301 |     if match_decoded(decoded, match):
302 |         return public_key_to_bc_address(decoded[1][1], version=version)
303 | 
304 |     # The Genesis Block, self-payments, and pay-by-IP-address payments look like:
305 |     # 65 BYTES:... CHECKSIG
306 |     match = [ opcodes.OP_PUSHDATA4, opcodes.OP_CHECKSIG ]
307 |     if match_decoded(decoded, match):
308 |         return public_key_to_bc_address(decoded[0][1], version=version)
309 | 
310 |     # Pay-by-Bitcoin-address TxOuts look like:
311 |     # DUP HASH160 20 BYTES:... EQUALVERIFY CHECKSIG
312 |     match = [ opcodes.OP_DUP, opcodes.OP_HASH160, opcodes.OP_PUSHDATA4, opcodes.OP_EQUALVERIFY, opcodes.OP_CHECKSIG ]
313 |     if match_decoded(decoded, match):
314 |         return hash_160_to_bc_address(decoded[2][1], version=version)
315 | 
316 |     # BIP11 TxOuts look like one of these:
317 |     # (match_decoded matches push opcodes loosely but small-number opcodes exactly, so each m-of-n form needs its own template.)
318 |     multisigs = [
319 |         [ opcodes.OP_1, opcodes.OP_PUSHDATA4, opcodes.OP_1, opcodes.OP_CHECKMULTISIG ],
320 |         [ opcodes.OP_2, opcodes.OP_PUSHDATA4, opcodes.OP_PUSHDATA4, opcodes.OP_2, opcodes.OP_CHECKMULTISIG ],
321 |         [ opcodes.OP_3, opcodes.OP_PUSHDATA4, opcodes.OP_PUSHDATA4, opcodes.OP_PUSHDATA4, opcodes.OP_3, opcodes.OP_CHECKMULTISIG ]
322 |     ]
323 |     for match in multisigs:
324 |         if match_decoded(decoded, match):
325 |             # The public key pushes sit between the leading m and the trailing n, CHECKMULTISIG:
326 |             return "["+','.join([public_key_to_bc_address(decoded[i][1]) for i in range(1,len(decoded)-2)])+"]"
327 | 
328 |     # BIP16 TxOuts look like:
329 |     # HASH160 20 BYTES:... EQUAL
330 |     match = [ opcodes.OP_HASH160, 0x14, opcodes.OP_EQUAL ]
331 |     if match_decoded(decoded, match):
332 |         return hash_160_to_bc_address(decoded[1][1], version="\x05")
333 | 
334 |     return "(None)"
335 | 
--------------------------------------------------------------------------------
/LICENSE.txt:
--------------------------------------------------------------------------------
1 |                     GNU AFFERO GENERAL PUBLIC LICENSE
2 |                        Version 3, 19 November 2007
3 | 
4 |  Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
5 |  Everyone is permitted to copy and distribute verbatim copies
6 |  of this license document, but changing it is not allowed.
7 | 
8 |                             Preamble
9 | 
10 |   The GNU Affero General Public License is a free, copyleft license for
11 | software and other kinds of works, specifically designed to ensure
12 | cooperation with the community in the case of network server software.
13 | 
14 |   The licenses for most software and other practical works are designed
15 | to take away your freedom to share and change the works.  By contrast,
16 | our General Public Licenses are intended to guarantee your freedom to
17 | share and change all versions of a program--to make sure it remains free
18 | software for all its users.
19 | 
20 |   When we speak of free software, we are referring to freedom, not
21 | price. 
Our General Public Licenses are designed to make sure that you 22 | have the freedom to distribute copies of free software (and charge for 23 | them if you wish), that you receive source code or can get it if you 24 | want it, that you can change the software or use pieces of it in new 25 | free programs, and that you know you can do these things. 26 | 27 | Developers that use our General Public Licenses protect your rights 28 | with two steps: (1) assert copyright on the software, and (2) offer 29 | you this License which gives you legal permission to copy, distribute 30 | and/or modify the software. 31 | 32 | A secondary benefit of defending all users' freedom is that 33 | improvements made in alternate versions of the program, if they 34 | receive widespread use, become available for other developers to 35 | incorporate. Many developers of free software are heartened and 36 | encouraged by the resulting cooperation. However, in the case of 37 | software used on network servers, this result may fail to come about. 38 | The GNU General Public License permits making a modified version and 39 | letting the public access it on a server without ever releasing its 40 | source code to the public. 41 | 42 | The GNU Affero General Public License is designed specifically to 43 | ensure that, in such cases, the modified source code becomes available 44 | to the community. It requires the operator of a network server to 45 | provide the source code of the modified version running there to the 46 | users of that server. Therefore, public use of a modified version, on 47 | a publicly accessible server, gives the public access to the source 48 | code of the modified version. 49 | 50 | An older license, called the Affero General Public License and 51 | published by Affero, was designed to accomplish similar goals. This is 52 | a different license, not a version of the Affero GPL, but Affero has 53 | released a new version of the Affero GPL which permits relicensing under 54 | this license. 55 | 56 | The precise terms and conditions for copying, distribution and 57 | modification follow. 58 | 59 | TERMS AND CONDITIONS 60 | 61 | 0. Definitions. 62 | 63 | "This License" refers to version 3 of the GNU Affero General Public License. 64 | 65 | "Copyright" also means copyright-like laws that apply to other kinds of 66 | works, such as semiconductor masks. 67 | 68 | "The Program" refers to any copyrightable work licensed under this 69 | License. Each licensee is addressed as "you". "Licensees" and 70 | "recipients" may be individuals or organizations. 71 | 72 | To "modify" a work means to copy from or adapt all or part of the work 73 | in a fashion requiring copyright permission, other than the making of an 74 | exact copy. The resulting work is called a "modified version" of the 75 | earlier work or a work "based on" the earlier work. 76 | 77 | A "covered work" means either the unmodified Program or a work based 78 | on the Program. 79 | 80 | To "propagate" a work means to do anything with it that, without 81 | permission, would make you directly or secondarily liable for 82 | infringement under applicable copyright law, except executing it on a 83 | computer or modifying a private copy. Propagation includes copying, 84 | distribution (with or without modification), making available to the 85 | public, and in some countries other activities as well. 86 | 87 | To "convey" a work means any kind of propagation that enables other 88 | parties to make or receive copies. 
Mere interaction with a user through 89 | a computer network, with no transfer of a copy, is not conveying. 90 | 91 | An interactive user interface displays "Appropriate Legal Notices" 92 | to the extent that it includes a convenient and prominently visible 93 | feature that (1) displays an appropriate copyright notice, and (2) 94 | tells the user that there is no warranty for the work (except to the 95 | extent that warranties are provided), that licensees may convey the 96 | work under this License, and how to view a copy of this License. If 97 | the interface presents a list of user commands or options, such as a 98 | menu, a prominent item in the list meets this criterion. 99 | 100 | 1. Source Code. 101 | 102 | The "source code" for a work means the preferred form of the work 103 | for making modifications to it. "Object code" means any non-source 104 | form of a work. 105 | 106 | A "Standard Interface" means an interface that either is an official 107 | standard defined by a recognized standards body, or, in the case of 108 | interfaces specified for a particular programming language, one that 109 | is widely used among developers working in that language. 110 | 111 | The "System Libraries" of an executable work include anything, other 112 | than the work as a whole, that (a) is included in the normal form of 113 | packaging a Major Component, but which is not part of that Major 114 | Component, and (b) serves only to enable use of the work with that 115 | Major Component, or to implement a Standard Interface for which an 116 | implementation is available to the public in source code form. A 117 | "Major Component", in this context, means a major essential component 118 | (kernel, window system, and so on) of the specific operating system 119 | (if any) on which the executable work runs, or a compiler used to 120 | produce the work, or an object code interpreter used to run it. 121 | 122 | The "Corresponding Source" for a work in object code form means all 123 | the source code needed to generate, install, and (for an executable 124 | work) run the object code and to modify the work, including scripts to 125 | control those activities. However, it does not include the work's 126 | System Libraries, or general-purpose tools or generally available free 127 | programs which are used unmodified in performing those activities but 128 | which are not part of the work. For example, Corresponding Source 129 | includes interface definition files associated with source files for 130 | the work, and the source code for shared libraries and dynamically 131 | linked subprograms that the work is specifically designed to require, 132 | such as by intimate data communication or control flow between those 133 | subprograms and other parts of the work. 134 | 135 | The Corresponding Source need not include anything that users 136 | can regenerate automatically from other parts of the Corresponding 137 | Source. 138 | 139 | The Corresponding Source for a work in source code form is that 140 | same work. 141 | 142 | 2. Basic Permissions. 143 | 144 | All rights granted under this License are granted for the term of 145 | copyright on the Program, and are irrevocable provided the stated 146 | conditions are met. This License explicitly affirms your unlimited 147 | permission to run the unmodified Program. The output from running a 148 | covered work is covered by this License only if the output, given its 149 | content, constitutes a covered work. 
This License acknowledges your 150 | rights of fair use or other equivalent, as provided by copyright law. 151 | 152 | You may make, run and propagate covered works that you do not 153 | convey, without conditions so long as your license otherwise remains 154 | in force. You may convey covered works to others for the sole purpose 155 | of having them make modifications exclusively for you, or provide you 156 | with facilities for running those works, provided that you comply with 157 | the terms of this License in conveying all material for which you do 158 | not control copyright. Those thus making or running the covered works 159 | for you must do so exclusively on your behalf, under your direction 160 | and control, on terms that prohibit them from making any copies of 161 | your copyrighted material outside their relationship with you. 162 | 163 | Conveying under any other circumstances is permitted solely under 164 | the conditions stated below. Sublicensing is not allowed; section 10 165 | makes it unnecessary. 166 | 167 | 3. Protecting Users' Legal Rights From Anti-Circumvention Law. 168 | 169 | No covered work shall be deemed part of an effective technological 170 | measure under any applicable law fulfilling obligations under article 171 | 11 of the WIPO copyright treaty adopted on 20 December 1996, or 172 | similar laws prohibiting or restricting circumvention of such 173 | measures. 174 | 175 | When you convey a covered work, you waive any legal power to forbid 176 | circumvention of technological measures to the extent such circumvention 177 | is effected by exercising rights under this License with respect to 178 | the covered work, and you disclaim any intention to limit operation or 179 | modification of the work as a means of enforcing, against the work's 180 | users, your or third parties' legal rights to forbid circumvention of 181 | technological measures. 182 | 183 | 4. Conveying Verbatim Copies. 184 | 185 | You may convey verbatim copies of the Program's source code as you 186 | receive it, in any medium, provided that you conspicuously and 187 | appropriately publish on each copy an appropriate copyright notice; 188 | keep intact all notices stating that this License and any 189 | non-permissive terms added in accord with section 7 apply to the code; 190 | keep intact all notices of the absence of any warranty; and give all 191 | recipients a copy of this License along with the Program. 192 | 193 | You may charge any price or no price for each copy that you convey, 194 | and you may offer support or warranty protection for a fee. 195 | 196 | 5. Conveying Modified Source Versions. 197 | 198 | You may convey a work based on the Program, or the modifications to 199 | produce it from the Program, in the form of source code under the 200 | terms of section 4, provided that you also meet all of these conditions: 201 | 202 | a) The work must carry prominent notices stating that you modified 203 | it, and giving a relevant date. 204 | 205 | b) The work must carry prominent notices stating that it is 206 | released under this License and any conditions added under section 207 | 7. This requirement modifies the requirement in section 4 to 208 | "keep intact all notices". 209 | 210 | c) You must license the entire work, as a whole, under this 211 | License to anyone who comes into possession of a copy. 
This 212 | License will therefore apply, along with any applicable section 7 213 | additional terms, to the whole of the work, and all its parts, 214 | regardless of how they are packaged. This License gives no 215 | permission to license the work in any other way, but it does not 216 | invalidate such permission if you have separately received it. 217 | 218 | d) If the work has interactive user interfaces, each must display 219 | Appropriate Legal Notices; however, if the Program has interactive 220 | interfaces that do not display Appropriate Legal Notices, your 221 | work need not make them do so. 222 | 223 | A compilation of a covered work with other separate and independent 224 | works, which are not by their nature extensions of the covered work, 225 | and which are not combined with it such as to form a larger program, 226 | in or on a volume of a storage or distribution medium, is called an 227 | "aggregate" if the compilation and its resulting copyright are not 228 | used to limit the access or legal rights of the compilation's users 229 | beyond what the individual works permit. Inclusion of a covered work 230 | in an aggregate does not cause this License to apply to the other 231 | parts of the aggregate. 232 | 233 | 6. Conveying Non-Source Forms. 234 | 235 | You may convey a covered work in object code form under the terms 236 | of sections 4 and 5, provided that you also convey the 237 | machine-readable Corresponding Source under the terms of this License, 238 | in one of these ways: 239 | 240 | a) Convey the object code in, or embodied in, a physical product 241 | (including a physical distribution medium), accompanied by the 242 | Corresponding Source fixed on a durable physical medium 243 | customarily used for software interchange. 244 | 245 | b) Convey the object code in, or embodied in, a physical product 246 | (including a physical distribution medium), accompanied by a 247 | written offer, valid for at least three years and valid for as 248 | long as you offer spare parts or customer support for that product 249 | model, to give anyone who possesses the object code either (1) a 250 | copy of the Corresponding Source for all the software in the 251 | product that is covered by this License, on a durable physical 252 | medium customarily used for software interchange, for a price no 253 | more than your reasonable cost of physically performing this 254 | conveying of source, or (2) access to copy the 255 | Corresponding Source from a network server at no charge. 256 | 257 | c) Convey individual copies of the object code with a copy of the 258 | written offer to provide the Corresponding Source. This 259 | alternative is allowed only occasionally and noncommercially, and 260 | only if you received the object code with such an offer, in accord 261 | with subsection 6b. 262 | 263 | d) Convey the object code by offering access from a designated 264 | place (gratis or for a charge), and offer equivalent access to the 265 | Corresponding Source in the same way through the same place at no 266 | further charge. You need not require recipients to copy the 267 | Corresponding Source along with the object code. If the place to 268 | copy the object code is a network server, the Corresponding Source 269 | may be on a different server (operated by you or a third party) 270 | that supports equivalent copying facilities, provided you maintain 271 | clear directions next to the object code saying where to find the 272 | Corresponding Source. 
Regardless of what server hosts the 273 | Corresponding Source, you remain obligated to ensure that it is 274 | available for as long as needed to satisfy these requirements. 275 | 276 | e) Convey the object code using peer-to-peer transmission, provided 277 | you inform other peers where the object code and Corresponding 278 | Source of the work are being offered to the general public at no 279 | charge under subsection 6d. 280 | 281 | A separable portion of the object code, whose source code is excluded 282 | from the Corresponding Source as a System Library, need not be 283 | included in conveying the object code work. 284 | 285 | A "User Product" is either (1) a "consumer product", which means any 286 | tangible personal property which is normally used for personal, family, 287 | or household purposes, or (2) anything designed or sold for incorporation 288 | into a dwelling. In determining whether a product is a consumer product, 289 | doubtful cases shall be resolved in favor of coverage. For a particular 290 | product received by a particular user, "normally used" refers to a 291 | typical or common use of that class of product, regardless of the status 292 | of the particular user or of the way in which the particular user 293 | actually uses, or expects or is expected to use, the product. A product 294 | is a consumer product regardless of whether the product has substantial 295 | commercial, industrial or non-consumer uses, unless such uses represent 296 | the only significant mode of use of the product. 297 | 298 | "Installation Information" for a User Product means any methods, 299 | procedures, authorization keys, or other information required to install 300 | and execute modified versions of a covered work in that User Product from 301 | a modified version of its Corresponding Source. The information must 302 | suffice to ensure that the continued functioning of the modified object 303 | code is in no case prevented or interfered with solely because 304 | modification has been made. 305 | 306 | If you convey an object code work under this section in, or with, or 307 | specifically for use in, a User Product, and the conveying occurs as 308 | part of a transaction in which the right of possession and use of the 309 | User Product is transferred to the recipient in perpetuity or for a 310 | fixed term (regardless of how the transaction is characterized), the 311 | Corresponding Source conveyed under this section must be accompanied 312 | by the Installation Information. But this requirement does not apply 313 | if neither you nor any third party retains the ability to install 314 | modified object code on the User Product (for example, the work has 315 | been installed in ROM). 316 | 317 | The requirement to provide Installation Information does not include a 318 | requirement to continue to provide support service, warranty, or updates 319 | for a work that has been modified or installed by the recipient, or for 320 | the User Product in which it has been modified or installed. Access to a 321 | network may be denied when the modification itself materially and 322 | adversely affects the operation of the network or violates the rules and 323 | protocols for communication across the network. 
324 | 325 | Corresponding Source conveyed, and Installation Information provided, 326 | in accord with this section must be in a format that is publicly 327 | documented (and with an implementation available to the public in 328 | source code form), and must require no special password or key for 329 | unpacking, reading or copying. 330 | 331 | 7. Additional Terms. 332 | 333 | "Additional permissions" are terms that supplement the terms of this 334 | License by making exceptions from one or more of its conditions. 335 | Additional permissions that are applicable to the entire Program shall 336 | be treated as though they were included in this License, to the extent 337 | that they are valid under applicable law. If additional permissions 338 | apply only to part of the Program, that part may be used separately 339 | under those permissions, but the entire Program remains governed by 340 | this License without regard to the additional permissions. 341 | 342 | When you convey a copy of a covered work, you may at your option 343 | remove any additional permissions from that copy, or from any part of 344 | it. (Additional permissions may be written to require their own 345 | removal in certain cases when you modify the work.) You may place 346 | additional permissions on material, added by you to a covered work, 347 | for which you have or can give appropriate copyright permission. 348 | 349 | Notwithstanding any other provision of this License, for material you 350 | add to a covered work, you may (if authorized by the copyright holders of 351 | that material) supplement the terms of this License with terms: 352 | 353 | a) Disclaiming warranty or limiting liability differently from the 354 | terms of sections 15 and 16 of this License; or 355 | 356 | b) Requiring preservation of specified reasonable legal notices or 357 | author attributions in that material or in the Appropriate Legal 358 | Notices displayed by works containing it; or 359 | 360 | c) Prohibiting misrepresentation of the origin of that material, or 361 | requiring that modified versions of such material be marked in 362 | reasonable ways as different from the original version; or 363 | 364 | d) Limiting the use for publicity purposes of names of licensors or 365 | authors of the material; or 366 | 367 | e) Declining to grant rights under trademark law for use of some 368 | trade names, trademarks, or service marks; or 369 | 370 | f) Requiring indemnification of licensors and authors of that 371 | material by anyone who conveys the material (or modified versions of 372 | it) with contractual assumptions of liability to the recipient, for 373 | any liability that these contractual assumptions directly impose on 374 | those licensors and authors. 375 | 376 | All other non-permissive additional terms are considered "further 377 | restrictions" within the meaning of section 10. If the Program as you 378 | received it, or any part of it, contains a notice stating that it is 379 | governed by this License along with a term that is a further 380 | restriction, you may remove that term. If a license document contains 381 | a further restriction but permits relicensing or conveying under this 382 | License, you may add to a covered work material governed by the terms 383 | of that license document, provided that the further restriction does 384 | not survive such relicensing or conveying. 
385 | 386 | If you add terms to a covered work in accord with this section, you 387 | must place, in the relevant source files, a statement of the 388 | additional terms that apply to those files, or a notice indicating 389 | where to find the applicable terms. 390 | 391 | Additional terms, permissive or non-permissive, may be stated in the 392 | form of a separately written license, or stated as exceptions; 393 | the above requirements apply either way. 394 | 395 | 8. Termination. 396 | 397 | You may not propagate or modify a covered work except as expressly 398 | provided under this License. Any attempt otherwise to propagate or 399 | modify it is void, and will automatically terminate your rights under 400 | this License (including any patent licenses granted under the third 401 | paragraph of section 11). 402 | 403 | However, if you cease all violation of this License, then your 404 | license from a particular copyright holder is reinstated (a) 405 | provisionally, unless and until the copyright holder explicitly and 406 | finally terminates your license, and (b) permanently, if the copyright 407 | holder fails to notify you of the violation by some reasonable means 408 | prior to 60 days after the cessation. 409 | 410 | Moreover, your license from a particular copyright holder is 411 | reinstated permanently if the copyright holder notifies you of the 412 | violation by some reasonable means, this is the first time you have 413 | received notice of violation of this License (for any work) from that 414 | copyright holder, and you cure the violation prior to 30 days after 415 | your receipt of the notice. 416 | 417 | Termination of your rights under this section does not terminate the 418 | licenses of parties who have received copies or rights from you under 419 | this License. If your rights have been terminated and not permanently 420 | reinstated, you do not qualify to receive new licenses for the same 421 | material under section 10. 422 | 423 | 9. Acceptance Not Required for Having Copies. 424 | 425 | You are not required to accept this License in order to receive or 426 | run a copy of the Program. Ancillary propagation of a covered work 427 | occurring solely as a consequence of using peer-to-peer transmission 428 | to receive a copy likewise does not require acceptance. However, 429 | nothing other than this License grants you permission to propagate or 430 | modify any covered work. These actions infringe copyright if you do 431 | not accept this License. Therefore, by modifying or propagating a 432 | covered work, you indicate your acceptance of this License to do so. 433 | 434 | 10. Automatic Licensing of Downstream Recipients. 435 | 436 | Each time you convey a covered work, the recipient automatically 437 | receives a license from the original licensors, to run, modify and 438 | propagate that work, subject to this License. You are not responsible 439 | for enforcing compliance by third parties with this License. 440 | 441 | An "entity transaction" is a transaction transferring control of an 442 | organization, or substantially all assets of one, or subdividing an 443 | organization, or merging organizations. 
If propagation of a covered 444 | work results from an entity transaction, each party to that 445 | transaction who receives a copy of the work also receives whatever 446 | licenses to the work the party's predecessor in interest had or could 447 | give under the previous paragraph, plus a right to possession of the 448 | Corresponding Source of the work from the predecessor in interest, if 449 | the predecessor has it or can get it with reasonable efforts. 450 | 451 | You may not impose any further restrictions on the exercise of the 452 | rights granted or affirmed under this License. For example, you may 453 | not impose a license fee, royalty, or other charge for exercise of 454 | rights granted under this License, and you may not initiate litigation 455 | (including a cross-claim or counterclaim in a lawsuit) alleging that 456 | any patent claim is infringed by making, using, selling, offering for 457 | sale, or importing the Program or any portion of it. 458 | 459 | 11. Patents. 460 | 461 | A "contributor" is a copyright holder who authorizes use under this 462 | License of the Program or a work on which the Program is based. The 463 | work thus licensed is called the contributor's "contributor version". 464 | 465 | A contributor's "essential patent claims" are all patent claims 466 | owned or controlled by the contributor, whether already acquired or 467 | hereafter acquired, that would be infringed by some manner, permitted 468 | by this License, of making, using, or selling its contributor version, 469 | but do not include claims that would be infringed only as a 470 | consequence of further modification of the contributor version. For 471 | purposes of this definition, "control" includes the right to grant 472 | patent sublicenses in a manner consistent with the requirements of 473 | this License. 474 | 475 | Each contributor grants you a non-exclusive, worldwide, royalty-free 476 | patent license under the contributor's essential patent claims, to 477 | make, use, sell, offer for sale, import and otherwise run, modify and 478 | propagate the contents of its contributor version. 479 | 480 | In the following three paragraphs, a "patent license" is any express 481 | agreement or commitment, however denominated, not to enforce a patent 482 | (such as an express permission to practice a patent or covenant not to 483 | sue for patent infringement). To "grant" such a patent license to a 484 | party means to make such an agreement or commitment not to enforce a 485 | patent against the party. 486 | 487 | If you convey a covered work, knowingly relying on a patent license, 488 | and the Corresponding Source of the work is not available for anyone 489 | to copy, free of charge and under the terms of this License, through a 490 | publicly available network server or other readily accessible means, 491 | then you must either (1) cause the Corresponding Source to be so 492 | available, or (2) arrange to deprive yourself of the benefit of the 493 | patent license for this particular work, or (3) arrange, in a manner 494 | consistent with the requirements of this License, to extend the patent 495 | license to downstream recipients. "Knowingly relying" means you have 496 | actual knowledge that, but for the patent license, your conveying the 497 | covered work in a country, or your recipient's use of the covered work 498 | in a country, would infringe one or more identifiable patents in that 499 | country that you have reason to believe are valid. 
500 | 501 | If, pursuant to or in connection with a single transaction or 502 | arrangement, you convey, or propagate by procuring conveyance of, a 503 | covered work, and grant a patent license to some of the parties 504 | receiving the covered work authorizing them to use, propagate, modify 505 | or convey a specific copy of the covered work, then the patent license 506 | you grant is automatically extended to all recipients of the covered 507 | work and works based on it. 508 | 509 | A patent license is "discriminatory" if it does not include within 510 | the scope of its coverage, prohibits the exercise of, or is 511 | conditioned on the non-exercise of one or more of the rights that are 512 | specifically granted under this License. You may not convey a covered 513 | work if you are a party to an arrangement with a third party that is 514 | in the business of distributing software, under which you make payment 515 | to the third party based on the extent of your activity of conveying 516 | the work, and under which the third party grants, to any of the 517 | parties who would receive the covered work from you, a discriminatory 518 | patent license (a) in connection with copies of the covered work 519 | conveyed by you (or copies made from those copies), or (b) primarily 520 | for and in connection with specific products or compilations that 521 | contain the covered work, unless you entered into that arrangement, 522 | or that patent license was granted, prior to 28 March 2007. 523 | 524 | Nothing in this License shall be construed as excluding or limiting 525 | any implied license or other defenses to infringement that may 526 | otherwise be available to you under applicable patent law. 527 | 528 | 12. No Surrender of Others' Freedom. 529 | 530 | If conditions are imposed on you (whether by court order, agreement or 531 | otherwise) that contradict the conditions of this License, they do not 532 | excuse you from the conditions of this License. If you cannot convey a 533 | covered work so as to satisfy simultaneously your obligations under this 534 | License and any other pertinent obligations, then as a consequence you may 535 | not convey it at all. For example, if you agree to terms that obligate you 536 | to collect a royalty for further conveying from those to whom you convey 537 | the Program, the only way you could satisfy both those terms and this 538 | License would be to refrain entirely from conveying the Program. 539 | 540 | 13. Remote Network Interaction; Use with the GNU General Public License. 541 | 542 | Notwithstanding any other provision of this License, if you modify the 543 | Program, your modified version must prominently offer all users 544 | interacting with it remotely through a computer network (if your version 545 | supports such interaction) an opportunity to receive the Corresponding 546 | Source of your version by providing access to the Corresponding Source 547 | from a network server at no charge, through some standard or customary 548 | means of facilitating copying of software. This Corresponding Source 549 | shall include the Corresponding Source for any work covered by version 3 550 | of the GNU General Public License that is incorporated pursuant to the 551 | following paragraph. 552 | 553 | Notwithstanding any other provision of this License, you have 554 | permission to link or combine any covered work with a work licensed 555 | under version 3 of the GNU General Public License into a single 556 | combined work, and to convey the resulting work. 
The terms of this 557 | License will continue to apply to the part which is the covered work, 558 | but the work with which it is combined will remain governed by version 559 | 3 of the GNU General Public License. 560 | 561 | 14. Revised Versions of this License. 562 | 563 | The Free Software Foundation may publish revised and/or new versions of 564 | the GNU Affero General Public License from time to time. Such new versions 565 | will be similar in spirit to the present version, but may differ in detail to 566 | address new problems or concerns. 567 | 568 | Each version is given a distinguishing version number. If the 569 | Program specifies that a certain numbered version of the GNU Affero General 570 | Public License "or any later version" applies to it, you have the 571 | option of following the terms and conditions either of that numbered 572 | version or of any later version published by the Free Software 573 | Foundation. If the Program does not specify a version number of the 574 | GNU Affero General Public License, you may choose any version ever published 575 | by the Free Software Foundation. 576 | 577 | If the Program specifies that a proxy can decide which future 578 | versions of the GNU Affero General Public License can be used, that proxy's 579 | public statement of acceptance of a version permanently authorizes you 580 | to choose that version for the Program. 581 | 582 | Later license versions may give you additional or different 583 | permissions. However, no additional obligations are imposed on any 584 | author or copyright holder as a result of your choosing to follow a 585 | later version. 586 | 587 | 15. Disclaimer of Warranty. 588 | 589 | THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY 590 | APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT 591 | HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY 592 | OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, 593 | THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 594 | PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM 595 | IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF 596 | ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 597 | 598 | 16. Limitation of Liability. 599 | 600 | IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING 601 | WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS 602 | THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY 603 | GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE 604 | USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF 605 | DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD 606 | PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), 607 | EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF 608 | SUCH DAMAGES. 609 | 610 | 17. Interpretation of Sections 15 and 16. 611 | 612 | If the disclaimer of warranty and limitation of liability provided 613 | above cannot be given local legal effect according to their terms, 614 | reviewing courts shall apply local law that most closely approximates 615 | an absolute waiver of all civil liability in connection with the 616 | Program, unless a warranty or assumption of liability accompanies a 617 | copy of the Program in return for a fee. 
618 | 
619 |                      END OF TERMS AND CONDITIONS
620 | 
621 |             How to Apply These Terms to Your New Programs
622 | 
623 |   If you develop a new program, and you want it to be of the greatest
624 | possible use to the public, the best way to achieve this is to make it
625 | free software which everyone can redistribute and change under these terms.
626 | 
627 |   To do so, attach the following notices to the program.  It is safest
628 | to attach them to the start of each source file to most effectively
629 | state the exclusion of warranty; and each file should have at least
630 | the "copyright" line and a pointer to where the full notice is found.
631 | 
632 |     <one line to give the program's name and a brief idea of what it does.>
633 |     Copyright (C) <year>  <name of author>
634 | 
635 |     This program is free software: you can redistribute it and/or modify
636 |     it under the terms of the GNU Affero General Public License as published by
637 |     the Free Software Foundation, either version 3 of the License, or
638 |     (at your option) any later version.
639 | 
640 |     This program is distributed in the hope that it will be useful,
641 |     but WITHOUT ANY WARRANTY; without even the implied warranty of
642 |     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
643 |     GNU Affero General Public License for more details.
644 | 
645 |     You should have received a copy of the GNU Affero General Public License
646 |     along with this program.  If not, see <http://www.gnu.org/licenses/>.
647 | 
648 | Also add information on how to contact you by electronic and paper mail.
649 | 
650 |   If your software can interact with users remotely through a computer
651 | network, you should also make sure that it provides a way for users to
652 | get its source.  For example, if your program is a web application, its
653 | interface could display a "Source" link that leads users to an archive
654 | of the code.  There are many ways you could offer source, and different
655 | solutions will be better for different programs; see section 13 for the
656 | specific requirements.
657 | 
658 |   You should also get your employer (if you work as a programmer) or school,
659 | if any, to sign a "copyright disclaimer" for the program, if necessary.
660 | For more information on this, and how to apply and follow the GNU AGPL, see
661 | <http://www.gnu.org/licenses/>.
662 | 
--------------------------------------------------------------------------------
/Abe/upgrade.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # Copyright(C) 2011,2012,2013 by Abe developers.
3 | 
4 | # This program is free software: you can redistribute it and/or modify
5 | # it under the terms of the GNU Affero General Public License as
6 | # published by the Free Software Foundation, either version 3 of the
7 | # License, or (at your option) any later version.
8 | #
9 | # This program is distributed in the hope that it will be useful, but
10 | # WITHOUT ANY WARRANTY; without even the implied warranty of
11 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
12 | # Affero General Public License for more details.
13 | #
14 | # You should have received a copy of the GNU Affero General Public
15 | # License along with this program.  If not, see
16 | # <http://www.gnu.org/licenses/>.
17 | 18 | """Upgrade to the current database schema.""" 19 | 20 | import os 21 | import sys 22 | import DataStore 23 | import util 24 | 25 | def run_upgrades_locked(store, upgrades): 26 | for i in xrange(len(upgrades) - 1): 27 | vers, func = upgrades[i] 28 | if store.config['schema_version'] == vers: 29 | sv = upgrades[i+1][0] 30 | store.log.warning("Upgrading schema to version: %s", sv) 31 | func(store) 32 | if sv[:3] == 'Abe': 33 | store.sql( 34 | "UPDATE configvar SET configvar_value = ?" 35 | " WHERE configvar_name = 'schema_version'", 36 | (sv,)) 37 | if store.cursor.rowcount != 1: 38 | raise Exception("Failed to update schema_version"); 39 | else: 40 | store.sql( 41 | "UPDATE config SET schema_version = ? WHERE config_id = 1", 42 | (sv,)) 43 | store.commit() 44 | store.config['schema_version'] = sv 45 | 46 | def run_upgrades(store, upgrades): 47 | """Guard against concurrent upgrades.""" 48 | lock = store.get_lock() 49 | try: 50 | run_upgrades_locked(store, upgrades) 51 | finally: 52 | store.release_lock(lock) 53 | 54 | def add_block_value_in(store): 55 | store.sql("ALTER TABLE block ADD block_value_in NUMERIC(30)") 56 | def add_block_value_out(store): 57 | store.sql("ALTER TABLE block ADD block_value_out NUMERIC(30)") 58 | def add_block_total_satoshis(store): 59 | store.sql("ALTER TABLE block ADD block_total_satoshis NUMERIC(26)") 60 | def add_block_total_seconds(store): 61 | store.sql("ALTER TABLE block ADD block_total_seconds NUMERIC(20)") 62 | def add_block_satoshi_seconds(store): 63 | store.sql("ALTER TABLE block ADD block_satoshi_seconds NUMERIC(28)") 64 | def add_block_total_ss(store): 65 | store.sql("ALTER TABLE block ADD block_total_ss NUMERIC(28)") 66 | def add_satoshi_seconds_destroyed(store): 67 | store.sql("ALTER TABLE block_tx ADD satoshi_seconds_destroyed NUMERIC(28)") 68 | def add_cc_block_height(store): 69 | store.sql("ALTER TABLE chain_candidate ADD block_height NUMERIC(14)") 70 | 71 | def init_cc_block_height(store): 72 | store.sql( 73 | """UPDATE chain_candidate cc 74 | SET block_height = ( 75 | SELECT block_height 76 | FROM block b 77 | WHERE b.block_id = cc.block_id) 78 | """) 79 | 80 | def index_cc_block_height(store): 81 | store.sql( 82 | """CREATE INDEX x_cc_chain_block_height 83 | ON chain_candidate (chain_id, block_height)""") 84 | 85 | def index_cc_block(store): 86 | store.sql( 87 | """CREATE INDEX x_cc_block ON chain_candidate (block_id)""") 88 | 89 | def create_block_txin(store): 90 | store.sql( 91 | """CREATE TABLE block_txin ( 92 | block_id NUMERIC(14), 93 | txin_id NUMERIC(26), 94 | out_block_id NUMERIC(14), 95 | PRIMARY KEY (block_id, txin_id) 96 | )""") 97 | 98 | def index_block_tx_tx(store): 99 | try: 100 | store.sql("DROP INDEX x_block_tx_tx") 101 | except: 102 | store.rollback() 103 | store.sql("CREATE INDEX x_block_tx_tx ON block_tx (tx_id)") 104 | 105 | def init_block_txin(store): 106 | store.log.info("Initializing block_txin.") 107 | count = int(store.selectrow("SELECT COUNT(1) FROM block_txin")[0] or 0) 108 | tried = 0 109 | added = 0 110 | seen = set() 111 | 112 | store.log.info("...loading existing keys") 113 | cur = store.conn.cursor() 114 | cur.execute(store.sql_transform(""" 115 | SELECT block_id, txin_id FROM block_txin""")) 116 | for row in cur: 117 | seen.add(row) 118 | 119 | store.log.info("...finding output blocks") 120 | cur.execute(store.sql_transform(""" 121 | SELECT bt.block_id, txin.txin_id, obt.block_id 122 | FROM block_tx bt 123 | JOIN txin USING (tx_id) 124 | JOIN txout USING (txout_id) 125 | JOIN block_tx obt ON 
(txout.tx_id = obt.tx_id)""")) 126 | for row in cur: 127 | (block_id, txin_id, oblock_id) = row 128 | 129 | if (block_id, txin_id) not in seen: 130 | # If oblock is an ancestor of block, insert into block_txin. 131 | if store.is_descended_from(block_id, oblock_id): 132 | store.sql(""" 133 | INSERT INTO block_txin (block_id, txin_id, out_block_id) 134 | VALUES (?, ?, ?)""", 135 | (block_id, txin_id, oblock_id)) 136 | count += 1 137 | added += 1 138 | if count % 1000 == 0: 139 | store.commit() 140 | store.log.info("commit %d", count) 141 | tried += 1 142 | if tried % 1000 == 0: 143 | sys.stdout.write('\r%d/%d ' % (added, tried)) 144 | sys.stdout.flush() 145 | 146 | store.log.info('done.') 147 | 148 | def init_block_value_in(store): 149 | store.log.info("Calculating block_value_in.") 150 | for row in store.selectall(""" 151 | SELECT b.block_id, SUM(txout.txout_value) 152 | FROM block b 153 | JOIN block_tx USING (block_id) 154 | JOIN txin USING (tx_id) 155 | LEFT JOIN txout USING (txout_id) 156 | GROUP BY b.block_id 157 | """): 158 | store.sql("UPDATE block SET block_value_in = ? WHERE block_id = ?", 159 | (int(row[1] or 0), row[0])) 160 | 161 | def init_block_value_out(store): 162 | store.log.info("Calculating block_value_out.") 163 | for row in store.selectall(""" 164 | SELECT b.block_id, SUM(txout.txout_value) 165 | FROM block b 166 | JOIN block_tx USING (block_id) 167 | JOIN txout USING (tx_id) 168 | GROUP BY b.block_id 169 | """): 170 | store.sql("UPDATE block SET block_value_out = ? WHERE block_id = ?", 171 | (int(row[1]), row[0])) 172 | 173 | def init_block_totals(store): 174 | store.log.info("Calculating block total generated and age.") 175 | last_chain_id = None 176 | stats = None 177 | for row in store.selectall(""" 178 | SELECT cc.chain_id, b.prev_block_id, b.block_id, 179 | b.block_value_out - b.block_value_in, b.block_nTime 180 | FROM chain_candidate cc 181 | JOIN block b USING (block_id) 182 | WHERE cc.block_height IS NOT NULL 183 | ORDER BY cc.chain_id, cc.block_height"""): 184 | 185 | chain_id, prev_id, block_id, generated, nTime = row 186 | generated = int(generated) 187 | nTime = int(nTime) 188 | 189 | if chain_id != last_chain_id: 190 | stats = {} 191 | last_chain_id = chain_id 192 | 193 | if prev_id is None: 194 | stats[block_id] = { 195 | "chain_start": nTime, 196 | "satoshis": generated} 197 | else: 198 | stats[block_id] = { 199 | "chain_start": stats[prev_id]['chain_start'], 200 | "satoshis": generated + stats[prev_id]['satoshis']} 201 | 202 | store.sql("UPDATE block SET block_total_seconds = ?," 203 | " block_total_satoshis = ?" 
204 | " WHERE block_id = ?", 205 | (nTime - stats[block_id]['chain_start'], 206 | stats[block_id]['satoshis'], block_id)) 207 | 208 | def init_satoshi_seconds_destroyed(store): 209 | store.log.info("Calculating satoshi-seconds destroyed.") 210 | cur = store.conn.cursor() 211 | count = 0 212 | step = 100 213 | start = 1 214 | stop = int(store.selectrow("SELECT MAX(block_id) FROM block_tx")[0]) 215 | while start <= stop: 216 | cur.execute(store.sql_transform(""" 217 | SELECT bt.block_id, bt.tx_id, 218 | SUM(txout.txout_value * (b.block_nTime - ob.block_nTime)) 219 | FROM block b 220 | JOIN block_tx bt USING (block_id) 221 | JOIN txin USING (tx_id) 222 | JOIN txout USING (txout_id) 223 | JOIN block_tx obt ON (txout.tx_id = obt.tx_id) 224 | JOIN block_txin bti ON ( 225 | bti.block_id = bt.block_id AND 226 | bti.txin_id = txin.txin_id AND 227 | obt.block_id = bti.out_block_id) 228 | JOIN block ob ON (bti.out_block_id = ob.block_id) 229 | WHERE bt.block_id >= ? 230 | AND bt.block_id < ? 231 | GROUP BY bt.block_id, bt.tx_id"""), (start, start + step)) 232 | for row in cur: 233 | block_id, tx_id, destroyed = row 234 | sys.stdout.write("\rssd: " + str(count) + " ") 235 | count += 1 236 | store.sql("UPDATE block_tx SET satoshi_seconds_destroyed = ?" 237 | " WHERE block_id = ? AND tx_id = ?", 238 | (destroyed, block_id, tx_id)) 239 | start += step 240 | store.log.info("done.") 241 | 242 | def set_0_satoshi_seconds_destroyed(store): 243 | store.log.info("Setting NULL to 0 in satoshi_seconds_destroyed.") 244 | cur = store.conn.cursor() 245 | cur.execute(store.sql_transform(""" 246 | SELECT bt.block_id, bt.tx_id 247 | FROM block_tx bt 248 | JOIN block b USING (block_id) 249 | WHERE b.block_height IS NOT NULL 250 | AND bt.satoshi_seconds_destroyed IS NULL""")) 251 | for row in cur: 252 | store.sql(""" 253 | UPDATE block_tx bt SET satoshi_seconds_destroyed = 0 254 | WHERE block_id = ? AND tx_id = ?""", row) 255 | 256 | def init_block_satoshi_seconds(store, ): 257 | store.log.info("Calculating satoshi-seconds.") 258 | cur = store.conn.cursor() 259 | stats = {} 260 | cur.execute(store.sql_transform(""" 261 | SELECT b.block_id, b.block_total_satoshis, b.block_nTime, 262 | b.prev_block_id, SUM(bt.satoshi_seconds_destroyed), 263 | b.block_height 264 | FROM block b 265 | JOIN block_tx bt ON (b.block_id = bt.block_id) 266 | GROUP BY b.block_id, b.block_total_satoshis, b.block_nTime, 267 | b.prev_block_id, b.block_height 268 | ORDER BY b.block_height""")) 269 | count = 0 270 | while True: 271 | row = cur.fetchone() 272 | if row is None: 273 | break 274 | block_id, satoshis, nTime, prev_id, destroyed, height = row 275 | satoshis = int(satoshis) 276 | destroyed = int(destroyed) 277 | if height is None: 278 | continue 279 | if prev_id is None: 280 | stats[block_id] = { 281 | "satoshis": satoshis, 282 | "ss": 0, 283 | "total_ss": 0, 284 | "nTime": nTime} 285 | else: 286 | created = (stats[prev_id]['satoshis'] 287 | * (nTime - stats[prev_id]['nTime'])) 288 | stats[block_id] = { 289 | "satoshis": satoshis, 290 | "ss": stats[prev_id]['ss'] + created - destroyed, 291 | "total_ss": stats[prev_id]['total_ss'] + created, 292 | "nTime": nTime} 293 | store.sql(""" 294 | UPDATE block 295 | SET block_satoshi_seconds = ?, 296 | block_total_ss = ?, 297 | block_ss_destroyed = ? 
298 |             WHERE block_id = ?""",
299 |                   (store.intin(stats[block_id]['ss']),
300 |                    store.intin(stats[block_id]['total_ss']),
301 |                    store.intin(destroyed),
302 |                    block_id))
303 |         count += 1
304 |         if count % 1000 == 0:
305 |             store.commit()
306 |             store.log.info("Updated %d blocks", count)
307 |     if count % 1000 != 0:
308 |         store.log.info("Updated %d blocks", count)
309 | 
310 | def index_block_nTime(store):
311 |     store.log.info("Indexing block_nTime.")
312 |     store.sql("CREATE INDEX x_block_nTime ON block (block_nTime)")
313 | 
314 | def replace_chain_summary(store):
315 |     store.sql("DROP VIEW chain_summary")
316 |     store.sql(store.get_ddl('chain_summary'))
317 | 
318 | def drop_block_ss_columns(store):
319 |     """Drop columns that may have been added in error."""
320 |     for c in ['created', 'destroyed']:
321 |         try:
322 |             store.sql("ALTER TABLE block DROP COLUMN block_ss_" + c)
323 |         except:
324 |             store.rollback()
325 | 
326 | def add_constraint(store, table, name, constraint):
327 |     try:
328 |         store.sql("ALTER TABLE " + table + " ADD CONSTRAINT " + name +
329 |                   " " + constraint)
330 |     except:
331 |         store.log.exception(
332 |             "Failed to create constraint on table " + table + ": " +
333 |             constraint + "; ignoring error.")
334 |         store.rollback()
335 | 
336 | def add_fk_block_txin_block_id(store):
337 |     add_constraint(store, "block_txin", "fk1_block_txin",
338 |                    "FOREIGN KEY (block_id) REFERENCES block (block_id)")
339 | 
340 | def add_fk_block_txin_tx_id(store):
341 |     add_constraint(store, "block_txin", "fk2_block_txin",
342 |                    "FOREIGN KEY (txin_id) REFERENCES txin (txin_id)")
343 | 
344 | def add_fk_block_txin_out_block_id(store):
345 |     add_constraint(store, "block_txin", "fk3_block_txin",
346 |                    "FOREIGN KEY (out_block_id) REFERENCES block (block_id)")
347 | 
348 | def add_chk_block_txin_out_block_id_nn(store):
349 |     add_constraint(store, "block_txin", "chk3_block_txin",
350 |                    "CHECK (out_block_id IS NOT NULL)")
351 | 
352 | def create_x_cc_block_id(store):
353 |     store.sql("CREATE INDEX x_cc_block_id ON chain_candidate (block_id)")
354 | 
355 | def reverse_binary_hashes(store):
356 |     if store.config['binary_type'] != 'hex':
357 |         raise Exception(
358 |             'To support search by hash prefix, we have to reverse all values'
359 |             ' in block.block_hash, block.block_hashMerkleRoot, tx.tx_hash,'
360 |             ' orphan_block.block_hashPrev, and unlinked_txin.txout_tx_hash.'
361 |             ' This has not been automated.
You may perform this step manually,' 362 | ' then issue "UPDATE config SET schema_version = \'9.1\'" and' 363 | ' rerun this program.') 364 | 365 | def drop_x_cc_block_id(store): 366 | """Redundant with x_cc_block""" 367 | store.sql("DROP INDEX x_cc_block_id") 368 | 369 | def create_x_cc_block_height(store): 370 | store.sql( 371 | "CREATE INDEX x_cc_block_height ON chain_candidate (block_height)") 372 | 373 | def create_txout_approx(store): 374 | store.sql(store.get_ddl('txout_approx')) 375 | 376 | def add_fk_chain_candidate_block_id(store): 377 | add_constraint(store, "chain_candidate", "fk1_chain_candidate", 378 | "FOREIGN KEY (block_id) REFERENCES block (block_id)") 379 | 380 | def create_configvar(store): 381 | store.sql(store.get_ddl('configvar')) 382 | 383 | def configure(store): 384 | store.args.binary_type = store.config['binary_type'] 385 | store.configure() 386 | store.save_config() 387 | 388 | def populate_abe_sequences(store): 389 | if store.config['sequence_type'] == 'update': 390 | try: 391 | store.sql("""CREATE TABLE abe_sequences ( 392 | key VARCHAR(100) NOT NULL PRIMARY KEY, 393 | nextid NUMERIC(30) 394 | )""") 395 | except: 396 | store.rollback() 397 | for t in ['block', 'tx', 'txin', 'txout', 'pubkey', 398 | 'chain', 'magic', 'policy']: 399 | (last_id,) = store.selectrow("SELECT MAX(" + t + "_id) FROM " + t) 400 | if last_id is None: 401 | continue 402 | store.sql("UPDATE abe_sequences SET nextid = ? WHERE key = ?" 403 | " AND nextid <= ?", 404 | (last_id + 1, t, last_id)) 405 | if store.cursor.rowcount < 1: 406 | store.sql("INSERT INTO abe_sequences (key, nextid)" 407 | " VALUES (?, ?)", (t, last_id + 1)) 408 | 409 | def add_datadir_chain_id(store): 410 | store.sql("ALTER TABLE datadir ADD chain_id NUMERIC(10) NULL") 411 | 412 | def noop(store): 413 | pass 414 | 415 | def rescan_if_missed_blocks(store): 416 | """ 417 | Due to a bug, some blocks may have been loaded but not placed in 418 | a chain. If so, reset all datadir offsets to 0 to force a rescan. 419 | """ 420 | (bad,) = store.selectrow(""" 421 | SELECT COUNT(1) 422 | FROM block 423 | LEFT JOIN chain_candidate USING (block_id) 424 | WHERE chain_id IS NULL 425 | """) 426 | if bad > 0: 427 | store.sql( 428 | "UPDATE datadir SET blkfile_number = 1, blkfile_offset = 0") 429 | 430 | def insert_missed_blocks(store): 431 | """ 432 | Rescanning doesn't always work due to timeouts and resource 433 | constraints. This may help. 434 | """ 435 | missed = [] 436 | for row in store.selectall(""" 437 | SELECT b.block_id 438 | FROM block b 439 | LEFT JOIN chain_candidate cc ON (b.block_id = cc.block_id) 440 | WHERE chain_id IS NULL 441 | ORDER BY b.block_height 442 | """): 443 | missed.append(row[0]) 444 | if not missed: 445 | return 446 | store.log.info("Attempting to repair %d missed blocks.", len(missed)) 447 | inserted = 0 448 | for block_id in missed: 449 | # Insert block if its previous block is in the chain. 450 | # XXX This won't work if we want to support forks. 451 | # XXX This doesn't work for unattached blocks. 452 | store.sql(""" 453 | INSERT INTO chain_candidate ( 454 | chain_id, block_id, block_height, in_longest) 455 | SELECT cc.chain_id, b.block_id, b.block_height, 0 456 | FROM chain_candidate cc 457 | JOIN block prev ON (cc.block_id = prev.block_id) 458 | JOIN block b ON (b.prev_block_id = prev.block_id) 459 | WHERE b.block_id = ?""", (block_id,)) 460 | inserted += store.cursor.rowcount 461 | store.commit() # XXX not sure why PostgreSQL needs this. 
462 | store.log.info("Inserted %d rows into chain_candidate.", inserted) 463 | 464 | def repair_missed_blocks(store): 465 | store.log.info("Finding longest chains.") 466 | best_work = [] 467 | for row in store.selectall(""" 468 | SELECT cc.chain_id, MAX(b.block_chain_work) 469 | FROM chain_candidate cc 470 | JOIN block b USING (block_id) 471 | GROUP BY cc.chain_id"""): 472 | best_work.append(row) 473 | best = [] 474 | for row in best_work: 475 | chain_id, bcw = row 476 | (block_id,) = store.selectrow(""" 477 | SELECT MIN(block_id) 478 | FROM block b 479 | JOIN chain_candidate cc USING (block_id) 480 | WHERE cc.chain_id = ? 481 | AND b.block_chain_work = ? 482 | """, (chain_id, bcw)) 483 | (in_longest,) = store.selectrow(""" 484 | SELECT in_longest 485 | FROM chain_candidate 486 | WHERE chain_id = ? 487 | AND block_id = ? 488 | """, (chain_id, block_id)) 489 | if in_longest == 1: 490 | store.log.info("Chain %d already has the block of greatest work.", 491 | chain_id) 492 | continue 493 | best.append([chain_id, block_id]) 494 | store.sql(""" 495 | UPDATE chain 496 | SET chain_last_block_id = ? 497 | WHERE chain_id = ?""", 498 | (block_id, chain_id)) 499 | if store.cursor.rowcount == 1: 500 | store.log.info("Chain %d block %d", chain_id, block_id) 501 | else: 502 | raise Exception("Wrong rowcount updating chain " + str(chain_id)) 503 | if not best: 504 | return 505 | store.log.info("Marking blocks in longest chains.") 506 | for elt in best: 507 | chain_id, block_id = elt 508 | count = 0 509 | while True: 510 | store.sql(""" 511 | UPDATE chain_candidate 512 | SET in_longest = 1 513 | WHERE chain_id = ? 514 | AND block_id = ?""", 515 | (chain_id, block_id)) 516 | if store.cursor.rowcount != 1: 517 | raise Exception("Wrong rowcount updating chain_candidate (" 518 | + str(chain_id) + ", " + str(block_id) + ")") 519 | count += 1 520 | row = store.selectrow(""" 521 | SELECT b.prev_block_id, cc.in_longest 522 | FROM block b 523 | JOIN chain_candidate cc ON (b.prev_block_id = cc.block_id) 524 | WHERE cc.chain_id = ? 525 | AND b.block_id = ?""", 526 | (chain_id, block_id)) 527 | if row is None: 528 | break # genesis block? 529 | block_id, in_longest = row 530 | if in_longest == 1: 531 | break 532 | store.log.info("Processed %d in chain %d", count, chain_id) 533 | store.log.info("Repair successful.") 534 | 535 | def add_block_num_tx(store): 536 | store.sql("ALTER TABLE block ADD block_num_tx NUMERIC(10)") 537 | 538 | def add_block_ss_destroyed(store): 539 | store.sql("ALTER TABLE block ADD block_ss_destroyed NUMERIC(28)") 540 | 541 | def init_block_tx_sums(store): 542 | store.log.info("Calculating block_num_tx and block_ss_destroyed.") 543 | rows = store.selectall(""" 544 | SELECT block_id, 545 | COUNT(1), 546 | COUNT(satoshi_seconds_destroyed), 547 | SUM(satoshi_seconds_destroyed) 548 | FROM block 549 | JOIN block_tx USING (block_id) 550 | GROUP BY block_id""") 551 | count = 0 552 | store.log.info("Storing block_num_tx and block_ss_destroyed.") 553 | for row in rows: 554 | block_id, num_tx, num_ssd, ssd = row 555 | if num_ssd < num_tx: 556 | ssd = None 557 | store.sql(""" 558 | UPDATE block 559 | SET block_num_tx = ?, 560 | block_ss_destroyed = ? 561 | WHERE block_id = ?""", 562 | (num_tx, ssd, block_id)) 563 | count += 1 564 | if count % 1000 == 0: 565 | store.commit() 566 | # XXX would like to set NOT NULL on block_num_tx. 
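# (Editor's note: a sketch, not part of the original file, of how the step
# functions above are wired together.  run_upgrades(), defined near the top
# of this module, walks a list of (schema_version, function) pairs: each
# function runs when the store's schema_version equals its label, and the
# version is then advanced to the next entry's label.  The final entry acts
# as a sentinel target whose function never runs.  The version labels below
# are illustrative only; the module's real upgrade table is not shown here.)
#
#     upgrades = [
#         ('Abe13',   add_block_num_tx),      # hypothetical labels
#         ('Abe13.1', init_block_tx_sums),
#         ('Abe14',   None),                  # sentinel: the target version
#     ]
#     run_upgrades(store, upgrades)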
567 | 568 | def config_ddl(store): 569 | store.configure_ddl_implicit_commit() 570 | store.save_configvar("ddl_implicit_commit") 571 | 572 | def config_create_table_epilogue(store): 573 | store.configure_create_table_epilogue() 574 | store.save_configvar("create_table_epilogue") 575 | 576 | def rename_abe_sequences_key(store): 577 | """Drop and recreate abe_sequences with key renamed to sequence_key.""" 578 | # Renaming a column is horribly unportable. 579 | try: 580 | data = store.selectall(""" 581 | SELECT DISTINCT key, nextid 582 | FROM abe_sequences""") 583 | except Exception: # Table does not exist; nothing to rename. 584 | store.rollback() 585 | return 586 | store.log.info("copying sequence positions: %s", data) 587 | store.ddl("DROP TABLE abe_sequences") 588 | store.ddl("""CREATE TABLE abe_sequences ( 589 | sequence_key VARCHAR(100) PRIMARY KEY, 590 | nextid NUMERIC(30) 591 | )""") 592 | for row in data: 593 | store.sql("INSERT INTO abe_sequences (sequence_key, nextid)" 594 | " VALUES (?, ?)", row) 595 | 596 | def create_x_txin_txout(store): 597 | store.sql("CREATE INDEX x_txin_txout ON txin (txout_id)") 598 | 599 | def save_datadir(store): 600 | """Copy the datadir table to recreate it with a new column.""" 601 | store.sql("CREATE TABLE abe_tmp_datadir AS SELECT * FROM datadir") 602 | 603 | def add_datadir_id(store): 604 | data = store.selectall(""" 605 | SELECT dirname, blkfile_number, blkfile_offset, chain_id 606 | FROM abe_tmp_datadir""") 607 | try: 608 | store.ddl("DROP TABLE datadir") 609 | except Exception: 610 | store.rollback() # Assume already dropped. 611 | 612 | store.ddl("""CREATE TABLE datadir ( 613 | datadir_id NUMERIC(10) PRIMARY KEY, 614 | dirname VARCHAR(2000) NOT NULL, 615 | blkfile_number NUMERIC(4) NULL, 616 | blkfile_offset NUMERIC(20) NULL, 617 | chain_id NUMERIC(10) NULL 618 | )""") 619 | store.create_sequence("datadir") 620 | for row in data: 621 | new_row = [store.new_id("datadir")] 622 | new_row += row 623 | store.sql(""" 624 | INSERT INTO datadir ( 625 | datadir_id, dirname, blkfile_number, blkfile_offset, chain_id 626 | ) VALUES (?, ?, ?, ?, ?)""", new_row) 627 | 628 | def drop_tmp_datadir(store): 629 | store.ddl("DROP TABLE abe_tmp_datadir") 630 | 631 | def config_clob(store): 632 | store.configure_max_varchar() 633 | store.save_configvar("max_varchar") 634 | store.configure_clob_type() 635 | store.save_configvar("clob_type") 636 | 637 | def clear_bad_addresses(store): 638 | """Set address=Unknown for the bogus outputs in Bitcoin block 71036.""" 639 | bad_tx = [ 640 | 'a288fec5559c3f73fd3d93db8e8460562ebfe2fcf04a5114e8d0f2920a6270dc', 641 | '2a0597e665ac3d1cabeede95cedf907934db7f639e477b3c77b242140d8cf728', 642 | 'e411dbebd2f7d64dafeef9b14b5c59ec60c36779d43f850e5e347abee1e1a455'] 643 | for tx_hash in bad_tx: 644 | row = store.selectrow(""" 645 | SELECT tx_id FROM tx WHERE tx_hash = ?""", 646 | (store.hashin_hex(tx_hash),)) 647 | if row: 648 | store.sql(""" 649 | UPDATE txout SET pubkey_id = NULL 650 | WHERE tx_id = ? AND txout_pos = 1 AND pubkey_id IS NOT NULL""", 651 | (row[0],)) 652 | if store.cursor.rowcount: 653 | store.log.info("Cleared txout %s", tx_hash) 654 | 655 | def find_namecoin_addresses(store): 656 | updated = 0 657 | for tx_id, txout_pos, script in store.selectall(""" 658 | SELECT tx_id, txout_pos, txout_scriptPubKey 659 | FROM txout 660 | WHERE pubkey_id IS NULL"""): 661 | pubkey_id = store.script_to_pubkey_id(store.binout(script)) 662 | if pubkey_id is not None: 663 | store.sql(""" 664 | UPDATE txout 665 | SET pubkey_id = ? 666 | WHERE tx_id = ?
667 | AND txout_pos = ?""", (pubkey_id, tx_id, txout_pos)) 668 | updated += 1 669 | if updated % 1000 == 0: 670 | store.commit() 671 | store.log.info("Found %d addresses", updated) 672 | if updated % 1000 > 0: 673 | store.commit() 674 | store.log.info("Found %d addresses", updated) 675 | 676 | def create_abe_lock(store): 677 | store.ddl("""CREATE TABLE abe_lock ( 678 | lock_id NUMERIC(10) NOT NULL PRIMARY KEY, 679 | pid VARCHAR(255) NULL 680 | )""") 681 | 682 | def create_abe_lock_row(store): 683 | store.sql("INSERT INTO abe_lock (lock_id) VALUES (1)") 684 | 685 | def insert_null_pubkey(store): 686 | dbnull = store.binin(DataStore.NULL_PUBKEY_HASH) 687 | row = store.selectrow("SELECT pubkey_id FROM pubkey WHERE pubkey_hash = ?", 688 | (dbnull,)) 689 | if row: 690 | # Null hash seen in a transaction. Go to some trouble to 691 | # set its pubkey_id = 0 without violating constraints. 692 | old_id = row[0] 693 | import random # No need for cryptographic strength here. 694 | temp_hash = "".join([chr(random.randint(0, 255)) for x in xrange(20)]) 695 | store.sql("INSERT INTO pubkey (pubkey_id, pubkey_hash) VALUES (?, ?)", 696 | (DataStore.NULL_PUBKEY_ID, store.binin(temp_hash))) 697 | store.sql("UPDATE txout SET pubkey_id = ? WHERE pubkey_id = ?", 698 | (DataStore.NULL_PUBKEY_ID, old_id)) 699 | store.sql("DELETE FROM pubkey WHERE pubkey_id = ?", (old_id,)) 700 | store.sql("UPDATE pubkey SET pubkey_hash = ? WHERE pubkey_id = ?", 701 | (dbnull, DataStore.NULL_PUBKEY_ID)) 702 | else: 703 | store.sql(""" 704 | INSERT INTO pubkey (pubkey_id, pubkey_hash) VALUES (?, ?)""", 705 | (DataStore.NULL_PUBKEY_ID, dbnull)) 706 | 707 | def set_netfee_pubkey_id(store): 708 | store.log.info("Updating network fee output address to 'Destroyed'...") 709 | # XXX This doesn't work for Oracle because of LOB weirdness. 710 | # There, you could probably get away with: 711 | # UPDATE txout SET pubkey_id = 0 WHERE txout_scriptPubKey BETWEEN 1 AND 2; 712 | # UPDATE configvar SET configvar_value = 'Abe26' WHERE configvar_name = 713 | # 'schema_version' AND configvar_value = 'Abe25.3'; 714 | # COMMIT; 715 | store.sql(""" 716 | UPDATE txout 717 | SET pubkey_id = ?
718 | WHERE txout_scriptPubKey = ?""", 719 | (DataStore.NULL_PUBKEY_ID, 720 | store.binin(DataStore.SCRIPT_NETWORK_FEE))) 721 | store.log.info("...rows updated: %d", store.cursor.rowcount) 722 | 723 | def adjust_block_total_satoshis(store): 724 | store.log.info("Adjusting value outstanding for lost coins.") 725 | block = {} 726 | block_ids = [] 727 | 728 | store.log.info("...getting block relationships.") 729 | for block_id, prev_id in store.selectall(""" 730 | SELECT block_id, prev_block_id 731 | FROM block 732 | WHERE block_height IS NOT NULL 733 | ORDER BY block_height"""): 734 | block[block_id] = {"prev_id": prev_id} 735 | block_ids.append(block_id) 736 | 737 | store.log.info("...getting lossage per block.") 738 | for block_id, lost in store.selectall(""" 739 | SELECT block_tx.block_id, SUM(txout.txout_value) 740 | FROM block_tx 741 | JOIN txout ON (block_tx.tx_id = txout.tx_id) 742 | WHERE txout.pubkey_id <= 0 743 | GROUP BY block_tx.block_id"""): 744 | if block_id in block: 745 | block[block_id]["lost"] = lost 746 | 747 | store.log.info("...calculating adjustments.") 748 | for block_id in block_ids: 749 | b = block[block_id] 750 | prev_id = b["prev_id"] 751 | prev_lost = 0 if prev_id is None else block[prev_id]["cum_lost"] 752 | b["cum_lost"] = b.get("lost", 0) + prev_lost 753 | 754 | store.log.info("...applying adjustments.") 755 | count = 0 756 | for block_id in block_ids: 757 | adj = block[block_id]["cum_lost"] 758 | if adj != 0: 759 | store.sql(""" 760 | UPDATE block 761 | SET block_total_satoshis = block_total_satoshis - ? 762 | WHERE block_id = ?""", 763 | (adj, block_id)) 764 | count += 1 765 | if count % 1000 == 0: 766 | store.log.info("Adjusted %d of %d blocks.", count, len(block_ids)) 767 | if count % 1000 != 0: 768 | store.log.info("Adjusted %d of %d blocks.", count, len(block_ids)) 769 | 770 | def config_limit_style(store): 771 | store.configure_limit_style() 772 | store.save_configvar("limit_style") 773 | 774 | def config_sequence_type(store): 775 | if store.config['sequence_type'] != "update": 776 | return 777 | store.configure_sequence_type() 778 | if store.config['sequence_type'] != "update": 779 | store.log.info("Creating native sequences.") 780 | for name in ['magic', 'policy', 'chain', 'datadir', 781 | 'tx', 'txout', 'pubkey', 'txin', 'block']: 782 | store.drop_sequence_if_exists(name) 783 | store.create_sequence(name) 784 | store.save_configvar("sequence_type") 785 | 786 | def add_search_block_id(store): 787 | store.log.info("Creating block.search_block_id") 788 | store.sql("ALTER TABLE block ADD search_block_id NUMERIC(14) NULL") 789 | 790 | def populate_search_block_id(store): 791 | store.log.info("Calculating block.search_block_id") 792 | 793 | for block_id, height, prev_id in store.selectall(""" 794 | SELECT block_id, block_height, prev_block_id 795 | FROM block 796 | WHERE block_height IS NOT NULL 797 | ORDER BY block_height"""): 798 | height = int(height) 799 | 800 | search_id = None 801 | if prev_id is not None: 802 | prev_id = int(prev_id) 803 | search_height = util.get_search_height(height) 804 | if search_height is not None: 805 | search_id = store.get_block_id_at_height(search_height, prev_id) 806 | store.sql("UPDATE block SET search_block_id = ?
WHERE block_id = ?", 807 | (search_id, block_id)) 808 | store.cache_block(int(block_id), height, prev_id, search_id) 809 | store.commit() 810 | 811 | def add_fk_search_block_id(store): 812 | add_constraint(store, "block", "fk1_search_block_id", 813 | "FOREIGN KEY (search_block_id) REFERENCES block (block_id)") 814 | 815 | def create_firstbits(store): 816 | flag = store.config.get('use_firstbits') 817 | 818 | if flag is None: 819 | if store.args.use_firstbits is None: 820 | store.log.info("use_firstbits not found, defaulting to false.") 821 | store.config['use_firstbits'] = "false" 822 | store.save_configvar("use_firstbits") 823 | return 824 | flag = "true" if store.args.use_firstbits else "false" 825 | store.config['use_firstbits'] = flag 826 | store.save_configvar("use_firstbits") 827 | 828 | if flag == "true": 829 | import firstbits 830 | firstbits.create_firstbits(store) 831 | 832 | def populate_firstbits(store): 833 | if store.config['use_firstbits'] == "true": 834 | import firstbits 835 | firstbits.populate_firstbits(store) 836 | 837 | def add_keep_scriptsig(store): 838 | store.config['keep_scriptsig'] = "true" 839 | store.save_configvar("keep_scriptsig") 840 | 841 | def drop_satoshi_seconds_destroyed(store): 842 | store.drop_column_if_exists("block_txin", "satoshi_seconds_destroyed") 843 | 844 | def widen_blkfile_number(store): 845 | data = store.selectall(""" 846 | SELECT datadir_id, dirname, blkfile_number, blkfile_offset, chain_id 847 | FROM abe_tmp_datadir""") 848 | store.drop_table_if_exists("datadir") 849 | 850 | store.ddl("""CREATE TABLE datadir ( 851 | datadir_id NUMERIC(10) NOT NULL PRIMARY KEY, 852 | dirname VARCHAR(2000) NOT NULL, 853 | blkfile_number NUMERIC(8) NULL, 854 | blkfile_offset NUMERIC(20) NULL, 855 | chain_id NUMERIC(10) NULL 856 | )""") 857 | for row in data: 858 | store.sql(""" 859 | INSERT INTO datadir ( 860 | datadir_id, dirname, blkfile_number, blkfile_offset, chain_id 861 | ) VALUES (?, ?, ?, ?, ?)""", row) 862 | 863 | def add_datadir_loader(store): 864 | store.sql("ALTER TABLE datadir ADD datadir_loader VARCHAR(100) NULL") 865 | 866 | def populate_pubkeys(store): 867 | store.log.info("Finding short public key addresses.") 868 | count = 0 869 | last = 0 870 | while True: 871 | rows = store.selectall(""" 872 | SELECT txout_id, txout_scriptPubKey 873 | FROM txout 874 | WHERE pubkey_id IS NULL 875 | AND txout_id > ? 876 | AND txout_scriptPubKey BETWEEN ? AND ? 877 | ORDER BY txout_id 878 | LIMIT 3000""", 879 | (last, store.binin("\x21"), store.binin("\x22"))) 880 | if not rows: 881 | break 882 | for txout_id, db_script in rows: 883 | last = txout_id 884 | script = store.binout(db_script) 885 | pubkey_id = store.script_to_pubkey_id(script) 886 | if pubkey_id > 0: 887 | store.sql("UPDATE txout SET pubkey_id = ? 
WHERE txout_id = ?", 888 | (pubkey_id, txout_id)) 889 | count += 1 890 | store.log.info("Found %d", count) 891 | 892 | upgrades = [ 893 | ('6', add_block_value_in), 894 | ('6.1', add_block_value_out), 895 | ('6.2', add_block_total_satoshis), 896 | ('6.3', add_block_total_seconds), 897 | ('6.4', add_block_satoshi_seconds), 898 | ('6.5', add_block_total_ss), 899 | ('6.6', add_satoshi_seconds_destroyed), 900 | ('6.7', add_cc_block_height), 901 | ('6.8', init_cc_block_height), 902 | ('6.9', index_cc_block_height), 903 | ('6.10', index_cc_block), 904 | ('6.11', create_block_txin), 905 | ('6.12', index_block_tx_tx), 906 | ('6.13', init_block_txin), 907 | ('6.14', init_block_value_in), 908 | ('6.15', init_block_value_out), 909 | ('6.16', init_block_totals), 910 | ('6.17', init_satoshi_seconds_destroyed), 911 | ('6.18', set_0_satoshi_seconds_destroyed), 912 | ('6.19', noop), 913 | ('6.20', index_block_nTime), 914 | ('6.21', replace_chain_summary), 915 | ('7', replace_chain_summary), 916 | ('7.1', index_block_tx_tx), # forgot to put in abe.py 917 | ('7.2', init_block_txin), # abe.py put bad data there. 918 | ('7.3', init_satoshi_seconds_destroyed), 919 | ('7.4', set_0_satoshi_seconds_destroyed), 920 | ('7.5', noop), 921 | ('7.6', drop_block_ss_columns), 922 | ('8', add_fk_block_txin_block_id), 923 | ('8.1', add_fk_block_txin_tx_id), 924 | ('8.2', add_fk_block_txin_out_block_id), 925 | ('8.3', add_chk_block_txin_out_block_id_nn), 926 | ('8.4', create_x_cc_block_id), 927 | ('9', reverse_binary_hashes), 928 | ('9.1', drop_x_cc_block_id), 929 | ('9.2', create_x_cc_block_height), 930 | ('10', create_txout_approx), 931 | ('11', add_fk_chain_candidate_block_id), 932 | ('12', create_configvar), 933 | ('12.1', configure), 934 | ('Abe13', populate_abe_sequences), 935 | ('Abe14', add_datadir_chain_id), 936 | ('Abe15', noop), 937 | ('Abe16', rescan_if_missed_blocks), # May be slow. 
938 | ('Abe17', insert_missed_blocks), 939 | ('Abe17.1', repair_missed_blocks), 940 | ('Abe18', add_block_num_tx), # Seconds 941 | ('Abe18.1', add_block_ss_destroyed), # Seconds 942 | ('Abe18.2', init_block_tx_sums), # 5 minutes 943 | ('Abe18.3', replace_chain_summary), # Fast 944 | ('Abe19', config_ddl), # Fast 945 | ('Abe20', config_create_table_epilogue), # Fast 946 | ('Abe20.1', rename_abe_sequences_key), # Fast 947 | ('Abe21', create_x_txin_txout), # 25 seconds 948 | ('Abe22', save_datadir), # Fast 949 | ('Abe22.1', add_datadir_id), # Fast 950 | ('Abe22.2', drop_tmp_datadir), # Fast 951 | ('Abe23', config_clob), # Fast 952 | ('Abe24', clear_bad_addresses), # Fast 953 | ('Abe24.1', find_namecoin_addresses), # 2 minutes if you have Namecoin 954 | ('Abe25', create_abe_lock), # Fast 955 | ('Abe25.1', create_abe_lock_row), # Fast 956 | ('Abe25.2', insert_null_pubkey), # 1 second 957 | ('Abe25.3', set_netfee_pubkey_id), # Seconds 958 | ('Abe26', adjust_block_total_satoshis), # 1-3 minutes 959 | ('Abe26.1', init_block_satoshi_seconds), # 3-10 minutes 960 | ('Abe27', config_limit_style), # Fast 961 | ('Abe28', config_sequence_type), # Fast 962 | ('Abe29', add_search_block_id), # Seconds 963 | ('Abe29.1', populate_search_block_id), # 1-2 minutes if using firstbits 964 | ('Abe29.2', add_fk_search_block_id), # Seconds 965 | ('Abe29.3', create_firstbits), # Fast 966 | ('Abe29.4', populate_firstbits), # Slow if config use_firstbits=true 967 | ('Abe30', add_keep_scriptsig), # Fast 968 | ('Abe31', drop_satoshi_seconds_destroyed), # Seconds 969 | ('Abe32', save_datadir), # Fast 970 | ('Abe32.1', widen_blkfile_number), # Fast 971 | ('Abe32.2', drop_tmp_datadir), # Fast 972 | ('Abe33', add_datadir_loader), # Fast 973 | ('Abe34', populate_pubkeys), # Minutes? 974 | ('Abe35', None) 975 | ] 976 | 977 | def upgrade_schema(store): 978 | run_upgrades(store, upgrades) 979 | sv = store.config['schema_version'] 980 | curr = upgrades[-1][0] 981 | if sv != curr: 982 | raise Exception('Cannot upgrade from schema version %s to %s' 983 | % (sv, curr)) 984 | store.log.warning("Upgrade complete.") 985 | 986 | if __name__ == '__main__': 987 | print "Run Abe with --upgrade added to the usual arguments." 988 | sys.exit(2) 989 | --------------------------------------------------------------------------------
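Editor's note: the upgrades table above pairs each schema version with the function that advances the database to the next entry's version, ending with ('Abe35', None) as the target. run_upgrades, defined earlier in this file, walks that list starting from the stored schema_version. Below is a minimal sketch of such a version-keyed runner; run_migrations is a hypothetical name, and the persistence calls (save_configvar, commit) are assumed from how the upgrade functions above use store, so the real run_upgrades may differ in details like logging and error handling:

    # Hedged sketch of a version-keyed migration runner in the style of
    # run_upgrades.  Only `upgrades` mirrors the table defined above.
    def run_migrations(store, upgrades):
        # Locate the stored schema version in the upgrade table.
        versions = [v for v, f in upgrades]
        try:
            i = versions.index(store.config['schema_version'])
        except ValueError:
            raise Exception("Unknown schema version: %s"
                            % store.config['schema_version'])
        # Apply each step in order; the final entry pairs the target
        # version with None and is never executed.
        while i < len(upgrades) - 1:
            upgrades[i][1](store)   # run this version's upgrade function
            i += 1
            store.config['schema_version'] = upgrades[i][0]
            store.save_configvar("schema_version")  # assumed persistence hook
            store.commit()

Recording the new version after every step is what makes the chain restartable: if an upgrade fails partway through, rerunning resumes from the last version that committed.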
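Editor's note: populate_abe_sequences (step Abe13, shown earlier) seeds each emulated sequence with a portable upsert: try an UPDATE first and fall back to INSERT when no row matched, since not every database Abe supports has a native upsert statement. Condensed to its core, with a plain DB-API cursor (qmark paramstyle) standing in for store.sql; seed_sequence is a hypothetical name, and the sketch mirrors the original's assumption that an unmatched UPDATE means the key is absent:

    # Hedged sketch: portable UPDATE-then-INSERT upsert used to seed
    # abe_sequences; table and column names are from the upgrade above.
    def seed_sequence(cursor, key, last_id):
        # Advance an existing counter, but never move it backwards.
        cursor.execute("UPDATE abe_sequences SET nextid = ?"
                       " WHERE key = ? AND nextid <= ?",
                       (last_id + 1, key, last_id))
        if cursor.rowcount < 1:
            # No row matched: treat the key as new and insert its counter.
            cursor.execute("INSERT INTO abe_sequences (key, nextid)"
                           " VALUES (?, ?)", (key, last_id + 1))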