├── sqlchain ├── __init__.py ├── overlay │ ├── __init__.py │ └── reddcoin.py ├── version.py ├── rpc.py ├── democvt.py ├── blkdat.py ├── dbpool.py ├── bci.py └── insight.py ├── .gitignore ├── docs ├── ALTCOINS.md ├── ENGINES.md └── INSTALL.md ├── www ├── img │ ├── favicon.png │ ├── loading.gif │ ├── glyphicons-halflings.png │ └── glyphicons-halflings-white.png ├── 404.html ├── fonts │ ├── glyphicons-halflings-regular.eot │ ├── glyphicons-halflings-regular.ttf │ ├── glyphicons-halflings-regular.woff │ └── glyphicons-halflings-regular.woff2 ├── js │ ├── ie10-viewport-bug-workaround.js │ └── main.js ├── css │ ├── signin.css │ └── main.css ├── wstest.html └── main.html ├── MANIFEST.in ├── etc ├── sqlchain.log.template ├── systemd.template ├── post-receive ├── node.log.template ├── nginx.template ├── deploy ├── dbinfo.sql ├── electrum.banner ├── dogecoin.sql ├── sqlchain.sql └── reddcoin.sql ├── utils ├── blkbtc ├── fixblksize ├── fixchainwork ├── synctest ├── bwlimit ├── stripsigs └── sqlchain-upgrade-db ├── tests ├── conftest.py ├── README.md ├── test_live_api.py ├── mklivetestdb └── test_utils_bitcoin.py ├── LICENSE ├── RELEASE-NOTES ├── setup.py ├── README.md ├── sqlchain-electrum ├── sqlchain-api └── sqlchaind /sqlchain/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /sqlchain/overlay/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .cache 2 | livetest.*.db 3 | -------------------------------------------------------------------------------- /docs/ALTCOINS.md: -------------------------------------------------------------------------------- 1 | ### sqlChain Multi-Coin support 2 | 3 | 4 | 
-------------------------------------------------------------------------------- /docs/ENGINES.md: -------------------------------------------------------------------------------- 1 | ### sqlChain RocksDB (and other Engine) Install Guide 2 | -------------------------------------------------------------------------------- /www/img/favicon.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neocogent/sqlchain/HEAD/www/img/favicon.png -------------------------------------------------------------------------------- /www/img/loading.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neocogent/sqlchain/HEAD/www/img/loading.gif -------------------------------------------------------------------------------- /MANIFEST.in: -------------------------------------------------------------------------------- 1 | include README.md 2 | recursive-include docs * 3 | recursive-include www * 4 | 5 | -------------------------------------------------------------------------------- /www/img/glyphicons-halflings.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neocogent/sqlchain/HEAD/www/img/glyphicons-halflings.png -------------------------------------------------------------------------------- /www/404.html: -------------------------------------------------------------------------------- 1 | 2 | 3 |
4 | 5 | Error 404 - File not found. 6 | 7 | -------------------------------------------------------------------------------- /www/img/glyphicons-halflings-white.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neocogent/sqlchain/HEAD/www/img/glyphicons-halflings-white.png -------------------------------------------------------------------------------- /www/fonts/glyphicons-halflings-regular.eot: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neocogent/sqlchain/HEAD/www/fonts/glyphicons-halflings-regular.eot -------------------------------------------------------------------------------- /www/fonts/glyphicons-halflings-regular.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neocogent/sqlchain/HEAD/www/fonts/glyphicons-halflings-regular.ttf -------------------------------------------------------------------------------- /www/fonts/glyphicons-halflings-regular.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neocogent/sqlchain/HEAD/www/fonts/glyphicons-halflings-regular.woff -------------------------------------------------------------------------------- /www/fonts/glyphicons-halflings-regular.woff2: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neocogent/sqlchain/HEAD/www/fonts/glyphicons-halflings-regular.woff2 -------------------------------------------------------------------------------- /etc/sqlchain.log.template: -------------------------------------------------------------------------------- 1 | {coindir}/debug.log {{ 2 | weekly 3 | copytruncate 4 | rotate 4 5 | compress 6 | delaycompress 7 | missingok 8 | notifempty 9 | }} 10 | -------------------------------------------------------------------------------- 
/etc/systemd.template: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=sqlChain - title 3 | 4 | [Service] 5 | Type=forking 6 | ExecStart 7 | TimeoutStopSec=2min 8 | 9 | [Install] 10 | WantedBy=multi-user.target 11 | -------------------------------------------------------------------------------- /etc/post-receive: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | unset GIT_DIR 4 | DEPLOY_WORK="${HOME}/work" 5 | 6 | while read from to branch 7 | do 8 | mkdir -p "${DEPLOY_WORK}" 9 | GIT_WORK_TREE="${DEPLOY_WORK}" git checkout -f "${branch}" 10 | cd "${DEPLOY_WORK}" 11 | if [ -f etc/deploy ]; then 12 | etc/deploy "${branch##*/}" 13 | fi 14 | rm -rf "${DEPLOY_WORK}" 15 | done 16 | -------------------------------------------------------------------------------- /utils/blkbtc: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | if [ "$1" == "off" ]; then 4 | echo "bitcoin open" 5 | CHG="D" 6 | else 7 | echo "bitcoin blocked" 8 | CHG="A" 9 | fi 10 | 11 | sudo iptables -$CHG INPUT -p tcp --dport 8333 -j DROP 12 | sudo iptables -$CHG OUTPUT -p tcp --dport 8333 -j DROP 13 | sudo iptables -$CHG INPUT -p tcp --dport 18333 -j DROP 14 | sudo iptables -$CHG OUTPUT -p tcp --dport 18333 -j DROP 15 | -------------------------------------------------------------------------------- /etc/node.log.template: -------------------------------------------------------------------------------- 1 | {sqldir}/api.log {{ 2 | weekly 3 | rotate 4 4 | compress 5 | delaycompress 6 | missingok 7 | notifempty 8 | postrotate 9 | [ -f {sqldir}/api.pid ] && kill -HUP `cat {sqldir}/api.pid` 10 | endscript 11 | }} 12 | 13 | {sqldir}/daemon.log {{ 14 | weekly 15 | rotate 4 16 | compress 17 | delaycompress 18 | missingok 19 | notifempty 20 | postrotate 21 | [ -f {sqldir}/daemon.pid ] && kill -HUP `cat {sqldir}/daemon.pid` 22 | endscript 23 | }} 
24 | -------------------------------------------------------------------------------- /etc/nginx.template: -------------------------------------------------------------------------------- 1 | server {{ 2 | listen 80; 3 | listen [::]:80; 4 | listen 443 ssl http2; 5 | listen [::]:443 ssl http2; 6 | server_name {apidomain}; 7 | 8 | ssl_certificate {sslpath}/fullchain.pem; 9 | ssl_certificate_key {sslpath}/privkey.pem; 10 | ssl_protocols TLSv1 TLSv1.1 TLSv1.2; 11 | 12 | location / {{ 13 | proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; 14 | proxy_set_header Host $http_host; 15 | proxy_set_header X-Forwarded-Proto $scheme; 16 | proxy_pass http://{listen}; 17 | }} 18 | }} 19 | -------------------------------------------------------------------------------- /tests/conftest.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | def pytest_addoption(parser): 4 | parser.addoption("--runlive", action="store_true", help="run live tests") 5 | parser.addoption("--nosigs", action="store_true", help="is nosigs db") 6 | parser.addoption("--dbuser", action="store", default="root:root", help="db user:pwd for mysql tests") 7 | parser.addoption("--coin", action="store", default="btc", help="set coin type, default btc") 8 | parser.addoption("--server", action="store", default="localhost:8085/api", help="set api-server: host:port/api-path") 9 | parser.addoption("--append", action="store_true", help="don't clear previous live test results") 10 | 11 | -------------------------------------------------------------------------------- /etc/deploy: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # for server deploy of git repo - customize to suit 3 | # include in repo - needs a post-receive hook to call 4 | # see docs directory for example 5 | # checkout is current directory 6 | # runs as user git 7 | 8 | declare -A branch 9 | branch["dev"]="/usr/local" 10 | 
branch["tests"]="/usr/local" 11 | 12 | if [[ "${branch[$1]}" ]]; then 13 | target=${branch[$1]} 14 | 15 | echo "Copying executable and package files" 16 | cp sqlchaind sqlchain-api sqlchain-config sqlchain-electrum $target/bin/ 17 | cp -r sqlchain/* $target/lib/python2.7/dist-packages/sqlchain/ 18 | cp -r etc $target/share/sqlchain/ 19 | cp -r tests /home/chris/ 20 | 21 | else 22 | echo "Branch "$1" has no target - not deployed." 23 | fi 24 | 25 | -------------------------------------------------------------------------------- /www/js/ie10-viewport-bug-workaround.js: -------------------------------------------------------------------------------- 1 | /*! 2 | * IE10 viewport hack for Surface/desktop Windows 8 bug 3 | * Copyright 2014-2015 Twitter, Inc. 4 | * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE) 5 | */ 6 | 7 | // See the Getting Started docs for more information: 8 | // http://getbootstrap.com/getting-started/#support-ie10-width 9 | 10 | (function () { 11 | 'use strict'; 12 | 13 | if (navigator.userAgent.match(/IEMobile\/10\.0/)) { 14 | var msViewportStyle = document.createElement('style') 15 | msViewportStyle.appendChild( 16 | document.createTextNode( 17 | '@-ms-viewport{width:auto!important}' 18 | ) 19 | ) 20 | document.querySelector('head').appendChild(msViewportStyle) 21 | } 22 | 23 | })(); 24 | -------------------------------------------------------------------------------- /utils/fixblksize: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # 3 | # fix blksize when missing 4 | # 5 | 6 | cfg = { 'dbinfo':"localhost:btc:pwd:testnet", 'path':'/var/data/sqlchain/testnet' } 7 | 8 | blk = 0 9 | max_blk = 907843 10 | 11 | import MySQLdb as db 12 | 13 | sql = db.connect(*cfg['dbinfo'].split(':')) 14 | cur = sql.cursor() 15 | 16 | fixlist = [] 17 | while blk < max_blk: 18 | cur.execute("select count(*),sum(txsize) from trxs where block_id>=%s and block_id <%s;", 
(blk*20000,(blk+1)*20000)) 19 | txcnt,blksize = cur.fetchone() 20 | blksize += 81 if txcnt <= 252 else 83 21 | fixlist.append((blksize,blk)) 22 | if blk % 10000 == 0: 23 | print "Commit:",blk 24 | cur.executemany("update blocks set blksize=%s where id=%s;", fixlist) 25 | sql.commit() 26 | fixlist = [] 27 | blk += 1 28 | 29 | cur.executemany("update blocks set blksize=%s where id=%s;", fixlist) 30 | sql.commit() 31 | 32 | print "Done",blk 33 | 34 | -------------------------------------------------------------------------------- /www/css/signin.css: -------------------------------------------------------------------------------- 1 | body { 2 | padding-top: 40px; 3 | padding-bottom: 40px; 4 | background-color: #eee; 5 | } 6 | 7 | .form-signin { 8 | max-width: 330px; 9 | padding: 15px; 10 | margin: 0 auto; 11 | } 12 | .form-signin .form-signin-heading, 13 | .form-signin .checkbox { 14 | margin-bottom: 10px; 15 | } 16 | .form-signin .checkbox { 17 | font-weight: normal; 18 | } 19 | .form-signin .form-control { 20 | position: relative; 21 | height: auto; 22 | -webkit-box-sizing: border-box; 23 | -moz-box-sizing: border-box; 24 | box-sizing: border-box; 25 | padding: 10px; 26 | font-size: 16px; 27 | } 28 | .form-signin .form-control:focus { 29 | z-index: 2; 30 | } 31 | .form-signin input[type="email"] { 32 | margin-bottom: -1px; 33 | border-bottom-right-radius: 0; 34 | border-bottom-left-radius: 0; 35 | } 36 | .form-signin input[type="password"] { 37 | margin-bottom: 10px; 38 | border-top-left-radius: 0; 39 | border-top-right-radius: 0; 40 | } 41 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Copyright (c) 2015-2018 neoCogent.com 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software 
without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /utils/fixchainwork: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # 3 | # fix chainwork when missing 4 | # 5 | 6 | cfg = { 'dbinfo':"localhost:btc:pwd:testnet", 'path':'/var/data/sqlchain/testnet' } 7 | 8 | blk = 0 9 | max_blk = 907843 10 | chainwork = 0 11 | 12 | from struct import unpack_from 13 | from binascii import unhexlify 14 | import MySQLdb as db 15 | 16 | def blockwork(bits): 17 | return 2**256/((bits&0xFFFFFF) * 2**(8*((bits>>24) - 3))+1) 18 | def int2bin32(val): 19 | return unhexlify('%064x' % val) 20 | 21 | def gethdr(blk, cfg, var=None): 22 | with open(cfg['path']+'/hdrs.dat', 'rb') as f: 23 | f.seek(blk*80) 24 | data = f.read(80) 25 | hdr = dict(zip(['version','previousblockhash','merkleroot', 'time', 'bits', 'nonce'], unpack_from('"+JSON.stringify(data, null, 2)+""); 26 | id += 1; 27 | }, "json"); 28 | } 29 | } 30 | function respClose(e) { 31 | if ($(this).next().hasClass('resp')) 32 | 
$(this).next().remove(); 33 | $(this).click(); 34 | } 35 | $(document).ready( function() { 36 | $('.apiClk li').click(apiClk); 37 | $('.apiClk span').click(apiClk); 38 | $('#findform').submit( function( e ) { 39 | if ($('#findform').next().hasClass('resp')) 40 | $('#findform').next().remove(); 41 | $.post( '/api/auto', $(this).serialize(), function(data) { 42 | var msg = $('#blank').clone().attr('id', '#d'+id).insertAfter($('#findform')); 43 | msg.html("
"+JSON.stringify(data, null, 2)+""); 44 | $('#findform').click(respClose); 45 | id += 1; 46 | }, "json"); 47 | e.preventDefault(); 48 | }); 49 | }); 50 | })(jQuery); 51 | -------------------------------------------------------------------------------- /utils/bwlimit: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Copyright (c) 2013 The Bitcoin Core developers 3 | # Distributed under the MIT software license, see the accompanying 4 | # file COPYING or http://www.opensource.org/licenses/mit-license.php. 5 | 6 | #network interface on which to limit traffic 7 | IF="wlan0" 8 | #limit of the network interface in question 9 | LINKCEIL="1gbit" 10 | #limit outbound Bitcoin protocol traffic to this rate 11 | LIMIT="100kbit" 12 | #defines the address space for which you wish to disable rate limiting 13 | LOCALNET="192.168.0.0/16" 14 | 15 | #delete existing rules 16 | tc qdisc del dev ${IF} root 17 | iptables -t mangle -F OUTPUT 18 | 19 | #add root class 20 | tc qdisc add dev ${IF} root handle 1: htb default 10 21 | 22 | #add parent class 23 | tc class add dev ${IF} parent 1: classid 1:1 htb rate ${LINKCEIL} ceil ${LINKCEIL} 24 | 25 | #add our two classes. one unlimited, another limited 26 | tc class add dev ${IF} parent 1:1 classid 1:10 htb rate ${LINKCEIL} ceil ${LINKCEIL} prio 0 27 | tc class add dev ${IF} parent 1:1 classid 1:11 htb rate ${LIMIT} ceil ${LIMIT} prio 1 28 | 29 | #add handles to our classes so packets marked with
=%s*{0} and o.id<%s*{0};".format(MAX_IO_TX), (txid,txid+1)) 184 | outs = cur.fetchall() 185 | for in_id,n,value,aid in outs: 186 | cur.execute("select addr from {0} where id=%s limit 1;".format('bech32' if is_BL32(int(aid)) else 'address'), (aid,)) 187 | for addr, in cur: 188 | vout = { 'n':int(n), 'value':int(value), 'addr':mkaddr(addr,int(aid)), 'type':0, 'tx_index':txid } 189 | if in_id: 190 | vout['spent'] = True 191 | data.append(vout) 192 | return data,len(outs) 193 | -------------------------------------------------------------------------------- /sqlchain-electrum: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | from time import sleep 4 | from urllib2 import urlopen, URLError 5 | 6 | import os, sys, getopt, signal, json, daemon 7 | 8 | from gevent import socket, monkey, spawn 9 | from gevent.server import StreamServer 10 | from gevent.queue import Queue 11 | 12 | from sqlchain.version import version 13 | from sqlchain.util import dotdict, loadcfg, savecfg, drop2user, logts, getChunk 14 | 15 | __builtins__.sqc = dotdict() # container for super globals 16 | 17 | sqc.cfg = { 'log':sys.argv[0]+'.log', 'listen':'localhost:8081', 'www':'www', 'api':'http://localhost:8085/api', 18 | 'banner':'docs/electrum.banner', 'path':'/var/data/sqlchain' } 19 | 20 | srvinfo = { 'version':version, 'banner':'', 'block':0, 'header':{} } 21 | subs = { 'numblocks':{}, 'headers':{}, 'address':{}, '_ip_':{} } 22 | 23 | def ReqHandler(): 24 | while True: 25 | #resp = None 26 | fp,req = sqc.reqQ.get() 27 | print 'REQ', req 28 | args = req['method'].split('.') 29 | val = req['params'][0] if len(req['params']) > 0 else 1 30 | if args[-1] == 'subscribe': 31 | if args[1] in subs and not getSubs(args[1], val, fp): 32 | addSub(args[1], val, fp) 33 | respSub(args[1], fp, req) 34 | elif args[0] == 'server': 35 | sqc.respQ.put((fp, req['id'], srvinfo[args[1]] if args[1] in srvinfo else {})) 36 | elif req['method'] in reqFuncs: 
37 | spawn(reqFuncs[req['method']], fp, req) 38 | else: 39 | logts("Bad Req %s:%d - %s" % (subs['_ip_'][fp][0]+(req['method'],))) 40 | 41 | def RespHandler(): 42 | while True: 43 | fp,reqid,resp = sqc.respQ.get() 44 | resp = json.dumps({ 'id':reqid, 'result':resp } if resp is None or not 'error' in resp else { 'id':reqid,'error':resp['error'] }) 45 | print "RESP", reqid, resp 46 | fp.write(resp+'\n') 47 | fp.flush() 48 | 49 | def SyncHandler(): 50 | sync_id = 0 51 | while True: 52 | resp = apicall('/sync/'+str(sync_id)) 53 | if resp and 'error' in resp: 54 | sleep(30) 55 | elif resp: 56 | if resp['block'] != srvinfo['block']: 57 | srvinfo['block'] = resp['block'] 58 | pubSubs('numblocks', msg=resp['block']) 59 | hdr = apicall('/block-index/'+str(resp['block'])+'/electrum') 60 | if hdr != srvinfo['header']: 61 | srvinfo['header'] = hdr 62 | pubSubs('headers', msg=hdr) 63 | if len(resp['txs']) > 0: 64 | for tx in resp['txs']: 65 | pubSubs('address', addrs=getAddrs(tx)) 66 | sync_id = resp['sync_id'] 67 | 68 | def TcpHandler(sock, address): 69 | fp = sock.makefile() 70 | addSub('_ip_', address, fp) 71 | while True: 72 | line = fp.readline() 73 | if line: 74 | sqc.reqQ.put((fp, json.loads(line))) 75 | else: 76 | break 77 | delSubs(fp) 78 | sock.shutdown(socket.SHUT_WR) 79 | sock.close() 80 | 81 | def pubSubs(sub, msg=None, addrs=None): 82 | if addrs: 83 | for addr in addrs: 84 | fps = getSubs(sub, addr) 85 | if len(fps) > 0: 86 | data = apicall('/history/'+addr+'/status') 87 | for fp in fps: 88 | sqc.respQ.put((fp, None, data)) 89 | if msg: 90 | for fp in getSubs(sub): 91 | sqc.respQ.put((fp, None, msg)) 92 | 93 | def getSubs(sub, val=1, key=None): 94 | if key: 95 | return key in subs[sub] and val in subs[sub][key] 96 | if val == 1: 97 | return subs[sub].keys() 98 | fps = [] 99 | for k in subs[sub]: 100 | if val in subs[sub][k]: 101 | fps.append(k) 102 | return fps 103 | 104 | def addSub(sub, val, key): 105 | if key in subs[sub]: 106 | subs[sub][key].add(val) 107 | 
else: 108 | subs[sub][key] = set(val) 109 | 110 | def delSubs(key): 111 | for sub in subs: 112 | if key in subs[sub]: 113 | del subs[sub][key] 114 | 115 | def respSub(to, fp, req): 116 | if to == 'address': 117 | spawn(addrHistory, fp, req, '/status') 118 | elif to == 'numblocks': 119 | sqc.respQ.put((fp, req['id'], srvinfo['block'])) 120 | elif to == 'headers': 121 | sqc.respQ.put((fp, req['id'], srvinfo['header'])) 122 | else: 123 | sqc.respQ.put((fp, req['id'], [])) 124 | 125 | def addrHistory(fp, req, args=''): 126 | data = apicall('/history/'+req['params'][0] + args) 127 | sqc.respQ.put((fp, req['id'], data if args else data['txs'] if len(data['txs']) > 0 else None )) 128 | 129 | def addrBalance(fp, req): 130 | sqc.respQ.put((fp, req['id'], apicall('/history/'+req['params'][0]+'/balance'))) 131 | 132 | def addrMemPool(fp, req): 133 | sqc.respQ.put((fp, req['id'], apicall('/history/'+req['params'][0]+'/uncfmd'))) 134 | 135 | def addrUnspent(fp, req): 136 | sqc.respQ.put((fp, req['id'], apicall('/history/'+req['params'][0]+'/utxo'))) 137 | 138 | def addrProof(fp, req): # pylint:disable=unused-argument 139 | pass 140 | 141 | def blkHeader(fp, req): 142 | sqc.respQ.put((fp, req['id'], apicall('/block-index/'+req['params'][0]+'/electrum') )) 143 | 144 | def blkChunk(fp, req): 145 | sqc.respQ.put((fp, req['id'], getChunk(int(req['params'][0]), sqc.cfg).encode('hex') )) 146 | 147 | def utxoAddress(fp, req): 148 | sqc.respQ.put((fp, req['id'], apicall('/tx/'+req['params'][0]+'/output/%d' % req['params'][1]) )) 149 | 150 | def txGet(fp, req): 151 | sqc.respQ.put((fp, req['id'], apicall('/tx/'+req['params'][0]+'/raw') )) 152 | 153 | def txSend(fp, req): 154 | #logts("Tx Sent: %s" % txid) 155 | sqc.respQ.put((fp, req['id'], apicall('/tx/send', {'rawtx':req['params'][0]}) )) 156 | 157 | def txMerkle(fp, req): 158 | sqc.respQ.put((fp, req['id'], apicall('/merkle/'+req['params'][0]) )) 159 | 160 | def feeEstimate(fp, req): 161 | sqc.respQ.put((fp, req['id'], 
apicall('/util/estimatefee/'+req['params'][0]) )) 162 | 163 | reqFuncs = { 'blockchain.address.get_history':addrHistory, 'blockchain.address.get_balance':addrBalance, 164 | 'blockchain.address.get_mempool':addrMemPool, 'blockchain.address.get_proof':addrProof, 165 | 'blockchain.address.listunspent':addrUnspent, 'blockchain.utxo.get_address':utxoAddress, 166 | 'blockchain.block.get_header':blkHeader, 'blockchain.block.get_chunk':blkChunk, 167 | 'blockchain.transaction.broadcast':txSend, 'blockchain.transaction.get_merkle':txMerkle, 168 | 'blockchain.transaction.get':txGet, 'blockchain.estimatefee':feeEstimate } 169 | 170 | def getAddrs(tx): 171 | addrs = [] 172 | for vi in tx['inputs']: 173 | if 'addr' in vi['prev_out']: 174 | addrs.append(vi['prev_out']['addr']) 175 | for vo in tx['out']: 176 | addrs.append(vo['addr']) 177 | return addrs 178 | 179 | def options(cfg): # pylint:disable=too-many-branches 180 | try: 181 | opts,_ = getopt.getopt(sys.argv[1:], "hvb:p:c:d:l:w:p:s:a:u:b:", 182 | ["help", "version", "debug", "db=", "log=", "listen=", "www=", "user=", "banner=", "defaults" ]) 183 | except getopt.GetoptError: 184 | usage() 185 | for opt,arg in opts: 186 | if opt in ("-h", "--help"): 187 | usage() 188 | elif opt in ("-v", "--version"): 189 | sys.exit(sys.argv[0]+': '+version) 190 | elif opt in ("-d", "--db"): 191 | cfg['db'] = arg 192 | elif opt in ("-l", "--log"): 193 | cfg['log'] = arg 194 | elif opt in ("-w", "--www"): 195 | cfg['www'] = arg 196 | elif opt in ("-p", "--path"): 197 | cfg['path'] = arg 198 | elif opt in ("-s", "--listen"): 199 | cfg['listen'] = arg 200 | elif opt in ("-a", "--api"): 201 | cfg['api'] = arg 202 | elif opt in ("-u", "--user"): 203 | cfg['user'] = arg 204 | elif opt in ("-b", "--banner"): 205 | cfg['banner'] = arg 206 | elif opt in "--defaults": 207 | savecfg(cfg) 208 | sys.exit("%s updated" % (sys.argv[0]+'.cfg')) 209 | elif opt in "--debug": 210 | cfg['debug'] = True 211 | 212 | def usage(): 213 | print """Usage: {0} 
[options...][cfg file]\nCommand options are:\n-h,--help\tShow this help info\n-v,--version\tShow version info 214 | --debug\t\tRun in foreground with logging to console 215 | --defaults\tUpdate cfg and exit\nDefault files are {0}.cfg, {0}.log 216 | \nThese options get saved in cfg file as defaults. 217 | -s,--listen\tSet host:port for Electrum server\n-w,--www\tWeb server root directory\n-u,--user\tSet user to run as 218 | -p,--path\tSet path for header data files (/var/data/sqlchain) 219 | -b,--banner\tSet file path for banner text\n-a,--api\tSet host:port for API connection\n-l,--log\tSet log file path""".format(sys.argv[0]) 220 | sys.exit(2) 221 | 222 | def apicall(url, post=None): 223 | try: 224 | data = urlopen(sqc.cfg['api']+url, post).read() 225 | except URLError: 226 | logts("Error: sqlchain-api not at %s" % sqc.cfg['api']) 227 | return { 'error':'No api connection' } 228 | return json.loads(data) 229 | 230 | def sigterm_handler(_signo, _stack_frame): 231 | logts("Shutting down.") 232 | if not sqc.cfg['debug']: 233 | os.unlink(sqc.cfg['pid'] if 'pid' in sqc.cfg else sys.argv[0]+'.pid') 234 | sys.exit(0) 235 | 236 | def sighup_handler(_signo, _stack_frame): 237 | path = sqc.cfg['log'] if 'log' in sqc.cfg else sys.argv[0]+'.log' 238 | sys.stdout.close() 239 | sys.stdout=open(path,'a') 240 | sys.stderr.close() 241 | sys.stderr=open(path,'a') 242 | logts("SIGHUP Log reopened") 243 | 244 | def run(): 245 | monkey.patch_socket() 246 | 247 | with open(sqc.cfg['banner']) as bf: 248 | srvinfo['banner'] = bf.read() 249 | 250 | hdr = apicall('/block-index/latest/electrum') 251 | if 'error' in hdr: 252 | sys.exit(1) 253 | srvinfo['block'],srvinfo['header'] = hdr['block_height'],hdr 254 | 255 | sqc.reqQ = Queue() 256 | sqc.respQ = Queue() 257 | spawn(ReqHandler) 258 | spawn(RespHandler) 259 | spawn(SyncHandler) 260 | 261 | logts("Starting on %s" % sqc.cfg['listen']) 262 | host,port = sqc.cfg['listen'].split(':') 263 | cert = {'certfile':sqc.cfg['ssl']} if ('ssl' in 
sqc.cfg) and (sqc.cfg['ssl'] != '') else {} 264 | server = StreamServer((host, int(port)), TcpHandler, **cert) 265 | 266 | drop2user(sqc.cfg, chown=True) 267 | 268 | server.serve_forever() 269 | 270 | if __name__ == '__main__': 271 | 272 | loadcfg(sqc.cfg) 273 | options(sqc.cfg) 274 | 275 | if sqc.cfg['debug']: 276 | signal.signal(signal.SIGINT, sigterm_handler) 277 | run() 278 | else: 279 | logpath = sqc.cfg['log'] if 'log' in sqc.cfg else sys.argv[0]+'.log' 280 | pidpath = sqc.cfg['pid'] if 'pid' in sqc.cfg else sys.argv[0]+'.pid' 281 | with daemon.DaemonContext(working_directory='.', umask=0002, stdout=open(logpath,'a'), stderr=open(logpath,'a'), 282 | signal_map={signal.SIGTERM:sigterm_handler, signal.SIGHUP:sighup_handler } ): 283 | with file(pidpath,'w') as f: 284 | f.write(str(os.getpid())) 285 | run() 286 | -------------------------------------------------------------------------------- /sqlchain-api: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | from collections import OrderedDict 4 | from importlib import import_module 5 | from datetime import datetime 6 | from time import time, sleep 7 | import threading, mimetypes, json 8 | import os, sys, getopt, cgi, signal, daemon 9 | 10 | from geventwebsocket import WebSocketServer, WebSocketApplication, Resource 11 | 12 | from sqlchain.version import version, P2SH_FLAG, BECH32_FLAG 13 | from sqlchain.rpc import do_RPC 14 | from sqlchain.bci import isTxAddrs, bciBlockWS, bciTxWS 15 | from sqlchain.insight import apiStatus 16 | from sqlchain.dbpool import DBPool 17 | from sqlchain.util import dotdict, loadcfg, savecfg, drop2user, getssl, log, logts 18 | 19 | __builtins__.sqc = dotdict() # container for super globals 20 | 21 | sqc.cfg = { 'log':sys.argv[0]+'.log', 'listen':'localhost:8085', 'www':'www', 'block':0, 22 | 'pool':4, 'dbinfo-ts':datetime.now().strftime('%s'), 23 | 'dbinfo':-1, 'path':'/var/data/sqlchain', 'cointype':'bitcoin' } 24 | 25 | 
sqc.server = {} 26 | sqc.clients = {} # active websockets we publish to 27 | sqc.syncTxs,sqc.lastBlk = [],{} # current sync data shared for every sync/subscription 28 | sqc.sync = threading.Condition() 29 | sqc.sync_id = 0 30 | 31 | def do_Root(env, send_resp): 32 | try: 33 | path = env['PATH_INFO'] 34 | if env['REQUEST_METHOD'] == 'POST': # POST 35 | if path == '/': # the /rpc api is mirrored here as form params 36 | form = cgi.FieldStorage(fp=env['wsgi.input'], environ=env, keep_blank_values=True) 37 | env['PATH_INFO'] = "/rpc/%s/%s" % (form['method'].value, "/".join(form.getlist('params'))) 38 | return do_RPC(env, send_resp) 39 | elif sqc.cfg['www']: # GET static website files, if path configured 40 | path = '/main.html' if path in ['', '/'] else path 41 | if os.path.isfile(sqc.cfg['www']+path): 42 | _,ext = os.path.splitext(path) 43 | filesize = str(os.path.getsize(sqc.cfg['www']+path)) 44 | with open(sqc.cfg['www']+path) as fd: 45 | send_resp('200 OK', [('Content-Type', mimetypes.types_map[ext]), ('Content-Length', filesize), 46 | ('Expires', datetime.utcfromtimestamp(time()+3600).strftime("%a, %d %b %Y %H:%M:%S %ZGMT"))]) 47 | return [ fd.read() ] 48 | send_resp('404 - File Not Found: %s' % path, [("Content-Type", "text/html")], sys.exc_info()) 49 | if not sqc.cfg['www']: 50 | return [] 51 | with open(sqc.cfg['www']+'/404.html') as fd: 52 | return [ fd.read() ] 53 | except IOError: 54 | pass 55 | 56 | class BCIWebSocket(WebSocketApplication): 57 | remote = None 58 | def on_open(self, *args, **kwargs): 59 | self.remote = self.ws.environ['REMOTE_ADDR'] 60 | logts("WS Client connected from %s" % self.remote) 61 | sqc.clients[self.ws.handler.active_client] = { 'subs':[], 'addrs':set() } 62 | 63 | def on_message(self, msg, *args, **kwargs): # pylint:disable=arguments-differ 64 | if msg: 65 | msg = json.loads(msg) 66 | if msg['op'] in [ 'blocks_sub', 'unconfirmed_sub' ]: 67 | sqc.clients[self.ws.handler.active_client]['subs'].append(msg['op']) 68 | if msg['op'] == 
'addr_sub' and 'addr' in msg: 69 | sqc.clients[self.ws.handler.active_client]['addrs'].add(msg['addr']) 70 | if msg['op'] == 'ping_block': 71 | self.ws.send({ 'op': 'block', 'x': sqc.lastBlk }) 72 | if msg['op'] == 'ping_tx': 73 | if 'lasttx' in sqc.clients[self.ws.handler.active_client]: 74 | self.ws.send(json.dumps({ 'op': 'utx', 'x': sqc.clients[self.ws.handler.active_client]['lasttx'] })) 75 | 76 | def on_close(self, *args, **kwargs): 77 | logts("WS Client disconnected %s %s" % (self.remote, ''.join(args))) 78 | del sqc.clients[self.ws.handler.active_client] 79 | 80 | # monitor mempool, block, orphan changes - publish to websocket subscriptions, notify waiting sync connections 81 | def syncMonitor(): 82 | with sqc.dbpool.get().cursor() as cur: 83 | cur.execute("select greatest(ifnull(m,0),ifnull(o,0)) from (select max(sync_id) as m from mempool) m,(select max(sync_id) as o from orphans) o;") 84 | sqc.sync_id = cur.fetchone()[0] 85 | cur.execute("select ifnull(max(id),0) from blocks;") 86 | sqc.cfg['block'] = cur.fetchone()[0] 87 | if sqc.cfg['dbinfo'] == 0: 88 | sqc.dbwrk = threading.Thread(target = mkDBInfo) 89 | sqc.dbwrk.start() 90 | while not sqc.done.isSet(): 91 | with sqc.dbpool.get().cursor() as cur: 92 | txs,lastsync = [],0 93 | cur.execute("select hash,sync_id from mempool m, trxs t where m.sync_id > %s and t.id=m.id;", (sqc.sync_id,)) 94 | for txhash,sync_id in cur: 95 | txs.append(bciTxWS(cur, txhash[::-1].encode('hex'))) 96 | lastsync = max(lastsync, sync_id) 97 | if len(txs) > 0: 98 | sqc.syncTxs = txs 99 | cur.execute("select count(*) from orphans where sync_id > %s;", (sqc.sync_id,)) 100 | new_orphans = cur.fetchone()[0] > 0 101 | cur.execute("select max(id) from blocks;") 102 | block = cur.fetchone()[0] 103 | cur.execute("replace into info (class,`key`,value) values('info','ws-clients',%s),('info','connections',%s);", (len(sqc.clients), len(sqc.server.pool) if sqc.server.pool else 0)) 104 | if block != sqc.cfg['block'] or new_orphans or len(txs) 
> 0: 105 | do_Sync(block, lastsync) 106 | if sqc.cfg['dbinfo'] > 0 and (datetime.now() - datetime.fromtimestamp(int(sqc.cfg['dbinfo-ts']))).total_seconds() > sqc.cfg['dbinfo']*60: 107 | sqc.dbwrk = threading.Thread(target = mkDBInfo) 108 | sqc.dbwrk.start() 109 | sleep(sqc.cfg['sync'] if 'sync' in sqc.cfg else 5) 110 | if sqc.dbwrk: 111 | sqc.dbwrk.join() 112 | 113 | def do_Sync(block, lastsync): 114 | if block != sqc.cfg['block']: 115 | sqc.cfg['block'] = min(block, sqc.cfg['block']+1) 116 | with sqc.dbpool.get().cursor() as cur: 117 | sqc.lastBlk = bciBlockWS(cur, block) 118 | for client in sqc.server.clients.values(): 119 | if 'blocks_sub' in sqc.clients[client]['subs']: 120 | client.ws.send(json.dumps({ 'op': 'block', 'x': sqc.lastBlk })) 121 | sqc.sync_id = lastsync 122 | with sqc.sync: 123 | sqc.sync.notifyAll() 124 | if len(sqc.syncTxs) > 0: 125 | for client in sqc.server.clients.values(): 126 | for tx in sqc.syncTxs: 127 | if 'unconfirmed_sub' in sqc.clients[client]['subs'] or (sqc.clients[client]['addrs'] and isTxAddrs(tx, sqc.clients[client]['addrs'])): 128 | client.ws.send(json.dumps({ 'op': 'utx', 'x': tx })) 129 | sqc.clients[client]['lasttx'] = tx 130 | 131 | def mkDBInfo(): 132 | with sqc.dbpool.get().cursor() as cur: 133 | logts("Updating server db info") 134 | sqc.cfg['dbinfo-ts'] = datetime.now().strftime('%s') 135 | savecfg(sqc.cfg) 136 | apiStatus(cur, 'db') 137 | cur.execute("select count(*) from address where (id & %s = %s);", (P2SH_FLAG,P2SH_FLAG)) 138 | cur.execute("replace into info (class,`key`,value) values('db','address:p2sh',%s);", (cur.fetchone()[0], )) 139 | cur.execute("select count(*) from address where (id & %s = %s);", (BECH32_FLAG,BECH32_FLAG)) 140 | cur.execute("replace into info (class,`key`,value) values('db','address:p2wpkh',%s);", (cur.fetchone()[0], )) 141 | cur.execute("select count(*) from bech32 where 1;") 142 | cur.execute("replace into info (class,`key`,value) values('db','address:p2wsh',%s);", (cur.fetchone()[0], )) 
143 | cur.execute("select count(*) from address where cast(conv(hex(reverse(unhex(substr(sha2(addr,0),1,10)))),16,10) as unsigned) != floor(id);") 144 | cur.execute("replace into info (class,`key`,value) values('db','address:id-collisions',%s);", (cur.fetchone()[0], )) 145 | cur.execute("select count(*) from trxs where strcmp(reverse(unhex(hex(id*8))), left(hash,5)) > 0;") 146 | cur.execute("replace into info (class,`key`,value) values('db','trxs:id-collisions',%s);", (cur.fetchone()[0], )) 147 | cur.execute("select count(*) from outputs where addr_id=0;") 148 | cur.execute("replace into info (class,`key`,value) values('db','outputs:non-std',%s);", (cur.fetchone()[0], )) 149 | cur.execute("select count(*) from outputs where tx_id is null;") 150 | cur.execute("replace into info (class,`key`,value) values('db','outputs:unspent',%s);", (cur.fetchone()[0], )) 151 | cur.execute("replace into info (class,`key`,value) values('db','all:updated',now());") 152 | logts("DB info update complete") 153 | sqc.dbwrk = None 154 | 155 | def options(cfg): # pylint:disable=too-many-branches 156 | try: 157 | opts,_ = getopt.getopt(sys.argv[1:], "hvb:p:c:d:l:w:h:p:r:u:i:", 158 | ["help", "version", "debug", "db=", "log=", "www=", "listen=", "path=", "rpc=", "user=", "dbinfo=", "defaults" ]) 159 | except getopt.GetoptError: 160 | usage() 161 | for opt,arg in opts: 162 | if opt in ("-h", "--help"): 163 | usage() 164 | elif opt in ("-v", "--version"): 165 | sys.exit(sys.argv[0]+': '+version) 166 | elif opt in ("-d", "--db"): 167 | cfg['db'] = arg 168 | elif opt in ("-l", "--log"): 169 | cfg['log'] = arg 170 | elif opt in ("-w", "--www"): 171 | cfg['www'] = arg 172 | elif opt in ("-p", "--path"): 173 | cfg['path'] = arg 174 | elif opt in ("-h", "--listen"): 175 | cfg['listen'] = arg 176 | elif opt in ("-r", "--rpc"): 177 | cfg['rpc'] = arg 178 | elif opt in ("-u", "--user"): 179 | cfg['user'] = arg 180 | elif opt in ("-i","--dbinfo"): 181 | cfg['dbinfo'] = int(arg) 182 | elif opt in 
"--defaults": 183 | savecfg(cfg) 184 | sys.exit("%s updated" % (sys.argv[0]+'.cfg')) 185 | elif opt in "--debug": 186 | cfg['debug'] = True 187 | 188 | def usage(): 189 | print """Usage: {0} [options...][cfg file]\nCommand options are:\n-h,--help\tShow this help info\n-v,--version\tShow version info 190 | --debug\t\tRun in foreground with logging to console 191 | --defaults\tUpdate cfg and exit\nDefault files are {0}.cfg, {0}.log 192 | \nThese options get saved in cfg file as defaults. 193 | -p,--path\tSet path for blob and header data files (/var/data/sqlchain) 194 | -h,--listen\tSet host:port for web server\n-w,--www\tWeb server root directory\n-u,--user\tSet user to run as 195 | -d,--db \tSet mysql db connection, "host:user:pwd:dbname"\n-l,--log\tSet log file path 196 | -r,--rpc\tSet rpc connection, "http://user:pwd@host:port" 197 | -i,--dbinfo\tSet db info update period in minutes (default=180, 0=at start, -1=never) """.format(sys.argv[0]) 198 | sys.exit(2) 199 | 200 | def sigterm_handler(_signo, _stack_frame): 201 | logts("Shutting down.") 202 | sqc.done.set() 203 | if sqc.syncd: 204 | sqc.syncd.join() 205 | if not sqc.cfg['debug']: 206 | os.unlink(sqc.cfg['pid'] if 'pid' in sqc.cfg else sys.argv[0]+'.pid') 207 | sys.exit(0) 208 | 209 | def sighup_handler(_signo, _stack_frame): 210 | path = sqc.cfg['log'] if 'log' in sqc.cfg else sys.argv[0]+'.log' 211 | sys.stdout.close() 212 | sys.stdout=open(path,'a') 213 | sys.stderr.close() 214 | sys.stderr=open(path,'a') 215 | logts("SIGHUP Log reopened") 216 | 217 | def run(): 218 | sqc.done = threading.Event() 219 | sqc.dbpool = DBPool(sqc.cfg['db'].split(':'), sqc.cfg['pool'], 'MySQLdb') 220 | 221 | mimetypes.init() 222 | mimetypes.add_type('application/x-font-woff', '.woff') 223 | mimetypes.add_type('application/x-font-woff2', '.woff2') 224 | mimetypes.add_type('application/x-font-woff', '.ttf') 225 | 226 | logts("Starting on %s" % sqc.cfg['listen']) 227 | host,port = sqc.cfg['listen'].split(':') 228 | sqc.server = 
WebSocketServer((host, int(port)), APIs, spawn=10000, **getssl(sqc.cfg)) 229 | sqc.server.start() 230 | 231 | if 'sync' not in sqc.cfg or sqc.cfg['sync'] > 0: 232 | log("Sync monitoring at %d second intervals" % (sqc.cfg['sync'] if 'sync' in sqc.cfg else 5,)) 233 | sqc.syncd = threading.Thread(target = syncMonitor) 234 | sqc.syncd.daemon = True 235 | sqc.syncd.start() 236 | else: 237 | log("Sync monitor disabled") 238 | 239 | drop2user(sqc.cfg, chown=True) 240 | 241 | sqc.server.serve_forever() 242 | 243 | if __name__ == '__main__': 244 | 245 | loadcfg(sqc.cfg) 246 | options(sqc.cfg) 247 | 248 | if 'apis' not in sqc.cfg: 249 | apis = [("/api", getattr(import_module("sqlchain.insight"),"do_API"))] 250 | else: 251 | apis = [] 252 | for api in sqc.cfg['apis']: 253 | log("Adding api at %s" % api[0]) 254 | apis.append((api[0], getattr( import_module(api[1]) if api[1] else sys.modules[__name__], api[2]))) 255 | APIs = Resource(OrderedDict(apis)) 256 | 257 | if sqc.cfg['debug']: 258 | signal.signal(signal.SIGINT, sigterm_handler) 259 | run() 260 | else: 261 | logpath = sqc.cfg['log'] if 'log' in sqc.cfg else sys.argv[0]+'.log' 262 | pidpath = sqc.cfg['pid'] if 'pid' in sqc.cfg else sys.argv[0]+'.pid' 263 | with daemon.DaemonContext(working_directory='.', umask=0002, stdout=open(logpath,'a'), stderr=open(logpath,'a'), 264 | signal_map={signal.SIGTERM:sigterm_handler, signal.SIGHUP:sighup_handler } ): 265 | with file(pidpath,'w') as f: 266 | f.write(str(os.getpid())) 267 | run() 268 | -------------------------------------------------------------------------------- /www/main.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 |sqlChain Demo - Bitcoin SQL Blockchain Explorer 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 19 | 20 | 21 | 22 | 44 | 45 |46 |47 | 48 | 49 |205 | 206 | 207 | 208 | 209 | 210 | -------------------------------------------------------------------------------- /tests/test_utils_bitcoin.py: 
-------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # 3 | # sqlchain.utils - unit test module 4 | # 5 | 6 | import os, sys 7 | from struct import unpack 8 | 9 | try: 10 | import MySQLdb as db 11 | except ImportError: 12 | print "Cannot run database tests without MySQLdb module" 13 | 14 | import pytest 15 | 16 | from sqlchain.version import ADDR_ID_FLAGS, P2SH_FLAG, BECH32_FLAG, BECH32_LONG 17 | from sqlchain.util import dotdict, is_address, addr2pkh, mkaddr, addr2id, decodeScriptPK, mkOpCodeStr, decodeVarInt, encodeVarInt 18 | from sqlchain.util import txh2id, insertAddress, findTx 19 | 20 | __builtins__['sqc'] = dotdict() # container for super globals 21 | sqc.cfg = { 'cointype':'bitcoin' } 22 | 23 | # memory based test db with same schema 24 | # remains after test run for inspection, cleared at start of each run 25 | # does not survive mysql restart or os reboot 26 | @pytest.fixture(scope="module") 27 | def testdb(request): 28 | if 'MySQLdb' not in sys.modules: 29 | pytest.skip("requires MySQLdb to run") 30 | return None 31 | dbuser,dbpwd = request.config.getoption("--dbuser").split(':') 32 | try: 33 | sql = db.connect('localhost',dbuser,dbpwd,'') 34 | except db.OperationalError: 35 | pytest.skip("requires mysql running + admin user/pwd to run") 36 | return None 37 | cur = sql.cursor() 38 | cur.execute("set sql_notes=0;") 39 | cur.execute("show databases like 'unittest';") 40 | if cur.rowcount > 0: 41 | print "\nClearing test db" 42 | cur.execute("drop database unittest;") 43 | sqlsrc = open('/usr/local/share/sqlchain/docs/sqlchain.sql').read() 44 | sqlcode = '' 45 | for k,v in [('{dbeng}','Memory'),('{dbname}','unittest'),('{dbpwd}',dbpwd),('{dbuser}',dbuser)]: 46 | sqlsrc = sqlsrc.replace(k, v) 47 | for line in sqlsrc.splitlines(): 48 | if line != '' and line[:2] != '--': 49 | sqlcode += line 50 | for stmnt in sqlcode.split(';'): 51 | if stmnt: 52 | cur.execute(stmnt) 53 | return cur 54 | 55 | def 
test_is_address(): 56 | #p2pkh 57 | assert is_address('1FomKJy8btcmuyKBFXeZmje94ibnQxfDEf') 58 | assert is_address('1EWpTBe9rE27NT9boqg8Zsc643bCFCEdbh') 59 | assert is_address('1MBxxUgVF27trqqBMnoz8Rr7QATEoz1u2Y') 60 | assert not is_address('1MBxxUgVF27trqqCMnoz8Rr7QATEoz1u2Y') 61 | assert not is_address('1EWpTBe9rE27NT9boqg8Zsc643bCFCEdbc') 62 | assert not is_address('3EWpTBe9rE27NT9boqg8Zsc643bCFCEdbh') 63 | #p2sh 64 | assert is_address('3De5zB9JKmwU4zP85EEazYS3MEDVXSmvvm') 65 | assert is_address('3MixsgkBB8NBQe5GAxEj4eGx5YPxvbaSk9') 66 | assert is_address('3HQR7C1Ag53BoaxKDeaA97wTh9bpGuUpgg') 67 | assert not is_address('3HQR7C1Ag53BoaxKDeaA97wTh7bpGuUpgg') 68 | assert not is_address('2MixsgkBB8NBQe5GAxEj4eGx5YPxvbaSk9') 69 | #p2wpkh 70 | assert is_address('bc1qs5d7gy4l7k7nm5rqzda8qruh7kqzhjdhgn7upf') 71 | assert is_address('bc1qvee24y274ymfxx0luvl2jsr6mfxmewd22jfvwd') 72 | assert is_address('bc1qxn8tc5kmuu2sevvjz0xxcz4dm2c42pxd9ea0dt') 73 | assert is_address('BC1QW508D6QEJXTDG4Y5R3ZARVARY0C5XW7KV8F3T4') 74 | assert is_address('BC1SW50QA3JX3S') 75 | assert is_address('bc1zw508d6qejxtdg4y5r3zarvaryvg6kdaj') 76 | assert not is_address('bc1qw508d6qejxtdg4y5r3zarvary0c5xw7kv8f3t5') 77 | assert not is_address('bc1qw508d6qejxtdg4y5r3zarvary0c5xw7kv8f3t5') 78 | assert not is_address('BC13W508D6QEJXTDG4Y5R3ZARVARY0C5XW7KN40WF2') 79 | assert not is_address('cb1zw508d6qejxtdg4y5r3zarvaryvg6kdaj') 80 | #p2wsh 81 | assert is_address('bc1qrp33g0q5c5txsp9arysrx4k6zdkfs4nce4xj0gdcccefvpysxf3qccfmv3') 82 | assert is_address('bc1ql0y3lcuy6937hauw7ur304dd9fmw4ca7tt4kr99uda7cg7walvystw4gyu') 83 | assert is_address('bc1qadvzzmf5fzh7546n2ms76vkl0rd65wlg753dq4ds0v30urtpxlasf5lc7a') 84 | assert is_address('bc1pw508d6qejxtdg4y5r3zarvary0c5xw7kw508d6qejxtdg4y5r3zarvary0c5xw7k7grplx') 85 | assert not is_address('tb1qrp33g0q5c5txsp9arysrx4k6zdkfs4nce4xj0gdcccefvpysxf3q0sL5k7') 86 | assert not is_address('bc10w508d6qejxtdg4y5r3zarvary0c5xw7kw508d6qejxtdg4y5r3zarvary0c5xw7kw5rljs90') 87 | 
assert not is_address('bc1rw5uspcuh') 88 | 89 | def test_addr2pkh(): 90 | #p2pkh 91 | assert addr2pkh('127RhwC5vQJN4cJ6UaHc1A9NCSpz1e9B4i') =='0c2f3eb0fa5f65269236658bc361187dfaa964bb'.decode('hex') 92 | assert addr2pkh('1JS2xvSfG2hD3rnMGd3xxEeYSoBs8r7eKh') =='bf362d4dda191483e789ccf3059d6447cd64bb9c'.decode('hex') 93 | assert addr2pkh('1DK2kyHNMUx8XoWZm9t2GWqJGzqBNxUYuv') =='870a76dd469ab77084229a61984db634abaafb8b'.decode('hex') 94 | #p2sh 95 | assert addr2pkh('34H8pSTwFNEngG5xfadqctdQykcGgRmSgf') =='1c6426545908803de2a4ed61caf805ccc282900f'.decode('hex') #2of2 96 | assert addr2pkh('3KKXcGTmxvedJr9GrzWayA8GVnS5AXm8tj') =='c161e4848786150e2add1a93f084fa94a7259b97'.decode('hex') #2of3 97 | assert addr2pkh('38oAwJnDWRTWf1GUg7FJ112bjVRoMjvCmV') =='4df2e66aeb640a642c8476185f63e433ad074220'.decode('hex') #3of4 98 | #p2wpkh-p2sh 99 | assert addr2pkh('3F2YodB6PAzbov1rAkYVMNu6KBB1g9AHrG') =='924b50fdfc0e0afab1b1d12acae31c3b4a215154'.decode('hex') 100 | assert addr2pkh('36LF9sFUJQAzGgxKtrVFDcXqmTF9yyVeow') =='32eaeff4e7e856e74dcf0926724d04324320eb75'.decode('hex') 101 | assert addr2pkh('35FowTfm9qpeKGX9VQuuSrcgDiBd9SczAi') =='271c19a61825788201434354d2a3a6b03d23e316'.decode('hex') 102 | assert addr2pkh('3Pux8TuPxZHm7RsBvAP9zjkF3jCcw9K7wL') =='f3c501dd6b3086911f7b9e7eea0dade0de025287'.decode('hex') 103 | #p2wsh-p2sh - unavailble 104 | #p2wpkh 105 | assert addr2pkh('BC1QW508D6QEJXTDG4Y5R3ZARVARY0C5XW7KV8F3T4').encode('hex') == '751e76e8199196d454941c45d1b3a323f1433bd6' # bip173 106 | assert addr2pkh('bc1q5lz8xffjt4azkzm4hled45qpgcu46thhl6j7vm').encode('hex') == 'a7c47325325d7a2b0b75bff2dad00146395d2ef7' # electrum 107 | assert addr2pkh('bc1qzlc8mvcyww95ycfgf520y7yvu64qhta6uqxada').encode('hex') == '17f07db304738b4261284d14f2788ce6aa0bafba' # electrum 108 | assert addr2pkh('bc1qtcpsntfzjx7mj6ljqy480sdufnh2nuwqhtsz8g').encode('hex') == '5e0309ad2291bdb96bf2012a77c1bc4ceea9f1c0' # electrum 109 | assert addr2pkh('bc1q0yrdw9t2pyev94jfeyq9mm4a0smfdswfweht6t').encode('hex') == 
'7906d7156a0932c2d649c9005deebd7c3696c1c9' # electrum 110 | #p2wsh 111 | assert addr2pkh('bc1qm7fcgs9ugg66rw5tg2w7sy0m0afttnnucr59hcmpa87sezd769vsac7pmy') \ 112 | =='df938440bc4235a1ba8b429de811fb7f52b5ce7cc0e85be361e9fd0c89bed159'.decode('hex') #2of2 electrum 113 | assert addr2pkh('bc1q5gp20lfuhz2avvqwau6sgwmakrp5r2qv66x56rfr9t30halv4vfs283f6e') \ 114 | =='a202a7fd3cb895d6300eef35043b7db0c341a80cd68d4d0d232ae2fbf7ecab13'.decode('hex') #2of2 electrum 115 | assert addr2pkh('bc1qs5vep8zczr6rfskq3euz44zjnv05zmhkp84jhkufufsdy2ygfr7qr8x759') \ 116 | =='8519909c5810f434c2c08e782ad4529b1f416ef609eb2bdb89e260d2288848fc'.decode('hex') #2of2 electrum 117 | #bech32, future versions from bip173 spec. 118 | assert addr2pkh('bc1pw508d6qejxtdg4y5r3zarvary0c5xw7kw508d6qejxtdg4y5r3zarvary0c5xw7k7grplx') \ 119 | == '751e76e8199196d454941c45d1b3a323f1433bd6751e76e8199196d454941c45d1b3a323f1433bd6'.decode('hex') # witver 0x51 120 | assert addr2pkh('BC1SW50QA3JX3S') == '751e'.decode('hex') # witver 0x60 121 | assert addr2pkh('bc1zw508d6qejxtdg4y5r3zarvaryvg6kdaj') =='751e76e8199196d454941c45d1b3a323'.decode('hex') # witver 0x52 122 | 123 | def test_mkaddr(): 124 | #p2pkh 125 | assert mkaddr('0c2f3eb0fa5f65269236658bc361187dfaa964bb'.decode('hex')) == '127RhwC5vQJN4cJ6UaHc1A9NCSpz1e9B4i' 126 | assert mkaddr('bf362d4dda191483e789ccf3059d6447cd64bb9c'.decode('hex')) == '1JS2xvSfG2hD3rnMGd3xxEeYSoBs8r7eKh' 127 | assert mkaddr('870a76dd469ab77084229a61984db634abaafb8b'.decode('hex')) == '1DK2kyHNMUx8XoWZm9t2GWqJGzqBNxUYuv' 128 | #p2sh 129 | assert mkaddr('1c6426545908803de2a4ed61caf805ccc282900f'.decode('hex'),p2sh=True) == '34H8pSTwFNEngG5xfadqctdQykcGgRmSgf' 130 | assert mkaddr('c161e4848786150e2add1a93f084fa94a7259b97'.decode('hex'),p2sh=True) == '3KKXcGTmxvedJr9GrzWayA8GVnS5AXm8tj' 131 | assert mkaddr('4df2e66aeb640a642c8476185f63e433ad074220'.decode('hex'),p2sh=True) == '38oAwJnDWRTWf1GUg7FJ112bjVRoMjvCmV' 132 | #p2wpkh-p2sh 133 | assert 
mkaddr('924b50fdfc0e0afab1b1d12acae31c3b4a215154'.decode('hex'),p2sh=True) == '3F2YodB6PAzbov1rAkYVMNu6KBB1g9AHrG' 134 | assert mkaddr('32eaeff4e7e856e74dcf0926724d04324320eb75'.decode('hex'),p2sh=True) == '36LF9sFUJQAzGgxKtrVFDcXqmTF9yyVeow' 135 | assert mkaddr('271c19a61825788201434354d2a3a6b03d23e316'.decode('hex'),p2sh=True) == '35FowTfm9qpeKGX9VQuuSrcgDiBd9SczAi' 136 | #p2wsh-p2sh - unavailble 137 | #p2wpkh 138 | assert mkaddr('751e76e8199196d454941c45d1b3a323f1433bd6'.decode('hex'),bech32=True) == 'BC1QW508D6QEJXTDG4Y5R3ZARVARY0C5XW7KV8F3T4'.lower() 139 | assert mkaddr('a7c47325325d7a2b0b75bff2dad00146395d2ef7'.decode('hex'),bech32=True) == 'bc1q5lz8xffjt4azkzm4hled45qpgcu46thhl6j7vm' 140 | assert mkaddr('7906d7156a0932c2d649c9005deebd7c3696c1c9'.decode('hex'),bech32=True) == 'bc1q0yrdw9t2pyev94jfeyq9mm4a0smfdswfweht6t' 141 | #p2wsh 142 | assert mkaddr('df938440bc4235a1ba8b429de811fb7f52b5ce7cc0e85be361e9fd0c89bed159'.decode('hex'),bech32=True) \ 143 | == 'bc1qm7fcgs9ugg66rw5tg2w7sy0m0afttnnucr59hcmpa87sezd769vsac7pmy' 144 | assert mkaddr('a202a7fd3cb895d6300eef35043b7db0c341a80cd68d4d0d232ae2fbf7ecab13'.decode('hex'),bech32=True) \ 145 | == 'bc1q5gp20lfuhz2avvqwau6sgwmakrp5r2qv66x56rfr9t30halv4vfs283f6e' 146 | assert mkaddr('8519909c5810f434c2c08e782ad4529b1f416ef609eb2bdb89e260d2288848fc'.decode('hex'),bech32=True) \ 147 | == 'bc1qs5vep8zczr6rfskq3euz44zjnv05zmhkp84jhkufufsdy2ygfr7qr8x759' 148 | 149 | def test_addr2id(): 150 | assert addr2id('127RhwC5vQJN4cJ6UaHc1A9NCSpz1e9B4i') & ADDR_ID_FLAGS == 0 151 | assert addr2id('34H8pSTwFNEngG5xfadqctdQykcGgRmSgf') & ADDR_ID_FLAGS == P2SH_FLAG 152 | assert addr2id('3Pux8TuPxZHm7RsBvAP9zjkF3jCcw9K7wL') & ADDR_ID_FLAGS == P2SH_FLAG 153 | assert addr2id('BC1QW508D6QEJXTDG4Y5R3ZARVARY0C5XW7KV8F3T4') & ADDR_ID_FLAGS == BECH32_FLAG 154 | assert addr2id('bc1q0yrdw9t2pyev94jfeyq9mm4a0smfdswfweht6t') & ADDR_ID_FLAGS == BECH32_FLAG 155 | assert addr2id('bc1qm7fcgs9ugg66rw5tg2w7sy0m0afttnnucr59hcmpa87sezd769vsac7pmy') & 
ADDR_ID_FLAGS == BECH32_LONG 156 | assert addr2id('bc1pw508d6qejxtdg4y5r3zarvary0c5xw7kw508d6qejxtdg4y5r3zarvary0c5xw7k7grplx') & ADDR_ID_FLAGS == BECH32_LONG 157 | 158 | assert addr2id('127RhwC5vQJN4cJ6UaHc1A9NCSpz1e9B4i', rtnPKH=True) == (369302191541,'0c2f3eb0fa5f65269236658bc361187dfaa964bb'.decode('hex')) 159 | assert addr2id('34H8pSTwFNEngG5xfadqctdQykcGgRmSgf', rtnPKH=True) == (1260639692375,'1c6426545908803de2a4ed61caf805ccc282900f'.decode('hex')) 160 | assert addr2id('3Pux8TuPxZHm7RsBvAP9zjkF3jCcw9K7wL', rtnPKH=True) == (1905635504253,'f3c501dd6b3086911f7b9e7eea0dade0de025287'.decode('hex')) 161 | assert addr2id('BC1QW508D6QEJXTDG4Y5R3ZARVARY0C5XW7KV8F3T4', rtnPKH=True) \ 162 | == (2239906766591,'751e76e8199196d454941c45d1b3a323f1433bd6'.decode('hex')) 163 | assert addr2id('bc1q0yrdw9t2pyev94jfeyq9mm4a0smfdswfweht6t', rtnPKH=True) \ 164 | == (2322962910768,'7906d7156a0932c2d649c9005deebd7c3696c1c9'.decode('hex')) 165 | assert addr2id('bc1qm7fcgs9ugg66rw5tg2w7sy0m0afttnnucr59hcmpa87sezd769vsac7pmy', rtnPKH=True) \ 166 | == (3310624892327,'df938440bc4235a1ba8b429de811fb7f52b5ce7cc0e85be361e9fd0c89bed159'.decode('hex')) 167 | assert addr2id('bc1pw508d6qejxtdg4y5r3zarvary0c5xw7kw508d6qejxtdg4y5r3zarvary0c5xw7k7grplx', rtnPKH=True) \ 168 | == (4041402476188,'751e76e8199196d454941c45d1b3a323f1433bd6751e76e8199196d454941c45d1b3a323f1433bd6'.decode('hex')) 169 | 170 | data = [ 171 | ['76a9140c2f3eb0fa5f65269236658bc361187dfaa964bb88ac','p2pkh','','127RhwC5vQJN4cJ6UaHc1A9NCSpz1e9B4i', 172 | 'OP_DUP OP_HASH160 0c2f3eb0fa5f65269236658bc361187dfaa964bb OP_EQUALVERIFY OP_CHECKSIG'], # p2pkh 173 | 174 | ['a9141c6426545908803de2a4ed61caf805ccc282900f87','p2sh','','34H8pSTwFNEngG5xfadqctdQykcGgRmSgf', 175 | 'OP_HASH160 1c6426545908803de2a4ed61caf805ccc282900f OP_EQUAL'], # p2sh 176 | 177 | ['210298d26fa24aca4b1fdf7bc0d73bf875c3e10b198fb47de414cff39c7229dbacc6AC','p2pk', 178 | 
'210298d26fa24aca4b1fdf7bc0d73bf875c3e10b198fb47de414cff39c7229dbacc6AC','1G7AYiSCXMKyVeSVcPUe8PqgfygiqxZyeX', 179 | '0298d26fa24aca4b1fdf7bc0d73bf875c3e10b198fb47de414cff39c7229dbacc6 OP_CHECKSIG'], # p2pk compressed 180 | 181 | ['4104E9A095A6A5790BC82FEADE07EE6FC77B05BC4DE7F3790C36D2ECC886D9EC0AC0E44402759C51ED0D3BA2F53E749B30A6D1772F0DAE1E3F465E8C8828DF899FE2AC','p2pk', 182 | '4104E9A095A6A5790BC82FEADE07EE6FC77B05BC4DE7F3790C36D2ECC886D9EC0AC0E44402759C51ED0D3BA2F53E749B30A6D1772F0DAE1E3F465E8C8828DF899FE2AC', 183 | '1JGTdegLcK8N9mqwhXmGjeUgbQNugii3rm', # p2pk uncompressed 184 | '04e9a095a6a5790bc82feade07ee6fc77b05bc4de7f3790c36d2ecc886d9ec0ac0e44402759c51ed0d3ba2f53e749b30a6d1772f0dae1e3f465e8c8828df899fe2 OP_CHECKSIG'], 185 | 186 | ['a914924b50fdfc0e0afab1b1d12acae31c3b4a21515487','p2sh','','3F2YodB6PAzbov1rAkYVMNu6KBB1g9AHrG', 187 | 'OP_HASH160 924b50fdfc0e0afab1b1d12acae31c3b4a215154 OP_EQUAL'], # p2sh(p2wpkh) 188 | 189 | ['0014a7c47325325d7a2b0b75bff2dad00146395d2ef7','p2wpkh','','bc1q5lz8xffjt4azkzm4hled45qpgcu46thhl6j7vm', 190 | 'OP_0 a7c47325325d7a2b0b75bff2dad00146395d2ef7'], # p2wpkh 191 | 192 | ['0020a202a7fd3cb895d6300eef35043b7db0c341a80cd68d4d0d232ae2fbf7ecab13','p2wsh','', 193 | 'bc1q5gp20lfuhz2avvqwau6sgwmakrp5r2qv66x56rfr9t30halv4vfs283f6e', 194 | 'OP_0 a202a7fd3cb895d6300eef35043b7db0c341a80cd68d4d0d232ae2fbf7ecab13'], # p2wsh 195 | 196 | ['5128751e76e8199196d454941c45d1b3a323f1433bd6751e76e8199196d454941c45d1b3a323f1433bd6','other', 197 | '5128751e76e8199196d454941c45d1b3a323f1433bd6751e76e8199196d454941c45d1b3a323f1433bd6', # future bech32, witver 0x51 198 | 199 | 'bc1pw508d6qejxtdg4y5r3zarvary0c5xw7kw508d6qejxtdg4y5r3zarvary0c5xw7k7grplx', 200 | 'OP_1 751e76e8199196d454941c45d1b3a323f1433bd6751e76e8199196d454941c45d1b3a323f1433bd6'], 201 | ['6002751e','other','6002751e','BC1SW50QA3JX3S','OP_16 751e'] # future bech32, witver 0x60 202 | ] 203 | 204 | def test_decodeScriptPK(): 205 | for row in data: 206 | r = 
decodeScriptPK(row[0].decode('hex')) 207 | assert r['type'] == row[1] 208 | assert r['data'].encode('hex').lower() == row[2].lower() 209 | if 'addr' in r: 210 | assert r['addr'] == row[3] 211 | 212 | def test_mkOpCodeStr(): 213 | for row in data: 214 | assert mkOpCodeStr(row[0].decode('hex'), sepPUSH=' ') == row[4] 215 | 216 | def test_VarInt(): 217 | values = [ (1, [ 0,1,2,55,192,192,234,252]), 218 | (3, [ 253, 255,256,257,4000,16500,47654,2**16-1]), 219 | (5, [2**16,2**16+1,2**16+2,2*2**16,2**24,2**32-1]), 220 | (9, [2**32,2**32+1,2**32+2,2**40+234,2**42,2**44+2**24-5,2**48 ]) ] 221 | for (L,grp) in values: 222 | for N in grp: 223 | assert decodeVarInt(encodeVarInt(N)) == ( N,L ) 224 | 225 | def test_insertAddress(testdb, monkeypatch): 226 | addrs = [ '1FomKJy8btcmuyKBFXeZmje94ibnQxfDEf','1EWpTBe9rE27NT9boqg8Zsc643bCFCEdbh','1MBxxUgVF27trqqBMnoz8Rr7QATEoz1u2Y', 227 | '1EWpTBe9rE27NT9b1qg8Zsc643bCFCEdbh','3EWpTBe9rE27NT9boqg8Zsc643bCFCEdbh','3De5zB9JKmwU4zP85EEazYS3MEDVXSmvvm', 228 | '3MixsgkBB8NBQe5GAxEj4eGx5YPxvbaSk9','3HQR7C1Ag53BoaxKDeaA97wTh9bpGuUpgg','2MixsgkBB8NBQe5GAxEj4eGx5YPxvbaSk9', 229 | 'bc1q5lz8xffjt4azkzm4hled45qpgcu46thhl6j7vm','bc1q0yrdw9t2pyev94jfeyq9mm4a0smfdswfweht6t', 230 | '1EWpTBe9rE27NT9boqg8Zsc643bCFCEdbh', # duplicate, should not add row 231 | 'bc1q5gp20lfuhz2avvqwau6sgwmakrp5r2qv66x56rfr9t30halv4vfs283f6e' ] # bech32 table, should not add row 232 | 233 | def fake_id(addr, cur=None, rtnPKH=False): # forces collisions by always returning same id 234 | x = monkeypatch._setattr[0][2](addr, cur, rtnPKH) 235 | return ((x[0]&ADDR_ID_FLAGS)|123456,x[1]) if isinstance(x, tuple) else (x&ADDR_ID_FLAGS)|123456 # keep flags 236 | monkeypatch.setattr("sqlchain.util.addr2id", fake_id) 237 | 238 | for addr in addrs: 239 | insertAddress(testdb, addr) 240 | testdb.execute("select count(1) from address where id !=0;") 241 | assert testdb.fetchone()[0] == 11 # 13 minus 2 addresses not inserted 242 | 243 | def test_findTx(testdb): 244 | trxs = [] 245 | tx1 = 
bytearray(os.urandom(32)) 246 | for x in range(16): 247 | tx1[-1] = chr((int(tx1[-1])+x)&0xFF) # use sequential tx hashes to test collisions 248 | tid,new = findTx(testdb, tx1, True) 249 | testdb.execute("insert ignore into trxs (id,hash,ins,outs,txsize) values (%s,%s,0,0,0);", (tid,tx1)) 250 | trxs.append(tid) 251 | assert len(set(trxs)) == len(trxs) # all ids should be unique 252 | 253 | for x in range(1000): 254 | tx1 = os.urandom(32) 255 | assert txh2id(tx1) == unpack('50 | 51 | sqlChain is a compact SQL layer that runs on top of bitcoind. It extends the query options on the 52 | blockchain with a priority towards lower storage demands. 53 | This demonstration server provides multiple API (compatible) interfaces:74 | 75 |
54 |55 |
60 | The Demo API page above documents the queries supported and differences to the original sites.- Insight API (with some extensions)
56 |- Blockchain.info API (with WebSocket)
57 |- RPC via POST, GET urls
58 |- Electrum server
59 |
61 | Three daemon programs are provided:
62 |63 |
67 | Using sqlChain over a pruning node as a compact alternative, a web socket interface, and other ideas, are currently under development. 68 | Status and blockchain analysis information below is updated periodically from this server. The open source Python code is 69 | available in the neoCogent Github and please check out 70 | my neoCogent blog. 71 | This information is provided by the /api/status/db/html call. 72 | 73 |- sqlchaind updates the mysql backend from bitcoind.
64 |- sqlchain-api provides an the API and web interfaces for querying the database.
65 |- sqlchain-api provides the API and web interfaces for querying the database.
66 |76 |167 | 168 |77 | 87 |88 | 89 |90 |
111 | 112 |- /api/block/0000000000001271efd5d9f7e539909160a181b2c0a2b8c164d6f8159e5c7dd9
91 |- /api/block-index/123432
92 |- /api/tx/23bb66ef300714042085d0ed2d05100531e80d5239020545887df059c0178b56
93 |- /api/rawtx/23bb66ef300714042085d0ed2d05100531e80d5239020545887df059c0178b56
94 |- /api/addr/1JK6pUCAXfnvcbXEpdVSxhVZ8W6kxQ4VEH?noTxList=1
95 |Spelling mistake on txApperances is maintained for compatibility.96 |- /api/addr/1JK6pUCAXfnvcbXEpdVSxhVZ8W6kxQ4VEH/balance
97 |- /api/addr/1JK6pUCAXfnvcbXEpdVSxhVZ8W6kxQ4VEH/totalReceived
98 |- /api/addr/1JK6pUCAXfnvcbXEpdVSxhVZ8W6kxQ4VEH/totalSent
99 |- /api/addr/1JK6pUCAXfnvcbXEpdVSxhVZ8W6kxQ4VEH/unconfirmedBalance
100 |- /api/addr/1FvzCLoTPGANNjWoUo6jUGuAG3wg1w4YjR/utxo
101 |- /api/addrs/1FvzCLoTPGANNjWoUo6jUGuAG3wg1w4YjR,1CmTtsKEqPxZsW3YjGYXbPSY89xrzkhy94/utxo
102 |Also supports POST at '/api/addrs/utxo' with param 'addrs'.103 |- /api/addrs/17pfg6L3hT1ZPBASPt7DCQZfy9jWeMGq1W,1CmTtsKEqPxZsW3YjGYXbPSY89xrzkhy94/txs?from=0&to=1
104 |Also supports POST at '/api/addrs/txs' with params 'addrs,from,to'.105 |- /api/txs?block=0000000000001271efd5d9f7e539909160a181b2c0a2b8c164d6f8159e5c7dd9
106 |- /api/txs?address=1JK6pUCAXfnvcbXEpdVSxhVZ8W6kxQ4VEH
107 |- /api/utils/estimatefee?nbBlocks=2
108 |- /api/tx/send
109 |Send raw transaction by POST method with param 'rawtx'.110 |113 |
124 | 125 | This api also available via POST with [method,params] args. 126 |- /bci/block-height/123432
114 |...115 |- /bci/rawblock/0000000000001271efd5d9f7e539909160a181b2c0a2b8c164d6f8159e5c7dd9
116 |- /bci/rawtx/23bb66ef300714042085d0ed2d05100531e80d5239020545887df059c0178b56?format=hex
117 |- /bci/address/1JK6pUCAXfnvcbXEpdVSxhVZ8W6kxQ4VEH
118 |- /bci/unspent/1JK6pUCAXfnvcbXEpdVSxhVZ8W6kxQ4VEH
119 |- /bci/latestblock
120 |- /bci/q/getblockcount
121 |- ws://api-host/ws
122 |Blockchain.info compatible websocket interface123 |127 |
137 | Extensions to support sqlchain-electrum server. 138 |- /rpc/getinfo
128 |- /rpc/getdifficulty
129 |- /rpc/getblock/0000000000001271efd5d9f7e539909160a181b2c0a2b8c164d6f8159e5c7dd9
130 |- /rpc/getblockhash/123432
131 |- /rpc/getblockcount
132 |- /rpc/getrawtransaction/23bb66ef300714042085d0ed2d05100531e80d5239020545887df059c0178b56
133 |- /rpc/gettxout/23bb66ef300714042085d0ed2d05100531e80d5239020545887df059c0178b56/1
134 |- /rpc/getmempoolinfo
135 |- /rpc/getrawmempool
136 |139 |
150 | Extensions, and status / debugging. 151 |- /api/history/1JK6pUCAXfnvcbXEpdVSxhVZ8W6kxQ4VEH
140 |- /api/history/1JK6pUCAXfnvcbXEpdVSxhVZ8W6kxQ4VEH/status
141 |- /api/history/1JK6pUCAXfnvcbXEpdVSxhVZ8W6kxQ4VEH/balance
142 |- /api/history/1JK6pUCAXfnvcbXEpdVSxhVZ8W6kxQ4VEH/uncfmd
143 |- /api/history/1FvzCLoTPGANNjWoUo6jUGuAG3wg1w4YjR/utxo
144 |- /api/block-index/167324/electrum
145 |- /api/tx/23bb66ef300714042085d0ed2d05100531e80d5239020545887df059c0178b56/output/1
146 |- /api/tx/23bb66ef300714042085d0ed2d05100531e80d5239020545887df059c0178b56/raw
147 |- /api/tx/23bb66ef300714042085d0ed2d05100531e80d5239020545887df059c0178b56/addrs
148 |- /api/merkle/2acba2c6916cdfdbf3584dfdd32534af5031ab076029ff275167fa5181dee0a8
149 |152 |
165 | 166 |- /api/auto/123432
153 |Auto detect value and return appropriate data.155 |
Supports 4 modes: block-index, block hash, address, tx hash.
154 | Also available as POST method with 'data' parameter, as demonstrated above with "Find".- /api/closure/1M8s2S5bgAzSSzVTeL7zruvMPLvzSkEAuv
156 |Compute the closure of an address. That is, the addresses likely to be owned by the same entity.157 |- /api/status
158 |- /api/status/db/html
159 |Returns database information like row counts, disk space used.162 |
Advanced info like multi-sig address count, 160 | id collision counts, non-std and unspent output counts are updated periodically due to slow query time. An html 161 | version returns a table that can be used in web pages with styling.- /api/tx/23bb66ef300714042085d0ed2d05100531e80d5239020545887df059c0178b56/html
163 |An html table formatted version of raw transaction data with hex values and op codes.164 |169 | 170 | It's easy to create a web app using sqlChain on an Ubuntu server.188 | 189 |
See the full install guide on github, but briefly: 171 |172 |
187 |- Install base packages:
174 | sudo apt install software-properties-common python-software-properties libev-dev libevent-dev 175 | sudo apt install mariadb-server libmysqlclient20 176 | sudo apt install bitcoind libmysqlclient-dev python-pip python-dev build-essential 177 |
173 | # may not need but won't hurt
sudo apt-get install python-software-properties libev-dev libevent-dev- Install sqlchain from PyPi:
179 |
178 | sudo pip install sqlchain- Run the init script to setup the mysql/maria db, users and config files:
181 |
180 | sudo sqlchain-init- Start the daemons, as needed:
183 |
182 | sudo systemctl start bitcoin
sudo systemctl start sqlchain
sudo systemctl start sqlchain-api- Modify the /var/www source files as your own app.
184 |- Scale to serve the world:
186 |
185 | run nginx as front end and mysql replication behind.190 | 191 | Huh, What support?201 | 204 |
192 | More seriously, I put a lot of work into creating this. If you want to support continued effort by donating 193 | that would be cool. Visit my blog donation page.
194 | I do freelance programming work, and I'm available to build on this commercially for you or work on other Bitcoin related 195 | projects. My rates are very reasonable, given my expertise, because I live in a downright life-is-cheap country. 196 | So if donating rubs you the wrong way then you can always hire me.
197 | Give [Vultr.com](http://www.vultr.com/?ref=7087266) a try out. I've been very happy with them for development and testing sqlchain. 198 | Use my referral link and I get funded for my testing without costing you a penny extra. You can start up and run VPS servers by the 199 | hour using a simple control panel and in my tests they're been both faster and cheaper than Amazon AWS. 200 |> 3 # test 1000 randoms hashes match, just for heck of it 256 | -------------------------------------------------------------------------------- /sqlchaind: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # 3 | # sqlchaind - daemon to update sql blockchain db 4 | # 5 | # pylint:disable=no-member 6 | from Queue import Queue, Empty 7 | from datetime import datetime 8 | from struct import pack, unpack 9 | 10 | import os, sys, socket, getopt, time, signal, threading, daemon 11 | import MySQLdb as db 12 | 13 | from sqlchain.version import version, coincfg, BLKDAT_NEAR_SYNC, BLKDAT_MAGIC, MAX_IO_TX, MAX_TX_BLK 14 | from sqlchain.util import dotdict, sqlchain_overlay, loadcfg, savecfg, drop2user, rpcPool, blockwork, int2bin32, log, logts 15 | from sqlchain.util import encodeVarInt, decodeBlock, decodeTx, findTx, insertAddress, mkBlobHdr, insertBlob, puthdr, gethdr 16 | from sqlchain.blkdat import BlkDatHandler 17 | 18 | __builtins__.sqc = dotdict() # container for super globals 19 | 20 | sqc.cfg = { 'log':sys.argv[0]+'.log', 'queue':8, 'no-sigs':False, 'db':'', 'rpc':'', 'path':'/var/data/sqlchain', 'cointype':'bitcoin' } 21 | sqc.bestblk = 120000 22 | sqc.zmq = True 23 | blksecs = [] 24 | memPool = set() 25 | 26 | def getBlocks(blk): 27 | sql = db.connect(*sqc.cfg['db'].split(':')) 28 | sql.autocommit(True) # only mempool, most data inserted in handlers 29 | cur = sql.cursor() 30 | cur.execute("show tables like 'bech32';") # test if old db version and abort with log msg 31 | if cur.rowcount == 0: 32 | log("sqlChain Database 
upgrade required for this daemon version.\nCannot continue. Run sqlchain-upgrade-db.") 33 | sqc.done.set() 34 | return 0 35 | blkinfo = sqc.rpc.getblockchaininfo() # wait for node to be ready 36 | if blkinfo is None: 37 | return 0 38 | if blk == 0: 39 | cur.execute('select ifnull(max(id), -1) from blocks') 40 | blk = int(cur.fetchone()[0] + 1) 41 | cur.execute("select hex(chainwork) from blocks where id=%s;", (blk-1,)) 42 | row = cur.fetchone() 43 | chainwork = int(row[0],16) if row is not None else 0 44 | if blk == 0 and 'max_blks' in sqc.cfg: 45 | blk = blkinfo['blocks'] - sqc.cfg['max_blks'] 46 | log("Using block limit %d" % sqc.cfg['max_blks']) 47 | blkhash = sqc.rpc.getblockhash(blk) 48 | chainwork = int(sqc.rpc.getblockheader(blkhash)['chainwork'],16) 49 | else: 50 | blkhash = sqc.rpc.getblockhash(blk) 51 | chainwork = int(sqc.rpc.getblockheader(blkhash)['chainwork'],16) 52 | startblk = blk 53 | 54 | sqc.bestblk = blkinfo['headers'] if 'headers' in blkinfo else 0 55 | if 'blkdat' in sqc.cfg and (sqc.bestblk - startblk) > coincfg(BLKDAT_NEAR_SYNC): 56 | blk = getBlocksDirect(cur, blk, chainwork) # use direct file access for catch up 57 | if not sqc.done.isSet(): 58 | log("Using rpc mode. 
Monitoring blocks / mempool on " + sqc.cfg['cointype']) 59 | poll_delay = 0.05 60 | while not sqc.done.isSet(): 61 | blkinfo = sqc.rpc.getblockchaininfo() 62 | if blkinfo is None or blk > blkinfo['blocks']: 63 | if sqc.zmq and 'zmq' in sqc.cfg: 64 | blk = getBlocksZMQ(cur, blk, chainwork) # try to upgrade to ZMQ, more efficient 65 | else: 66 | checkMemPool(cur) 67 | time.sleep(5) 68 | continue 69 | if blockQ.qsize() >= sqc.cfg['queue']: 70 | time.sleep(min(poll_delay,5)) 71 | poll_delay *= 2 72 | continue 73 | poll_delay = 0.05 74 | if 'pruned' in blkinfo and blkinfo['pruned']: 75 | chkPruning(blk - blockQ.qsize()) 76 | rpcstart = time.time() 77 | blkhash = sqc.rpc.getblockhash(blk) 78 | if blkhash is not None: 79 | data = decodeBlock(sqc.rpc.getblock(blkhash, False).decode('hex')) 80 | data['height'] = blk 81 | chainwork += blockwork(data['bits']) 82 | data['chainwork'] = int2bin32(chainwork) 83 | data['rpc'] = time.time()-rpcstart 84 | blockQ.put(data) 85 | blk += 1 86 | return blk - startblk 87 | 88 | def getBlocksDirect(cur, blk, chainwork): 89 | blkscan = threading.Thread(target = BlkDatHandler, args=(True,)) 90 | blkscan.start() 91 | idle_count = 0 92 | log("Using blkdat mode: %s" % sqc.cfg['blkdat']) 93 | while not sqc.done.isSet(): 94 | if blockQ.qsize() >= sqc.cfg['queue']: 95 | time.sleep(0.01) 96 | continue 97 | chkPruning(blk - blockQ.qsize()) 98 | if (sqc.bestblk - blk) < coincfg(BLKDAT_NEAR_SYNC): 99 | logts("Near sync %d. Aborting direct mode" % blk) 100 | return blk 101 | if idle_count >= 60: 102 | logts("No blkdat activity, 3 minutes. 
Aborting direct mode") 103 | return blk 104 | cur.execute("select filenum,filepos from blkdat where id=%s limit 1;", (blk,)) 105 | row = cur.fetchone() 106 | if row: 107 | filenum,pos = row 108 | started = time.time() 109 | with open(sqc.cfg['blkdat'] + "/blocks/blk%05d.dat" % filenum, 'rb') as fd: 110 | fd.seek(pos) 111 | magic,blksize = unpack('120000 and blk % 100 == 0: 170 | blkinfo = sqc.rpc.getblockchaininfo() 171 | if blkinfo is not None and blkinfo['pruned']: 172 | keep = 20 if not 'prune' in sqc.cfg else max(sqc.cfg['prune'], 20) 173 | sqc.rpc.pruneblockchain(blk-keep) # keep at least 20 blocks for reorgs but can now config higher 174 | sqc.bestblk = blkinfo['headers'] 175 | 176 | def limitBlocks(cur, max_blks): 177 | cur.execute("select id from blocks order by id desc limit %s, 1", (max_blks,)) 178 | row = cur.fetchone() 179 | if row: 180 | blkid = row[0] 181 | cur.execute("select id from trxs where block_id < %s", ((blkid+1)*MAX_TX_BLK,)) 182 | txids = [ txid for txid, in cur ] 183 | for txid in txids: 184 | cur.execute("delete from outputs where id >= %s*{0} and id < %s*{0}".format(MAX_IO_TX), (txid,txid)) 185 | cur.execute("delete from trxs where id=%s", (txid,)) 186 | cur.execute("delete from blocks where id<=%s", (blkid,)) 187 | 188 | def BlockHandler(): 189 | sql = db.connect(*sqc.cfg['db'].split(':')) 190 | cur = sql.cursor() 191 | while not sqc.done.isSet(): 192 | try: 193 | insertBlock(cur, blockQ.get(True, 5)) 194 | sql.commit() 195 | if 'max_blks' in sqc.cfg: 196 | limitBlocks(cur, sqc.cfg['max_blks']) 197 | sql.commit() 198 | except Empty: 199 | pass 200 | 201 | def OutputHandler(): 202 | sql = db.connect(*sqc.cfg['db'].split(':')) 203 | cur = sql.cursor() 204 | cur.execute("select count(*) from mempool;") 205 | poolcnt = cur.fetchone()[0] 206 | ins,outs = [],[] 207 | while True: 208 | try: 209 | xo,xi = outQ.get(True, 3) 210 | sqc.flushed.clear() 211 | outs.extend(xo) 212 | ins.extend(xi) 213 | if len(outs) + len(ins) > (8192 if not poolcnt 
else 0): 214 | cur.executemany("insert ignore into outputs (id,addr_id,value) values(%s,%s,%s)", outs) 215 | cur.executemany("update outputs set tx_id=%s where id=%s limit 1", ins) 216 | sql.commit() 217 | ins,outs = [],[] 218 | except Empty: 219 | if len(outs) > 0 or len(ins) > 0: 220 | cur.executemany("insert ignore into outputs (id,addr_id,value) values(%s,%s,%s)", outs) 221 | cur.executemany("update outputs set tx_id=%s where id=%s limit 1", ins) 222 | sql.commit() 223 | sqc.flushed.set() 224 | if sqc.alldone.isSet(): 225 | print "Flushed outQ - outs %d - ins %d" % (len(outs), len(ins)) 226 | return 227 | ins,outs = [],[] 228 | 229 | def checkMemPool(cur): 230 | cur.execute("select ifnull(max(sync_id),0) from mempool;") 231 | sync_id = cur.fetchone()[0] 232 | if len(memPool) == 0: 233 | cur.execute("delete from mempool;") 234 | trxs = sqc.rpc.getrawmempool() 235 | if trxs is not None: 236 | for tx in trxs: 237 | txx = tx.decode('hex')[::-1][:8] # uses 1/4 space, only for detecting changes in mempool 238 | if txx not in memPool: 239 | rawtx = sqc.rpc.getrawtransaction(tx,0) 240 | if rawtx is not None: 241 | insertTxMemPool(cur, decodeTx(rawtx.decode('hex')), sync_id+1) 242 | memPool.add(txx) 243 | 244 | def addOrphan(cur, height): 245 | cur.execute("select ifnull(max(sync_id),0) from mempool;") 246 | sync_id = cur.fetchone()[0] 247 | hdr = gethdr(height, sqc.cfg, 'raw') 248 | cur.execute("select hash,coinbase from blocks where id=%s limit 1;", (height,)) 249 | for blkhash,coinbase in cur: 250 | cur.execute("insert into orphans (sync_id,block_id,hash,hdr,coinbase) values(%s,%s,%s,%s,%s);", (sync_id,height,blkhash,hdr,coinbase)) 251 | 252 | def checkReOrg(cur, data): 253 | if data['height'] == 0: 254 | return 255 | blkhash,height = data['previousblockhash'],data['height']-1 256 | while True: 257 | cur.execute("select id from blocks where hash=%s limit 1;", (blkhash,)) 258 | row = cur.fetchone() 259 | if row is None: 260 | log("No previous block %d - ok if first 
run, ReOrg aborted" % height) 261 | return 262 | if row and row[0] == height: # rewind until block in good chain 263 | break 264 | height -= 1 265 | blkhash = sqc.rpc.getblockhash(height).decode('hex')[::-1] 266 | if blkhash is None: 267 | log("Rewind failure during ReOrg - Check manually.") 268 | return 269 | height += 1 270 | if height < data['height']: 271 | sqc.flushed.wait() # make sure all outputs committed before we start re-org 272 | cur.execute("update trxs set block_id=-1 where block_id >= %s;", (height*MAX_TX_BLK,)) # set bad chain txs uncfmd 273 | logts("Block %d *** ReOrg: %d orphan(s), %d txs affected" % (height, data['height']-height, cur.rowcount)) 274 | doReOrg(cur, data, height) 275 | 276 | def doReOrg(cur, data, height): 277 | while height < data['height']: 278 | blkhash = sqc.rpc.getblockhash(height) 279 | if blkhash is None: 280 | log("Block failure during ReOrg - Check manually.") 281 | return 282 | data = decodeBlock(sqc.rpc.getblock(blkhash, False).decode('hex')) # get good chain blocks 283 | if data: 284 | for n,tx in enumerate(data['tx']): 285 | tx_id,found = findTx(cur, tx['txid'], mkNew=True) 286 | if found: 287 | cur.execute("update trxs set block_id=%s where id=%s limit 1;", (height*MAX_TX_BLK+n, tx_id)) 288 | else: 289 | insertTx(cur, tx, tx_id, height*MAX_TX_BLK + n) # occurs if tx wasn't in our mempool or orphan block 290 | addOrphan(cur, height) 291 | data['chainwork'] = sqc.rpc.getblockheader(data['hash'][::-1].encode('hex'))['chainwork'].decode('hex') 292 | cur.execute("update blocks set hash=%s,coinbase=%s,chainwork=%s,blksize=%s where id=%s;", (data['hash'], data['coinbase'], data['chainwork'], data['size'], height)) 293 | puthdr(data['height'], sqc.cfg, data['hdr']) 294 | height += 1 295 | 296 | def insertTxMemPool(cur, tx, sync_id): 297 | tx_id,found = findTx(cur, tx['txid'], mkNew=True) 298 | if not found: 299 | insertTx(cur, tx, tx_id, -1) # -1 means trx has no block 300 | cur.execute("insert ignore into mempool 
(id,sync_id) values(%s,%s);", (tx_id, sync_id)) 301 | 302 | def insertTx(cur, tx, tx_id, blk_id): # pylint:disable=too-many-locals 303 | inlist,outlist = [],[] 304 | in_ids,txdata = '','' 305 | tx['stdSeq'] = True 306 | for vin in tx['vin']: 307 | if vin['sequence'] != 0xffffffff: 308 | tx['stdSeq'] = False 309 | break 310 | for vin in tx['vin']: 311 | if 'coinbase' not in vin: 312 | in_id = findTx(cur, vin['txid']) 313 | if in_id and vin['vout'] < MAX_IO_TX: 314 | in_id = (in_id*MAX_IO_TX) + vin['vout'] 315 | inlist.append(( tx_id, in_id )) 316 | in_ids += pack(' 0: 346 | continue 347 | insertTx(cur, tx, tx_id, blk_id + n) 348 | 349 | cur.execute("insert ignore into blocks (id,hash,coinbase,chainwork,blksize) values (%s,%s,%s,%s,%s);", (data['height'], data['hash'], data['coinbase'], data['chainwork'], data['size'])) 350 | puthdr(data['height'], sqc.cfg, data['hdr']) 351 | 352 | blktime = time.time() - blkstart 353 | log("Block %d [ Q:%d %4d txs - %s - %3.0fms %2.1fs %3.0f tx/s]" % ( data['height'], blockQ.qsize(), 354 | len(data['tx']), datetime.fromtimestamp(data['time']).strftime('%d-%m-%Y'), data['rpc']*1000, blktime, len(data['tx'])/blktime) ) 355 | 356 | blksecs.append(blktime) 357 | if len(blksecs) > 18: # ~3 hour moving avg 358 | del blksecs[0] 359 | cur.execute("replace into info (class,`key`,value) value('info','avg-block-sync',%s);", ("%2.1f"%(sum(blksecs)/len(blksecs)), )) 360 | 361 | def options(cfg): # pylint:disable=too-many-branches 362 | try: 363 | opts,_ = getopt.getopt(sys.argv[1:], "hvd:l:r:w:p:q:u:b:f:", 364 | ["help", "version", "debug", "db=", "log=", "rpc=", "path=", "queue=", "user=", "block=", "blkdat=", "no-sigs", "defaults" ]) 365 | except getopt.GetoptError: 366 | usage() 367 | for opt,arg in opts: 368 | if opt in ("-h", "--help"): 369 | usage() 370 | elif opt in ("-v", "--version"): 371 | sys.exit(sys.argv[0]+': '+version) 372 | elif opt in ("-d", "--db"): 373 | cfg['db'] = arg 374 | elif opt in ("-l", "--log"): 375 | cfg['log'] = arg 
376 | elif opt in ("-r", "--rpc"): 377 | cfg['rpc'] = arg 378 | elif opt in ("-p", "--path"): 379 | cfg['path'] = arg 380 | elif opt in ("-q", "--queue"): 381 | cfg['queue'] = int(arg) 382 | elif opt in ("-u", "--user"): 383 | cfg['user'] = arg 384 | elif opt in "--no-sigs": 385 | cfg['no-sigs'] = True 386 | elif opt in "--defaults": 387 | savecfg(cfg) 388 | sys.exit("%s updated" % (sys.argv[0]+'.cfg')) 389 | elif opt in ("-b", "--block"): 390 | cfg['block'] = int(arg) 391 | elif opt in "--debug": 392 | cfg['debug'] = True 393 | elif opt in ("-f", "--blkdat"): 394 | cfg['blkdat'] = arg 395 | 396 | def usage(): 397 | print """Usage: {0} [options...][cfg file]\nCommand options are:\n-h,--help\tShow this help info\n-v,--version\tShow version info 398 | -b,--block\tStart at block number (instead of from last block done) 399 | -f,--blkdat\tSet path to block data and use direct file access (no mempool/re-org) 400 | --debug\t\tRun in foreground with logging to console 401 | --defaults\tUpdate cfg and exit\nDefault files are {0}.cfg, {0}.log 402 | \nThese options get saved in cfg file as defaults. 
403 | -p,--path\tSet path for blob and block header data file (/var/data/sqlchain) 404 | -q,--queue\tSet block queue size (8)\n-u,--user\tSet user to run as\n-d,--db \tSet mysql db connection, "host:user:pwd:dbname" 405 | -l,--log\tSet log file path\n-r,--rpc\tSet rpc connection, "http://user:pwd@host:port" 406 | --no-sigs\tDo not store input sigScript data """.format(sys.argv[0]) 407 | sys.exit(2) 408 | 409 | def sigterm_handler(_signo, _stack_frame): 410 | sqc.done.set() 411 | def sighup_handler(_signo, _stack_frame): 412 | path = sqc.cfg['log'] if 'log' in sqc.cfg else sys.argv[0]+'.log' 413 | sys.stdout.close() 414 | sys.stdout=open(path,'a') 415 | sys.stderr.close() 416 | sys.stderr=open(path,'a') 417 | logts("SIGHUP Log reopened") 418 | 419 | def run(): 420 | sqc.done = threading.Event() 421 | sqc.alldone = threading.Event() 422 | sqc.flushed = threading.Event() 423 | 424 | sqlchain_overlay(sqc.cfg['cointype']) 425 | 426 | blkwrk = threading.Thread(target = BlockHandler) 427 | blkwrk.start() 428 | outwrk = threading.Thread(target = OutputHandler) 429 | outwrk.start() 430 | 431 | blksdone = None 432 | workstart = time.time() 433 | while not sqc.done.isSet(): 434 | try: 435 | blksdone = getBlocks(sqc.cfg['block'] if 'block' in sqc.cfg else 0) 436 | break 437 | except socket.error: 438 | log("Cannot connect to rpc") 439 | time.sleep(5) 440 | 441 | sqc.done.set() 442 | blkwrk.join() 443 | sqc.alldone.set() 444 | outwrk.join() 445 | if blksdone: 446 | log("Session %d blocks, %.2f blocks/s" % (blksdone, float(blksdone / (time.time() - workstart))) ) 447 | 448 | if __name__ == '__main__': 449 | 450 | loadcfg(sqc.cfg) 451 | options(sqc.cfg) 452 | drop2user(sqc.cfg) 453 | 454 | sqc.rpc = rpcPool(sqc.cfg) 455 | blockQ = Queue() 456 | outQ = Queue(64) 457 | 458 | if sqc.cfg['debug']: 459 | signal.signal(signal.SIGINT, sigterm_handler) 460 | run() 461 | else: 462 | logpath = sqc.cfg['log'] if 'log' in sqc.cfg else sys.argv[0]+'.log' 463 | pidpath = sqc.cfg['pid'] if 
'pid' in sqc.cfg else sys.argv[0]+'.pid' 464 | with daemon.DaemonContext(working_directory='.', umask=0002, stdout=open(logpath,'a'), stderr=open(logpath,'a'), 465 | signal_map={signal.SIGTERM:sigterm_handler, signal.SIGHUP:sighup_handler} ): 466 | with file(pidpath,'w') as f: 467 | f.write(str(os.getpid())) 468 | run() 469 | os.unlink(pidpath) 470 | -------------------------------------------------------------------------------- /sqlchain/insight.py: -------------------------------------------------------------------------------- 1 | # 2 | # Insight compatible API module 3 | # 4 | import os, urlparse, cgi, json 5 | from string import hexdigits 6 | from struct import pack, unpack 7 | from datetime import datetime 8 | from hashlib import sha256 9 | 10 | from bitcoinrpc.authproxy import AuthServiceProxy 11 | from gevent import sleep 12 | 13 | from sqlchain.version import version, MAX_TX_BLK, MAX_IO_TX 14 | from sqlchain.util import is_address, mkaddr, addr2id, txh2id, mkSPK, getBlobData, getBlobsSize, is_BL32 15 | from sqlchain.util import encodeVarInt, gethdr, coin_reward, bits2diff, mkOpCodeStr, logts 16 | 17 | RESULT_ROW_LIMIT = 1000 18 | zF = lambda x: int(x) if int(x) == x else x 19 | 20 | #main entry point for api calls 21 | def do_API(env, send_resp): # pylint:disable=too-many-branches 22 | result = [] 23 | get,args,cur = urlparse.parse_qs(env['QUERY_STRING']), env['PATH_INFO'].split('/')[2:], sqc.dbpool.get().cursor() 24 | send_resp('200 OK', [('Content-Type', 'application/json')]) 25 | if args[0] == 'auto' or env['REQUEST_METHOD'] == 'POST': 26 | result = apiAuto(cur, env, args, get) 27 | elif args[0] == "block-index": 28 | result = json.dumps(apiHeader(cur, args[1], args[2:])) 29 | elif args[0] == "block": 30 | if len(args[1]) == 64 and all(c in hexdigits for c in args[1]): 31 | result = json.dumps(apiBlock(cur, args[1])) 32 | elif args[0] in ["tx","rawtx"]: 33 | if len(args[1]) == 64 and all(c in hexdigits for c in args[1]): 34 | result = 
json.dumps(apiTx(cur, args[1], args)) 35 | elif args[0] == "txs": 36 | result = json.dumps({ 'pagesTotal':1, 'txs': apiTxs(cur, get) }) 37 | elif args[0] in ["addr","addrs"]: 38 | result = json.dumps(apiAddr(cur, args[1].split(','), args[2:], get)) 39 | elif args[0] == "history": 40 | result = json.dumps(addrHistory(cur, args[1], args[2:])) 41 | elif args[0] == "status": 42 | result = json.dumps(apiStatus(cur, *args[1:])) 43 | elif args[0] == "merkle": 44 | result = json.dumps(apiMerkle(cur, args[1])) 45 | elif args[0] == "utils": 46 | result = json.dumps(apiRPC(args[1], get['nbBlocks'][0] if 'nbBlocks' in get else args[2] if len(args) > 2 else 2)) 47 | elif args[0] == "sync": 48 | result = json.dumps(apiSync(cur, *[int(x) if x.isdigit() else 0 for x in args[1:]])) 49 | elif args[0] == "closure": 50 | result = json.dumps(apiClosure(cur, args[1].split(',') )) 51 | return result 52 | 53 | def apiAuto(cur, env, args, get): 54 | result = [] 55 | form = cgi.FieldStorage(fp=env['wsgi.input'], environ=env, keep_blank_values=True) 56 | if args[0] == "auto": 57 | param = form['data'].value if 'data' in form else args[1] 58 | if param.isdigit() and int(param) <= sqc.cfg['block']: 59 | blkhash = apiHeader(cur, param, args[2:]) 60 | result = json.dumps(apiBlock(cur, blkhash['blockHash'])) if blkhash else [] 61 | elif len(param) == 64: 62 | if param[:8] == '00000000': 63 | result = json.dumps(apiBlock(cur, param)) 64 | result = json.dumps(apiTx(cur, param, args)) 65 | elif is_address(param): 66 | result = json.dumps(apiAddr(cur, [ param ], args[2:], get)) 67 | elif args[0] == "addrs": 68 | result = json.dumps(apiAddr(cur, form['addrs'].value.split(','), args[2:], get)) 69 | elif args[0] == "tx" and args[1] == "send": 70 | result = apiRPC('send', form['rawtx'].value) 71 | return result 72 | 73 | def apiHeader(cur, blk, args): 74 | if blk.isdigit(): 75 | cur.execute("select id,hash from blocks where id=%s limit 1;", (blk,)) 76 | else: 77 | cur.execute("select id,hash from blocks 
order by id desc limit 1;") 78 | for blkid,blkhash in cur: 79 | hdr = gethdr(int(blkid), sqc.cfg) 80 | if 'electrum' in args: 81 | return { 'block_height':int(blkid), 'version':hdr['version'], 'time':hdr['time'], 'bits':hdr['bits'], 'nonce':hdr['nonce'], 82 | 'merkle_root':hdr['merkleroot'][::-1].encode('hex'), 'prev_block_hash':hdr['previousblockhash'][::-1].encode('hex') } 83 | return { 'blockHash': blkhash[::-1].encode('hex') } 84 | return {} 85 | 86 | def apiBlock(cur, blkhash): 87 | data = { 'hash':blkhash, 'tx':[] } 88 | cur.execute("select id,chainwork,blksize from blocks where hash=%s limit 1;", (blkhash.decode('hex')[::-1],)) 89 | for blk,work,blksz in cur: 90 | data['height'] = int(blk) 91 | data['confirmations'] = sqc.cfg['block'] - data['height'] + 1 92 | data.update(gethdr(data['height'], sqc.cfg)) 93 | data['previousblockhash'] = data['previousblockhash'][::-1].encode('hex') 94 | data['merkleroot'] = data['merkleroot'][::-1].encode('hex') 95 | data['difficulty'] = zF(int(bits2diff(data['bits'])*1e8)/1e8) 96 | data['bits'] = '%08x' % data['bits'] 97 | data['reward'] = zF(coin_reward(data['height'])) 98 | data['isMainChain'] = True 99 | data['size'] = blksz 100 | data['chainwork'] = work.encode('hex') 101 | data['poolInfo'] = {} 102 | cur.execute("select hash from trxs where block_id>=%s and block_id<%s;", (blk*MAX_TX_BLK, blk*MAX_TX_BLK+MAX_TX_BLK)) 103 | for txhash, in cur: 104 | data['tx'].append(txhash[::-1].encode('hex')) 105 | cur.execute("select hash from blocks where id=%s limit 1;", (data['height']+1,)) 106 | for txhash, in cur: 107 | data['nextblockhash'] = txhash[::-1].encode('hex') 108 | return data 109 | return {} 110 | 111 | def apiAddr(cur, addrs, args, get): 112 | data = [] 113 | for addr in addrs: 114 | if is_address(addr): 115 | addr_id = addr2id(addr, cur) 116 | if addr_id: 117 | if 'utxo' in args: 118 | data.append(addrUTXOs(cur, addr_id, addr, get)) 119 | else: 120 | data.append(addrTXs(cur, addr_id, addr, args, get)) 121 | return 
data if len(data) != 1 else data[0] 122 | 123 | 124 | def addrTXs(cur, addr_id, addr, args, get): # pylint:disable=too-many-locals 125 | incTxs = 'noTxList' not in get or get['noTxList'][0] == '0' 126 | offset = int(get['from'][0]) if 'from' in get else 0 127 | limit = min(int(get['to'][0])-offset, RESULT_ROW_LIMIT) if 'to' in get else RESULT_ROW_LIMIT 128 | txs = [] 129 | sums = [[0,0],[0,0]] 130 | untxs = 0 131 | count = 0 132 | cur.execute("select value,t.id,tx_id,hash,block_id from trxs t left join outputs o on t.id=(o.id div {0}) or t.id=o.tx_id where addr_id=%s order by block_id desc;".format(MAX_IO_TX), (addr_id,)) 133 | for value,tx_id,spend_id,txhash,blk in cur: 134 | uncfmd = 1 if blk < 0 else 0 135 | untxs += uncfmd 136 | spend = 1 if tx_id == spend_id else 0 137 | sums[uncfmd][spend] += value 138 | 139 | if count >= offset and count < offset+limit: 140 | txhash = txhash[::-1].encode('hex') 141 | if incTxs and txhash not in txs: 142 | txs.append(txhash) 143 | count += 1 144 | 145 | if 'balance' in args: 146 | return int(sums[0][0]-sums[0][1]) 147 | if 'unconfirmedBalance' in args: 148 | return int(sums[1][0]-sums[1][1]) 149 | if 'totalReceived' in args: 150 | return int(sums[0][0]) 151 | if 'totalSent' in args: 152 | return int(sums[0][1]) 153 | 154 | return { 'addrStr':addr, 'balanceSat':int(sums[0][0]-sums[0][1]), 'balance':float(sums[0][0]-sums[0][1])/1e8 or 0, 'totalReceivedSat':int(sums[0][0]), 155 | 'totalReceived': float(sums[0][0])/1e8, 'totalSentSat':int(sums[0][1]), 'totalSent':float(sums[0][1])/1e8, 156 | 'unconfirmedBalanceSat':int(sums[1][0]-sums[1][1]), 'unconfirmedBalance':float(sums[1][0]-sums[1][1])/1e8 or 0, 157 | 'txApperances':len(txs), 'transactions':txs, 'unconfirmedTxApperances':untxs } 158 | 159 | def addrUTXOs(cur, addr_id, addr, get): 160 | offset = int(get['from'][0]) if 'from' in get else 0 161 | limit = min(int(get['to'][0])-offset, RESULT_ROW_LIMIT) if 'to' in get else RESULT_ROW_LIMIT 162 | data = [] 163 | 
cur.execute("select value,o.id,hash,block_id div {1} from trxs t left join outputs o on t.id=(o.id div {0}) and o.tx_id is null where addr_id=%s order by block_id limit %s,%s;".format(MAX_IO_TX,MAX_TX_BLK), (addr_id,limit,offset)) 164 | for value,out_id,txhash,blk in cur: 165 | data.append({ 'address':addr, 'txid':txhash[::-1].encode('hex'), 'vout':int(out_id)%MAX_IO_TX, 'amount':float(value)/1e8, 166 | 'confirmations':sqc.cfg['block']-int(blk)+1 if blk>=0 else 0, 'ts':gethdr(int(blk), sqc.cfg, 'time') if blk>=0 else 0 }) 167 | return data 168 | 169 | def addrHistory(cur, addr, args): 170 | txt = '' 171 | data = { 'cfmd':0, 'uncfmd':0 } if 'balance' in args else { 'txs':[] } 172 | addr_id = addr2id(addr, cur) 173 | if addr_id: 174 | cur.execute("select value,t.id,o.tx_id,hash,block_id,o.id%%{0} from outputs o, trxs t where addr_id=%s and (t.id=(o.id div {0}) or t.id=o.tx_id) order by block_id;".format(MAX_IO_TX), (addr_id,)) 175 | for value,tx_id,spent_id,txhash,blk,n in cur: 176 | value = int(value) 177 | blk = blk//MAX_TX_BLK if blk >= 0 else 0 178 | if 'balance' in args: 179 | if blk == 0: 180 | data['uncfmd'] += value if tx_id == spent_id else -value 181 | else: 182 | data['cfmd'] += value if tx_id == spent_id else -value 183 | elif 'utxo' in args and not spent_id: 184 | tmp = { 'tx_hash':txhash[::-1].encode('hex'), 'height':int(blk), 'value':value, 'n':int(n) } 185 | else: 186 | tmp = { 'tx_hash':txhash[::-1].encode('hex'), 'height':int(blk) } 187 | if 'status' in args: 188 | txt += tmp['tx_hash'] + ":%d:" % tmp['height'] 189 | elif ('uncfmd' not in args or tmp['height'] == 0) and 'balance' not in args: 190 | data['txs'].append(tmp) 191 | return (sha256(txt).digest().encode('hex') if txt else None) if 'status' in args else data 192 | 193 | def apiTxs(cur, get): 194 | txs = [] 195 | if 'block' in get: 196 | blkhash = get['block'][0] 197 | if len(blkhash) == 64 and all(c in hexdigits for c in blkhash): 198 | txhashes = apiBlock(cur, blkhash) 199 | txhashes = 
txhashes['tx'] if 'tx' in txhashes else [] 200 | elif 'address' in get: 201 | txhashes = apiAddr(cur, [ get['address'][0] ], {}, {}) 202 | txhashes = txhashes['transactions'] if 'transactions' in txhashes else [] 203 | for txhash in txhashes: 204 | txs.append(apiTx(cur, txhash, [])) 205 | return txs 206 | 207 | def apiTx(cur, txhash, args): 208 | if 'output' in args: 209 | return txoAddr(cur, txhash, args[-1]) 210 | if 'addrs' in args: 211 | return txAddrs(cur, txhash) 212 | data = { 'txid':txhash } 213 | txh = txhash.decode('hex')[::-1] 214 | cur.execute("select id,hash,txdata,block_id div {0},ins,outs,txsize from trxs where id>=%s and hash=%s limit 1;".format(MAX_TX_BLK), (txh2id(txh), txh)) 215 | for tid,txh,txdata,blkid,ins,outs,txsize in cur: 216 | blob = getBlobData(txdata, ins, outs, txsize) 217 | if [i for i in ['rawtx','html'] if i in args]: 218 | return mkRawTx(cur, args, tid, blob, blkid) 219 | data['blockheight'] = blkid 220 | data['confirmations'] = sqc.cfg['block'] - blkid + 1 if blkid >= 0 else 0 221 | data['version'],data['locktime'] = blob['hdr'][4],blob['hdr'][5] 222 | data['valueIn'],data['vin'] = apiInputs(cur, blkid, blob['ins']) 223 | data['valueOut'],data['vout'] = apiOutputs(cur, int(tid), blob['outs']) 224 | data['fees'] = round(data['valueIn'] - data['valueOut'],8) 225 | data['size'] = blob['size'] 226 | cur.execute("select hash from blocks where id=%s limit 1;", (blkid,)) 227 | for txhash2, in cur: 228 | data['blockhash'] = txhash2[::-1].encode('hex') 229 | data['time'] = data['blocktime'] = gethdr(blkid, sqc.cfg, 'time') 230 | if 'coinbase' in data['vin'][0]: 231 | del data['valueIn'] 232 | del data['fees'] 233 | data['isCoinBase'] = True 234 | return data 235 | return {} 236 | 237 | def apiInputs(cur, height, ins): 238 | total,data = 0,[] 239 | if len(ins) == 0: 240 | cur.execute("select coinbase from blocks where id=%s;", (height,)) 241 | return 0,[{ 'n':0, 'coinbase':cur.fetchone()[0].encode('hex') }] 242 | else: 243 | for n,xin in 
enumerate(ins): 244 | cur.execute("select value,addr_id,hash from outputs o, trxs t where o.id=%s and t.id=o.id div %s limit 1;", (xin['outid'], MAX_IO_TX)) 245 | rows = cur.fetchall() 246 | for value,aid,txhash in rows: 247 | cur.execute("select addr from {0} where id=%s limit 1;".format('bech32' if is_BL32(int(aid)) else 'address'), (aid,)) 248 | for addr, in cur: 249 | btc = float(value)/1e8 250 | data.append({ 'n':n, 'vout':xin['outid']%MAX_IO_TX, 'value':round(btc,8), 'valueSat':int(value), 251 | 'txid':txhash[::-1].encode('hex'), 'addr':mkaddr(addr,int(aid)), 'sequence':unpack('=%s*{0} and o.id<%s*{0};".format(MAX_IO_TX), (txid,txid+1)) 260 | rows = cur.fetchall() 261 | for out_id,n,value,aid,in_id in rows: 262 | btc = float(value)/1e8 263 | total += btc 264 | vout = { 'n':int(n), 'value':"%1.8f" % btc, 'scriptPubKey':{} } 265 | if aid == 0: 266 | vout['scriptPubKey']['hex'] = outs[int(n)] 267 | else: 268 | cur.execute("select addr from {0} where id=%s limit 1;".format('bech32' if is_BL32(int(aid)) else 'address'), (aid,)) 269 | for addr, in cur: 270 | vout['scriptPubKey']['addresses'] = [ mkaddr(addr,int(aid)) ] 271 | vout['scriptPubKey']['hex'] = mkSPK(addr,int(aid))[1] 272 | vout['scriptPubKey']['asm'] = mkOpCodeStr(vout['scriptPubKey']['hex'], sepPUSH=' ') 273 | vout['scriptPubKey']['hex'] = vout['scriptPubKey']['hex'].encode('hex') 274 | if in_id: 275 | vout.update(apiSpent(cur, int(in_id), int(out_id))) 276 | data.append(vout) 277 | return round(total,8),data 278 | 279 | def apiSpent(cur, txid, out_id): 280 | cur.execute("select txdata,hash,block_id div {0},ins from trxs where id=%s limit 1;".format(MAX_TX_BLK), (txid,)) 281 | for txdata,txh,blk,ins in cur: 282 | blob = getBlobData(txdata, ins) 283 | for n,xin in enumerate(blob['ins']): 284 | if xin['outid'] == out_id: 285 | return { 'spentTxId':txh[::-1].encode('hex'), 'spentIndex':n, 'spentHeight':int(blk) } 286 | return {} 287 | 288 | def txoAddr(cur, txhash, n): 289 | txid = 
txh2id(txhash.decode('hex')[::-1]) 290 | cur.execute("select addr_id from outputs o where o.id>=%s*{0} and o.id<%s*{0} and o.id%%{0}=%s limit 1;".format(MAX_IO_TX), (txid,txid+1,int(n))) 291 | aids = cur.fetchall() 292 | for aid, in aids: 293 | cur.execute("select addr from {0} where id=%s limit 1;".format('bech32' if is_BL32(int(aid)) else 'address'), (aid,)) 294 | addr = cur.fetchone()[0] 295 | return mkaddr(addr,int(aid)) 296 | return None 297 | 298 | def txAddrs(cur, txhash): 299 | data = [] 300 | txid = txh2id(txhash.decode('hex')[::-1]) 301 | cur.execute("select addr_id from outputs o where o.id>=%s*{0} and o.id<%s*{0};".format(MAX_IO_TX), (txid,txid+1)) 302 | for aid, in cur: 303 | cur.execute("select addr from {0} where id=%s limit 1;".format('bech32' if is_BL32(int(aid)) else 'address'), (aid,)) 304 | addr = cur.fetchone()[0] 305 | data.append( mkaddr(addr,int(aid)) ) 306 | cur.execute("select txdata,ins from trxs where id=%s limit 1;", (txid,)) 307 | txins = cur.fetchall() 308 | for txdata,ins in txins: 309 | blob = getBlobData(int(txdata), ins) 310 | if ins > 0: 311 | for _,xin in enumerate(blob['ins']): 312 | cur.execute("select addr_id from outputs o where o.id=%s limit 1;", (xin['outid'],)) 313 | aids = cur.fetchall() 314 | for aid, in aids: 315 | cur.execute("select addr from {0} where id=%s limit 1;".format('bech32' if is_BL32(int(aid)) else 'address'), (aid,)) 316 | addr = cur.fetchone()[0] 317 | data.append(mkaddr(addr,int(aid))) 318 | return data 319 | 320 | def apiMerkle(cur, txhash): 321 | txh = txhash.decode('hex')[::-1] 322 | cur.execute("select block_id from trxs where id>=%s and hash=%s limit 1", (txh2id(txh), txh)) 323 | for blkid, in cur: 324 | blk,pos = divmod(int(blkid), MAX_TX_BLK) 325 | cur.execute("select hash from trxs where block_id>=%s and block_id<%s order by block_id;", (blk*MAX_TX_BLK, blk*MAX_TX_BLK+MAX_TX_BLK)) 326 | mkt = [ tx for tx, in cur ] 327 | mkb,t = [],pos 328 | while len(mkt) > 1: 329 | if len(mkt) % 2 == 1: 330 | 
mkt.append(mkt[-1]) 331 | mkb.append(mkt[t-1][::-1].encode('hex') if t % 2 == 1 else mkt[t+1][::-1].encode('hex')) 332 | mkt = [ sha256(sha256(mkt[i]+mkt[i+1]).digest()).digest() for i in range(0,len(mkt),2) ] 333 | t //= 2 334 | if mkt[0] != gethdr(blk, sqc.cfg, 'merkleroot'): 335 | logts("Panic! Merkle tree failure, tx %s" % txhash ) 336 | return { "block_height": blk, "merkle": mkb, "pos": pos } 337 | return [] 338 | 339 | rawTxHdr = [ 'version','# inputs','# outputs', 'locktime' ] 340 | rawCbHdr = [ 'null txid','n','coinbase size','coinbase bytes','sequence' ] 341 | rawInHdr = [ 'in txid #%d','n #%d','sigScript size #%d','sigScript bytes #%d','sequence #%d' ] 342 | rawOutHdr = [ 'out value #%d','scriptPK size #%d','scriptPK
bytes/asm #%d' ] 343 | 344 | def rawHTML(out, vi, vo): 345 | outhex = [ x.encode('hex') for x in out ] 346 | tags = [ x for x in rawTxHdr ] 347 | for n in range(vo): 348 | tags[3:3] = [ s%(vo-n-1) for s in rawOutHdr ] 349 | outhex[3+5*vi+3*n+2] += "
"+mkOpCodeStr(out[3+5*vi+3*n+2]).replace('\n', '
')+"" 350 | if vi == 0: 351 | tags[2:2] = rawCbHdr 352 | else: 353 | for n in range(vi): 354 | tags[2:2] = [ s%(vi-n-1) for s in rawInHdr ] 355 | return "" 356 | 357 | def mkRawTx(cur, args, txid, blob, blkid): 358 | out = [ pack('= sqc.sync_id: 395 | with sqc.sync: 396 | sqc.sync.wait(timeout) # long polling support for sync connections 397 | if sync_req >= sqc.sync_id: 398 | return None # timeout 399 | if sync_req == 0 or sync_req == sqc.sync_id: 400 | utxs = sqc.syncTxs 401 | else: 402 | utxs = [] 403 | cur.execute("select hash from mempool m, trxs t where m.sync_id > %s and t.id=m.id;", (sync_req,)) 404 | for txhash, in cur: 405 | utxs.append(bciTxWS(cur, txhash[::-1].encode('hex'))) 406 | cur.execute("select min(block_id) from orphans where sync_id > %s;", (sync_req if sync_req > 0 else sqc.sync_id,)) 407 | orphan = cur.fetchone()[0] 408 | return { 'block':sqc.cfg['block'] if orphan is None else orphan, 'orphan':(not orphan is None), 'txs':utxs, 'sync_id':sqc.sync_id } 409 | 410 | # based on the closure code from 411 | # https://github.com/sharkcrayon/bitcoin-closure 412 | def apiClosure(cur, addrs): 413 | closure,balance = [],0 414 | txDone = [] 415 | while len(addrs) > 0: # pylint:disable=too-many-nested-blocks 416 | sleep(0) 417 | addr = addrs.pop(0) 418 | closure.append(addr) 419 | txs = apiTxs(cur, { 'address':[ addr ] }) 420 | for tx in txs: 421 | if not tx['txid'] in txDone: 422 | if len(tx['vin']) == 1: 423 | txDone.append(tx['txid']) 424 | else: 425 | in_addrs = [ vin['addr'] for vin in tx['vin'] ] 426 | if addr in in_addrs: 427 | txDone.append(tx['txid']) 428 | for ain in in_addrs: 429 | if not ain in closure and not ain in addrs: 430 | addrs.append(ain) 431 | 432 | utxos = apiAddr(cur, closure, 'utxo', {}) 433 | for addr in utxos: 434 | for utxo in addr: 435 | balance += utxo['amount'] 436 | return { 'closure':closure, 'balance':balance } 437 | 438 | def apiStatus(cur, cls='info', *args): 439 | data = {} 440 | cur.execute("select value from info 
where `class`='sys' and `key`='updated';") 441 | row = cur.fetchone() 442 | if not row or (datetime.now() - datetime.strptime(row[0],'%Y-%m-%d %H:%M:%S')).total_seconds() > 60: 443 | cur.execute("replace into info (class,`key`,value) values('info','block',%s);", (sqc.cfg['block'], )) 444 | cur.execute("replace into info (class,`key`,value) values('info','version',%s);", (version, )) 445 | cur.execute("replace into info (class,`key`,value) values('sys','updated',now());") 446 | if cls == 'db': 447 | total_bytes = 0 448 | cur.execute("show table status;") 449 | for tbl in cur: 450 | if tbl[0] not in ['blocks','trxs','address','outputs']: 451 | continue 452 | if tbl[6]+tbl[8] < 1e9: 453 | cur.execute("replace into info (class,`key`,value) values('db','{0}:rows',%s),('db','{0}:data-MB',%s),('db','{0}:idx-MB',%s),('db','{0}:total-MB',%s),('db','{0}:total-bytes',%s);".format(tbl[0]), 454 | (tbl[4], float("%.1f"%float(tbl[6]/1e6)), float("%.1f"%float(tbl[8]/1e6)), float("%.1f"%float(tbl[6]/1e6+tbl[8]/1e6)), tbl[6]+tbl[8])) 455 | else: 456 | cur.execute("replace into info (class,`key`,value) values('db','{0}:rows',%s),('db','{0}:data-GB',%s),('db','{0}:idx-GB',%s),('db','{0}:total-GB',%s),('db','{0}:total-bytes',%s);".format(tbl[0]), 457 | (tbl[4], float("%.1f"%float(tbl[6]/1e9)), float("%.1f"%float(tbl[8]/1e9)), float("%.1f"%float(tbl[6]/1e9+tbl[8]/1e9)), tbl[6]+tbl[8])) 458 | total_bytes += tbl[6]+tbl[8] 459 | blobs_size = getBlobsSize(sqc.cfg) 460 | cur.execute("replace into info (class,`key`,value) values('db','outputs:max-io-tx',%s);", (MAX_IO_TX, )) 461 | cur.execute("replace into info (class,`key`,value) values('db','blocks:hdr-data',%s);", (os.stat(sqc.cfg['path']+'/hdrs.dat').st_size, )) 462 | cur.execute("replace into info (class,`key`,value) values('db','trxs:blob-data',%s);", (blobs_size, )) 463 | cur.execute("replace into info (class,`key`,value) values('db','trxs:blob-GB',%s);", (float("%.1f"%float(blobs_size/1e9)), )) 464 | cur.execute("replace into info 
(class,`key`,value) values('db','trxs:max-tx-block',%s);", (MAX_TX_BLK, )) 465 | cur.execute("replace into info (class,`key`,value) values('db','all:total-bytes',%s);", (total_bytes, )) 466 | cur.execute("replace into info (class,`key`,value) values('db','all:total-GB',%s);", (float("%.1f"%float(total_bytes/1e9)), )) 467 | 468 | cur.execute("select `key`,value from info where class=%s;", (cls, )) 469 | for k,v in cur: 470 | if ':' in k: 471 | k1,k2 = k.split(':', 1) 472 | if k1 in data: 473 | data[k1].update({ k2:v }) 474 | else: 475 | data[k1] = { k2:v } 476 | else: 477 | data[k] = v 478 | if 'html' in args: 479 | pass # todo wrap data as html table 480 | return data 481 | --------------------------------------------------------------------------------
"+" ".join([' %s %s ' % (k,v) for k,v in zip(tags,outhex) ])+"