├── sqlchain ├── __init__.py ├── overlay │ ├── __init__.py │ └── reddcoin.py ├── version.py ├── rpc.py ├── democvt.py ├── blkdat.py ├── dbpool.py ├── bci.py └── insight.py ├── .gitignore ├── docs ├── ALTCOINS.md ├── ENGINES.md └── INSTALL.md ├── www ├── img │ ├── favicon.png │ ├── loading.gif │ ├── glyphicons-halflings.png │ └── glyphicons-halflings-white.png ├── 404.html ├── fonts │ ├── glyphicons-halflings-regular.eot │ ├── glyphicons-halflings-regular.ttf │ ├── glyphicons-halflings-regular.woff │ └── glyphicons-halflings-regular.woff2 ├── js │ ├── ie10-viewport-bug-workaround.js │ └── main.js ├── css │ ├── signin.css │ └── main.css ├── wstest.html └── main.html ├── MANIFEST.in ├── etc ├── sqlchain.log.template ├── systemd.template ├── post-receive ├── node.log.template ├── nginx.template ├── deploy ├── dbinfo.sql ├── electrum.banner ├── dogecoin.sql ├── sqlchain.sql └── reddcoin.sql ├── utils ├── blkbtc ├── fixblksize ├── fixchainwork ├── synctest ├── bwlimit ├── stripsigs └── sqlchain-upgrade-db ├── tests ├── conftest.py ├── README.md ├── test_live_api.py ├── mklivetestdb └── test_utils_bitcoin.py ├── LICENSE ├── RELEASE-NOTES ├── setup.py ├── README.md ├── sqlchain-electrum ├── sqlchain-api └── sqlchaind /sqlchain/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /sqlchain/overlay/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .cache 2 | livetest.*.db 3 | -------------------------------------------------------------------------------- /docs/ALTCOINS.md: -------------------------------------------------------------------------------- 1 | ### sqlChain Multi-Coin support 2 | 3 | 4 | 
-------------------------------------------------------------------------------- /docs/ENGINES.md: -------------------------------------------------------------------------------- 1 | ### sqlChain RocksDB (and other Engine) Install Guide 2 | -------------------------------------------------------------------------------- /www/img/favicon.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neocogent/sqlchain/HEAD/www/img/favicon.png -------------------------------------------------------------------------------- /www/img/loading.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neocogent/sqlchain/HEAD/www/img/loading.gif -------------------------------------------------------------------------------- /MANIFEST.in: -------------------------------------------------------------------------------- 1 | include README.md 2 | recursive-include docs * 3 | recursive-include www * 4 | 5 | -------------------------------------------------------------------------------- /www/img/glyphicons-halflings.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neocogent/sqlchain/HEAD/www/img/glyphicons-halflings.png -------------------------------------------------------------------------------- /www/404.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | Error 404 - File not found. 
6 | 7 | -------------------------------------------------------------------------------- /www/img/glyphicons-halflings-white.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neocogent/sqlchain/HEAD/www/img/glyphicons-halflings-white.png -------------------------------------------------------------------------------- /www/fonts/glyphicons-halflings-regular.eot: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neocogent/sqlchain/HEAD/www/fonts/glyphicons-halflings-regular.eot -------------------------------------------------------------------------------- /www/fonts/glyphicons-halflings-regular.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neocogent/sqlchain/HEAD/www/fonts/glyphicons-halflings-regular.ttf -------------------------------------------------------------------------------- /www/fonts/glyphicons-halflings-regular.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neocogent/sqlchain/HEAD/www/fonts/glyphicons-halflings-regular.woff -------------------------------------------------------------------------------- /www/fonts/glyphicons-halflings-regular.woff2: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neocogent/sqlchain/HEAD/www/fonts/glyphicons-halflings-regular.woff2 -------------------------------------------------------------------------------- /etc/sqlchain.log.template: -------------------------------------------------------------------------------- 1 | {coindir}/debug.log {{ 2 | weekly 3 | copytruncate 4 | rotate 4 5 | compress 6 | delaycompress 7 | missingok 8 | notifempty 9 | }} 10 | -------------------------------------------------------------------------------- /etc/systemd.template: 
-------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=sqlChain - title 3 | 4 | [Service] 5 | Type=forking 6 | ExecStart 7 | TimeoutStopSec=2min 8 | 9 | [Install] 10 | WantedBy=multi-user.target 11 | -------------------------------------------------------------------------------- /etc/post-receive: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | unset GIT_DIR 4 | DEPLOY_WORK="${HOME}/work" 5 | 6 | while read from to branch 7 | do 8 | mkdir -p "${DEPLOY_WORK}" 9 | GIT_WORK_TREE="${DEPLOY_WORK}" git checkout -f "${branch}" 10 | cd "${DEPLOY_WORK}" 11 | if [ -f etc/deploy ]; then 12 | etc/deploy "${branch##*/}" 13 | fi 14 | rm -rf "${DEPLOY_WORK}" 15 | done 16 | -------------------------------------------------------------------------------- /utils/blkbtc: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | if [ "$1" == "off" ]; then 4 | echo "bitcoin open" 5 | CHG="D" 6 | else 7 | echo "bitcoin blocked" 8 | CHG="A" 9 | fi 10 | 11 | sudo iptables -$CHG INPUT -p tcp --dport 8333 -j DROP 12 | sudo iptables -$CHG OUTPUT -p tcp --dport 8333 -j DROP 13 | sudo iptables -$CHG INPUT -p tcp --dport 18333 -j DROP 14 | sudo iptables -$CHG OUTPUT -p tcp --dport 18333 -j DROP 15 | -------------------------------------------------------------------------------- /etc/node.log.template: -------------------------------------------------------------------------------- 1 | {sqldir}/api.log {{ 2 | weekly 3 | rotate 4 4 | compress 5 | delaycompress 6 | missingok 7 | notifempty 8 | postrotate 9 | [ -f {sqldir}/api.pid ] && kill -HUP `cat {sqldir}/api.pid` 10 | endscript 11 | }} 12 | 13 | {sqldir}/daemon.log {{ 14 | weekly 15 | rotate 4 16 | compress 17 | delaycompress 18 | missingok 19 | notifempty 20 | postrotate 21 | [ -f {sqldir}/daemon.pid ] && kill -HUP `cat {sqldir}/daemon.pid` 22 | endscript 23 | }} 24 | 
-------------------------------------------------------------------------------- /etc/nginx.template: -------------------------------------------------------------------------------- 1 | server {{ 2 | listen 80; 3 | listen [::]:80; 4 | listen 443 ssl http2; 5 | listen [::]:443 ssl http2; 6 | server_name {apidomain}; 7 | 8 | ssl_certificate {sslpath}/fullchain.pem; 9 | ssl_certificate_key {sslpath}/privkey.pem; 10 | ssl_protocols TLSv1 TLSv1.1 TLSv1.2; 11 | 12 | location / {{ 13 | proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; 14 | proxy_set_header Host $http_host; 15 | proxy_set_header X-Forwarded-Proto $scheme; 16 | proxy_pass http://{listen}; 17 | }} 18 | }} 19 | -------------------------------------------------------------------------------- /tests/conftest.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | def pytest_addoption(parser): 4 | parser.addoption("--runlive", action="store_true", help="run live tests") 5 | parser.addoption("--nosigs", action="store_true", help="is nosigs db") 6 | parser.addoption("--dbuser", action="store", default="root:root", help="db user:pwd for mysql tests") 7 | parser.addoption("--coin", action="store", default="btc", help="set coin type, default btc") 8 | parser.addoption("--server", action="store", default="localhost:8085/api", help="set api-server: host:port/api-path") 9 | parser.addoption("--append", action="store_true", help="don't clear previous live test results") 10 | 11 | -------------------------------------------------------------------------------- /etc/deploy: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # for server deploy of git repo - customize to suit 3 | # include in repo - needs a post-receive hook to call 4 | # see docs directory for example 5 | # checkout is current directory 6 | # runs as user git 7 | 8 | declare -A branch 9 | branch["dev"]="/usr/local" 10 | 
branch["tests"]="/usr/local" 11 | 12 | if [[ "${branch[$1]}" ]]; then 13 | target=${branch[$1]} 14 | 15 | echo "Copying executable and package files" 16 | cp sqlchaind sqlchain-api sqlchain-config sqlchain-electrum $target/bin/ 17 | cp -r sqlchain/* $target/lib/python2.7/dist-packages/sqlchain/ 18 | cp -r etc $target/share/sqlchain/ 19 | cp -r tests /home/chris/ 20 | 21 | else 22 | echo "Branch "$1" has no target - not deployed." 23 | fi 24 | 25 | -------------------------------------------------------------------------------- /www/js/ie10-viewport-bug-workaround.js: -------------------------------------------------------------------------------- 1 | /*! 2 | * IE10 viewport hack for Surface/desktop Windows 8 bug 3 | * Copyright 2014-2015 Twitter, Inc. 4 | * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE) 5 | */ 6 | 7 | // See the Getting Started docs for more information: 8 | // http://getbootstrap.com/getting-started/#support-ie10-width 9 | 10 | (function () { 11 | 'use strict'; 12 | 13 | if (navigator.userAgent.match(/IEMobile\/10\.0/)) { 14 | var msViewportStyle = document.createElement('style') 15 | msViewportStyle.appendChild( 16 | document.createTextNode( 17 | '@-ms-viewport{width:auto!important}' 18 | ) 19 | ) 20 | document.querySelector('head').appendChild(msViewportStyle) 21 | } 22 | 23 | })(); 24 | -------------------------------------------------------------------------------- /utils/fixblksize: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # 3 | # fix blksize when missing 4 | # 5 | 6 | cfg = { 'dbinfo':"localhost:btc:pwd:testnet", 'path':'/var/data/sqlchain/testnet' } 7 | 8 | blk = 0 9 | max_blk = 907843 10 | 11 | import MySQLdb as db 12 | 13 | sql = db.connect(*cfg['dbinfo'].split(':')) 14 | cur = sql.cursor() 15 | 16 | fixlist = [] 17 | while blk < max_blk: 18 | cur.execute("select count(*),sum(txsize) from trxs where block_id>=%s and block_id <%s;", 
(blk*20000,(blk+1)*20000)) 19 | txcnt,blksize = cur.fetchone() 20 | blksize += 81 if txcnt <= 252 else 83 21 | fixlist.append((blksize,blk)) 22 | if blk % 10000 == 0: 23 | print "Commit:",blk 24 | cur.executemany("update blocks set blksize=%s where id=%s;", fixlist) 25 | sql.commit() 26 | fixlist = [] 27 | blk += 1 28 | 29 | cur.executemany("update blocks set blksize=%s where id=%s;", fixlist) 30 | sql.commit() 31 | 32 | print "Done",blk 33 | 34 | -------------------------------------------------------------------------------- /www/css/signin.css: -------------------------------------------------------------------------------- 1 | body { 2 | padding-top: 40px; 3 | padding-bottom: 40px; 4 | background-color: #eee; 5 | } 6 | 7 | .form-signin { 8 | max-width: 330px; 9 | padding: 15px; 10 | margin: 0 auto; 11 | } 12 | .form-signin .form-signin-heading, 13 | .form-signin .checkbox { 14 | margin-bottom: 10px; 15 | } 16 | .form-signin .checkbox { 17 | font-weight: normal; 18 | } 19 | .form-signin .form-control { 20 | position: relative; 21 | height: auto; 22 | -webkit-box-sizing: border-box; 23 | -moz-box-sizing: border-box; 24 | box-sizing: border-box; 25 | padding: 10px; 26 | font-size: 16px; 27 | } 28 | .form-signin .form-control:focus { 29 | z-index: 2; 30 | } 31 | .form-signin input[type="email"] { 32 | margin-bottom: -1px; 33 | border-bottom-right-radius: 0; 34 | border-bottom-left-radius: 0; 35 | } 36 | .form-signin input[type="password"] { 37 | margin-bottom: 10px; 38 | border-top-left-radius: 0; 39 | border-top-right-radius: 0; 40 | } 41 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Copyright (c) 2015-2018 neoCogent.com 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software 
without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /utils/fixchainwork: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # 3 | # fix chainwork when missing 4 | # 5 | 6 | cfg = { 'dbinfo':"localhost:btc:pwd:testnet", 'path':'/var/data/sqlchain/testnet' } 7 | 8 | blk = 0 9 | max_blk = 907843 10 | chainwork = 0 11 | 12 | from struct import unpack_from 13 | from binascii import unhexlify 14 | import MySQLdb as db 15 | 16 | def blockwork(bits): 17 | return 2**256/((bits&0xFFFFFF) * 2**(8*((bits>>24) - 3))+1) 18 | def int2bin32(val): 19 | return unhexlify('%064x' % val) 20 | 21 | def gethdr(blk, cfg, var=None): 22 | with open(cfg['path']+'/hdrs.dat', 'rb') as f: 23 | f.seek(blk*80) 24 | data = f.read(80) 25 | hdr = dict(zip(['version','previousblockhash','merkleroot', 'time', 'bits', 'nonce'], unpack_from(' 0; 13 | 14 | -- Count number of non-std tx outputs (because they don't have an address) 15 | select count(*) from outputs where addr_id=0; 16 | 17 | -- Count number of 
unspent outputs (tx_id is the tx where each output is spent, so null means not spent) 18 | select count(*) from outputs where tx_id is null; 19 | 20 | -- Find missing blocks (gaps) in blkdat sequence. 21 | -- select t1.id-1 from blkdat t1 left outer join blkdat t2 on t2.id=t1.id-1 where t2.id is null and t1.id > 0 order by t1.id; 22 | 23 | -- Find txs not in mempool but remaining unconfirmed in trxs table - usually rejects or double spend attempts 24 | -- select id,hex(reverse(hash)) from trxs where block_id=-1 and id not in (select id from mempool); 25 | -------------------------------------------------------------------------------- /RELEASE-NOTES: -------------------------------------------------------------------------------- 1 | v0.2.9 - default api when none set 2 | v0.2.8 - fix bci ws bug, improve logrotate cfg 3 | v0.2.7 - add config apis support, defaults as before 4 | v0.2.6 - add max_blks support, limit to recent blocks 5 | v0.2.5 - initial bech32 and multi-coin support, db upgrade! 
6 | v0.2.3 - fix script decode index error 7 | v0.1.40 - alpha 1 release, update readme and release notes 8 | v0.1.39 - make split blobs default, update install docs 9 | v0.1.36 - fix s3 ranges, insight db status 10 | v0.1.35 - fix seek in single blobs 11 | v0.1.32 - add rpcPool, handle zero blob data 12 | v0.1.31 - update install guide, nginx proxy, demo web page info 13 | v0.1.30 - add locks to insertAddress, improve checkMemPool 14 | v0.1.28 - fix addOrphan bug 15 | v0.1.27 - chg near sync handling, update install guide 16 | v0.1.26 - Jan 20, 2017 -- add outQ threading, chg findTx, improve sync speed 17 | v0.1.25 - Jan 12, 2017 -- fix bugs, shutdown thread handling, add sslContext for python >= 2.7.9, logrotate support, sighup 18 | v0.1.23 - Jan 09, 2017 -- fix blkdat sync problems, install guide, bump version 19 | v0,1,21 - Jan 07, 2017 -- fix api, sqlchain-init 20 | v0.1.15 - Jan 06, 2017 -- update sqlchain-init, tag as 0.1.15, fix bugs 21 | v0.1.14 - Jan 06, 2017 -- fix api server, add www to pypi package, sqlchain-init copy www 22 | v0.1.13 - Dec 30, 2016 -- improve sqlchain-init, fix bugs 23 | v0.1.12 - Dec 29, 2016 -- pruneblockchain support, sqlchain-init script, fix bugs in daemon mode 24 | v0.1.1 - Aug 22, 2015 -- fix bug in orphan/re-org code 25 | v0.1.0 - Aug 22, 2015 -- Initial release. 
26 | 27 | -------------------------------------------------------------------------------- /sqlchain/version.py: -------------------------------------------------------------------------------- 1 | # like a version, touched for the very first time 2 | 3 | version = '0.2.10' 4 | 5 | # definitions for coin types / chains supported 6 | # selected by sqc.cfg['cointype'] 7 | 8 | ADDR_CHAR = 0 9 | ADDR_PREFIX = 1 10 | P2SH_CHAR = 2 11 | P2SH_PREFIX = 3 12 | BECH_HRP = 4 13 | BLKDAT_MAGIC = 5 14 | BLKDAT_NEAR_SYNC = 6 15 | BLK_REWARD = 7 16 | HALF_BLKS = 8 17 | 18 | coin_cfg = { 19 | 'bitcoin': [ '1', 0, '3', 5, 'bc', 0xD9B4BEF9, 500, (50*1e8), 210000 ], 20 | 'testnet': [ 'mn', 111, '2', 196, 'tb', 0x0709110B, 8000, (50*1e8), 210000 ], 21 | 'litecoin':[ 'L', 48, '3M', 50, 'ltc', 0xDBB6C0FB, 500, (50*1e8), 840000 ], 22 | 'reddcoin':[ 'R', 48, '3', 5, 'rdd', 0xDBB6C0FB, 500, 0, None ], 23 | 'dogecoin':[ 'D', 30, '9A', 22, 'doge', 0xC0C0C0C0, 500, 0, None ], 24 | 'vertcoin':[ 'V', 71, '3', 5, 'vtc', 0xDAB5BFFA, 500, 0, None ] 25 | } 26 | 27 | def coincfg(IDX): 28 | return coin_cfg[sqc.cfg['cointype']][IDX] 29 | 30 | # addr id flags 31 | ADDR_ID_FLAGS = 0x70000000000 32 | P2SH_FLAG = 0x10000000000 33 | BECH32_FLAG = 0x20000000000 34 | BECH32_LONG = 0x30000000000 35 | 36 | # global version related definitions 37 | # cannot change these without first updating existing table schema and data 38 | # these are set to reasonable values for now - to increase, alter trxs.block_id or outputs.id column widths 39 | # and update data eg. update trxs set block_id=block_id div OLD_MAX * NEW_MAX + block_id % OLD_MAX 40 | MAX_TX_BLK = 20000 # allows 9,999,999 blocks with decimal(11) 41 | MAX_IO_TX = 16384 # allows 37 bit out_id value, (5 byte hash >> 3)*16384 in decimal(16), 7 bytes in blobs 42 | BLOB_SPLIT_SIZE = int(5e9) # size limit for split blobs, approx. 
as may extend past if tx on boundary 43 | S3_BLK_SIZE = 4096 # s3 block size for caching 44 | -------------------------------------------------------------------------------- /utils/synctest: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | import os, sys, json, time, signal 4 | from urllib2 import urlopen, URLError 5 | from time import sleep 6 | 7 | cfg = { 'api':'http://localhost:8086' } 8 | 9 | def doSync(): 10 | sync_id = 0 11 | lastblk = 0 12 | while True: 13 | print "Sync at %d" % sync_id 14 | resp = apicall('/api/sync/'+str(sync_id)) 15 | if resp and 'error' in resp: 16 | sleep(10) 17 | elif resp: 18 | for tx in resp['txs']: 19 | print "TX:", tx['hash'], len(tx['out']) 20 | if resp['block'] != lastblk: 21 | lastblk = resp['block'] 22 | print "##################\nBlock %d\n##################" % lastblk 23 | if sync_id > 0: 24 | blk = apicall('/bci/block-height/'+str(lastblk)) 25 | blk = blk['blocks'][0] 26 | for tx in blk['tx'][:2]: 27 | print "TX:", tx['hash'], len(tx['out']) 28 | cfg['block'] = blk['height'] 29 | print "Blk: %d" % blk['height'] 30 | cfg['blockts'] = int(time.time()) 31 | sync_id = resp['sync_id']+1 32 | 33 | def apicall(url, post=None): 34 | try: 35 | #print "call:", url 36 | data = urlopen(cfg['api']+url, post).read() 37 | except URLError: 38 | print "Error: sqlchain-api not at %s" % cfg['api'] 39 | return { 'error':'No api connection' } 40 | try: 41 | data = json.loads(data) 42 | except ValueError: 43 | print "Error: api returned non-json data %s" % data 44 | pass 45 | return data 46 | 47 | def sigterm_handler(_signo, _stack_frame): 48 | print "\nClean shutdown.\n" 49 | sys.exit(0) 50 | 51 | if __name__ == '__main__': 52 | signal.signal(signal.SIGINT, sigterm_handler) 53 | doSync() 54 | -------------------------------------------------------------------------------- /www/js/main.js: -------------------------------------------------------------------------------- 1 | 
(function($){ 2 | var id = 0; 3 | 4 | function apiClk(e) { 5 | var url='', obj = $(this); 6 | e.stopPropagation(); 7 | if(obj.hasClass('opt')) 8 | url = obj.text(); 9 | if(obj.is('span')) { 10 | obj = obj.parent(); 11 | url = obj.text().split('?')[0] + url; 12 | } 13 | var objIns = obj.next().hasClass('info') ? obj.next() : obj; 14 | if($(this).is( 'li' )) { 15 | if(obj.next().hasClass('info')) 16 | obj.next().toggle(); 17 | if(obj.hasClass('noargs')) 18 | url = obj.text().split('?')[0] 19 | } 20 | if(objIns.next().hasClass('resp')) 21 | objIns.next().remove(); 22 | else { 23 | $.get(url, function( data ) { 24 | var msg = $('#blank').clone().attr('id', '#d'+id).insertAfter(objIns); 25 | msg.html(obj.hasClass('raw') ? data : "
"+JSON.stringify(data, null, 2)+"
"); 26 | id += 1; 27 | }, "json"); 28 | } 29 | } 30 | function respClose(e) { 31 | if ($(this).next().hasClass('resp')) 32 | $(this).next().remove(); 33 | $(this).click(); 34 | } 35 | $(document).ready( function() { 36 | $('.apiClk li').click(apiClk); 37 | $('.apiClk span').click(apiClk); 38 | $('#findform').submit( function( e ) { 39 | if ($('#findform').next().hasClass('resp')) 40 | $('#findform').next().remove(); 41 | $.post( '/api/auto', $(this).serialize(), function(data) { 42 | var msg = $('#blank').clone().attr('id', '#d'+id).insertAfter($('#findform')); 43 | msg.html("
"+JSON.stringify(data, null, 2)+"
"); 44 | $('#findform').click(respClose); 45 | id += 1; 46 | }, "json"); 47 | e.preventDefault(); 48 | }); 49 | }); 50 | })(jQuery); 51 | -------------------------------------------------------------------------------- /utils/bwlimit: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Copyright (c) 2013 The Bitcoin Core developers 3 | # Distributed under the MIT software license, see the accompanying 4 | # file COPYING or http://www.opensource.org/licenses/mit-license.php. 5 | 6 | #network interface on which to limit traffic 7 | IF="wlan0" 8 | #limit of the network interface in question 9 | LINKCEIL="1gbit" 10 | #limit outbound Bitcoin protocol traffic to this rate 11 | LIMIT="100kbit" 12 | #defines the address space for which you wish to disable rate limiting 13 | LOCALNET="192.168.0.0/16" 14 | 15 | #delete existing rules 16 | tc qdisc del dev ${IF} root 17 | iptables -t mangle -F OUTPUT 18 | 19 | #add root class 20 | tc qdisc add dev ${IF} root handle 1: htb default 10 21 | 22 | #add parent class 23 | tc class add dev ${IF} parent 1: classid 1:1 htb rate ${LINKCEIL} ceil ${LINKCEIL} 24 | 25 | #add our two classes. one unlimited, another limited 26 | tc class add dev ${IF} parent 1:1 classid 1:10 htb rate ${LINKCEIL} ceil ${LINKCEIL} prio 0 27 | tc class add dev ${IF} parent 1:1 classid 1:11 htb rate ${LIMIT} ceil ${LIMIT} prio 1 28 | 29 | #add handles to our classes so packets marked with go into the class with "... handle fw ..." 30 | tc filter add dev ${IF} parent 1: protocol ip prio 1 handle 1 fw classid 1:10 31 | tc filter add dev ${IF} parent 1: protocol ip prio 2 handle 2 fw classid 1:11 32 | 33 | #delete any existing rules 34 | #disable for now 35 | #ret=0 36 | #while [ $ret -eq 0 ]; do 37 | # iptables -t mangle -D OUTPUT 1 38 | # ret=$? 39 | #done 40 | 41 | #limit outgoing traffic to and from port 8333. 
but not when dealing with a host on the local network 42 | # (defined by $LOCALNET) 43 | # --set-mark marks packages matching these criteria with the number "2" 44 | # these packages are filtered by the tc filter with "handle 2" 45 | # this filter sends the packages into the 1:11 class, and this class is limited to ${LIMIT} 46 | iptables -t mangle -A OUTPUT -p tcp -m tcp --dport 8333 ! -d ${LOCALNET} -j MARK --set-mark 0x2 47 | iptables -t mangle -A OUTPUT -p tcp -m tcp --sport 8333 ! -d ${LOCALNET} -j MARK --set-mark 0x2 48 | -------------------------------------------------------------------------------- /www/wstest.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | WebSocket Test 6 | 7 | 71 | 72 |

WebSocket Test

73 | 74 |
75 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | import setuptools, os 2 | from distutils.core import setup 3 | 4 | from sqlchain.version import * 5 | 6 | try: 7 | import pypandoc 8 | readme_md = pypandoc.convert('README.md', 'rst') 9 | except (IOError, ImportError): 10 | readme_md = open('README.md').read() 11 | 12 | destdir = os.path.join('share','sqlchain') 13 | datafiles = [(os.path.join(destdir,d), [os.path.join(d,f) for f in files]) for d, folders, files in os.walk('www')] 14 | datafiles.extend([(os.path.join(destdir,d), [os.path.join(d,f) for f in files]) for d, folders, files in os.walk('etc')]) 15 | 16 | setup( 17 | name='sqlchain', 18 | packages=['sqlchain'], 19 | version=version, 20 | author='neoCogent.com', 21 | author_email='info@neocogent.com', 22 | url='https://github.com/neocogent/sqlchain', 23 | download_url='https://github.com/neocogent/sqlchain/tarball/'+version, 24 | license='MIT', 25 | classifiers=[ 26 | #'Development Status :: 3 - Alpha', 27 | 'Development Status :: 4 - Beta', 28 | #'Development Status :: 5 - Production/Stable', 29 | 'Intended Audience :: Developers', 30 | 'Intended Audience :: System Administrators', 31 | 'Operating System :: POSIX :: Linux', 32 | 'Topic :: Database :: Database Engines/Servers', 33 | 'Topic :: Internet :: WWW/HTTP :: WSGI :: Server', 34 | 'Topic :: Office/Business :: Financial', 35 | 'License :: OSI Approved :: MIT License', 36 | 'Programming Language :: Python :: 2.7' 37 | ], 38 | keywords='bitcoin sql blockchain api websocket rpc server', 39 | description='Compact SQL layer for Bitcoin blockchain.', 40 | long_description=readme_md, 41 | scripts=['sqlchaind','sqlchain-api','sqlchain-electrum','sqlchain-config'], 42 | install_requires=[ 43 | "gevent >= 1.2.0", 44 | "gevent-websocket >= 0.9.5", 45 | "python-daemon >= 1.5.5", 46 | "MySQL-python >= 1.2.5", 47 | "python-bitcoinrpc 
>= 0.1", 48 | "backports.functools_lru_cache >= 1.2.1", 49 | "python2-pythondialog >= 3.4.0" 50 | ], 51 | data_files=datafiles 52 | 53 | ) 54 | -------------------------------------------------------------------------------- /www/css/main.css: -------------------------------------------------------------------------------- 1 | body { 2 | padding-top: 60px; 3 | padding-bottom: 40px; 4 | } 5 | 6 | input[disabled],select[disabled],textarea[disabled],input[readonly],select[readonly],textarea[readonly]{background-color:#eeeeee;border-color:#ddd;cursor:text;} 7 | 8 | legend { 9 | margin-bottom: 0px; 10 | } 11 | 12 | .navbar-brand { 13 | font-weight:bold; 14 | color: #9ED8F7; 15 | } 16 | 17 | .navbar .beta { 18 | position: relative; 19 | float: left; 20 | left: -65px; 21 | top: 32px; 22 | font-size: 9px; 23 | font-weight: 700; 24 | color: red; 25 | } 26 | 27 | .checkbox.inline { 28 | vertical-align: top; 29 | } 30 | 31 | .tab-pane em { 32 | color: darkblue; 33 | } 34 | 35 | .tab-pane h3 { 36 | line-height: 50px; 37 | } 38 | 39 | h3 { 40 | color: darkblue; 41 | } 42 | 43 | .subsection { 44 | margin-top: 50px; 45 | } 46 | 47 | .container { width: 800px; } 48 | 49 | #overview { width:750px; text-align:justify; } 50 | legend { margin: 20px 0 10px; } 51 | #overview ul, #api ul, #extend ol { margin-top:10px; line-height:180%; } 52 | 53 | .resp { margin: 5px 40px 5px 20px; } 54 | .rawtx { width:700px; margin-left:-40px; background-color:#eeeeee; font-family: monospace; font-size:12px; } 55 | .rawtx tr td { padding:5px; border: 1px solid white; word-break: break-all; } 56 | .rawtx tr td:first-child { width:150px; text-align: right; } 57 | .opcode { color:blue; } 58 | 59 | #findbox { margin-top:20px; } 60 | #finddata { width:600px; font-size:16px; font-weight:400; } 61 | .form-horizontal .find-label { padding-top:3px; font-size:21px; font-weight:400; } 62 | 63 | .apiClk li span { color:lightsteelblue; font-size:13px; } 64 | .apiClk li .opt { margin-left:5px; position:relative; 
font-size:13px; color:coral; } 65 | .apiClk li:hover, .apiClk li span:hover, .apiClk li .opt:hover { cursor: pointer; color:red; } 66 | .apiClk .info { display:none; margin: 0 0 10px; padding: 9.5px;background-color: #f5f5f5; 67 | border: 1px solid #ccc; border-radius: 4px; color: #333; word-break: break-all; word-wrap: break-word; } 68 | 69 | #install ol li { margin-top: 3px; } 70 | .bash { display:block; text-align:left; font-family: monospace; margin-left:10px; position:relative; font-size:13px; color:coral; } 71 | 72 | -------------------------------------------------------------------------------- /etc/electrum.banner: -------------------------------------------------------------------------------- 1 | 2 | _,aaaaaaaaaaaaaaaaaaa,_ _,aaaaaaaaaaaaaaaaaaa,_ 3 | ,P" "Y, ,P" "Y, 4 | d' ,aaaaaaaaaaaaaaa, `b d' ,aaaaaaaaaaaaaaa, `b 5 | d' ,d" ,aaabaaaa8aaaaaaaaaa8aaaadaaa, "b, `b 6 | I I I Electrum I ,adba, I I 7 | Y, `Y, `aaaaaaaaaaaaaaaaaaaaaaaaaaaa' I I,P' ,P 8 | Y, `baaaaaaaaaaaaaaad' ,P Y, `baaaaaaaaaI Id' ,P 9 | `b, ,d' `b, I I ,d' 10 | `baaaaaaaaaaaaaaaaaaad' `baaaaaaaaaaaI Iaad' 11 | I I 12 | I I 13 | I I 14 | _,aaaaaaaaaaaaaaaaaaa,_ _,aaaaaaaaaaaI Iaa,_ 15 | ,P" "Y, ,P" I I "Y, 16 | d' ,aaaaaaaaaaaaaaa, `b d' ,aaaaaaaaaI I, `b 17 | d' ,d" ,aaabaaaa8aaaaaaaaaa8aaaadaaa, I I"b, `b 18 | I I ,adba, I sqlChain I `"YP"' I I 19 | Y, `Y,I I `aaaaaaaaaaaaaaaaaaaaaaaaaaaa' ,P' ,P 20 | Y, `bI Iaaaaaaaaad' ,P Y, `baaaaaaaaaaaaaaad' ,P 21 | `b, I I ,d' `b, ,d' 22 | `baaI Iaaaaaaaaaaad' `baaaaaaaaaaaaaaaaaaad' 23 | I I 24 | I I 25 | I I 26 | _,aaI Iaaaaaaaaaaa,_ _,aaaaaaaaaaaaaaaaaaa,_ 27 | ,P" I I "Y, ,P" "Y, 28 | d' ,I Iaaaaaaaaa, `b d' ,aaaaaaaaaaaaaaa, `b 29 | d' ,d"I I ,aaabaaaa8aaaaaaaaaa8aaaadaaa, "b, `b 30 | I I `"YP"' I I I I 31 | Y, `Y, `aaaaaaaaaaaaaaaaaaaaaaaaaaaa' ,P' ,P 32 | Y, `baaaaaaaaaaaaaaad' ,P Y, `baaaaaaaaaaaaaaad' ,P 33 | `b, ,d' `b, ,d' 34 | `baaaaaaaaaaaaaaaaaaad' `baaaaaaaaaaaaaaaaaaad' 35 | 36 | Electrum on sqlChain 
1BJv3XhLHR6xSraLqoJqNBTu69266Z52gW 37 | -------------------------------------------------------------------------------- /sqlchain/rpc.py: -------------------------------------------------------------------------------- 1 | # 2 | # RPC compatible API module 3 | # 4 | import urlparse, json, decimal 5 | 6 | from bitcoinrpc.authproxy import AuthServiceProxy 7 | from sqlchain.util import gethdr, bits2diff 8 | 9 | # encode json btc values as satoshi integer 10 | class btcEncoder(json.JSONEncoder): 11 | def default(self, o): 12 | if isinstance(o, decimal.Decimal): 13 | return int(float(o)*1e8) 14 | return super(btcEncoder, self).default(o) 15 | 16 | def do_RPC(env, send_resp): 17 | _,args,cur = urlparse.parse_qs(env['QUERY_STRING']), env['PATH_INFO'].split('/')[2:], sqc.dbpool.get().cursor() 18 | send_resp('200 OK', [('Content-Type', 'application/json')]) 19 | result = [] 20 | if args[0] == "getblockcount": 21 | result = json.dumps(sqc.cfg['block']) 22 | elif args[0] == "getinfo": 23 | result = json.dumps( { 'blocks':sqc.cfg['block'], 'difficulty':bits2diff(gethdr(sqc.cfg['block'], sqc.cfg, 'bits')) } ) 24 | elif args[0] == "getdifficulty": 25 | result = json.dumps( bits2diff(gethdr(sqc.cfg['block'], sqc.cfg, 'bits')) ) 26 | else: 27 | rpc = AuthServiceProxy(sqc.cfg['rpc']) 28 | if args[0] == "getblock": 29 | result = json.dumps( rpc.getblock(args[1]), cls=btcEncoder ) 30 | elif args[0] == "getblockhash": 31 | result = json.dumps( rpc.getblockhash(int(args[1])) ) 32 | elif args[0] == "getrawtransaction": 33 | result = json.dumps( rpc.getrawtransaction(args[1], 1), cls=btcEncoder ) 34 | elif args[0] == "gettxout": 35 | result = json.dumps( rpcTxOut(cur, args[1], args[2]) ) 36 | elif args[0] == "getmempoolinfo": 37 | result = json.dumps( rpc.getmempoolinfo(), cls=btcEncoder ) 38 | elif args[0] == "getrawmempool": 39 | result = json.dumps( rpc.getrawmempool(False), cls=btcEncoder ) 40 | return result 41 | 42 | def rpcTxOut(cur, txhash, out): 43 | return 'blah' # todo find 
output in fmt below 44 | 45 | ''' 46 | { 47 | "bestblock" : "00000000c92356f7030b1deeab54b3b02885711320b4c48523be9daa3e0ace5d", 48 | "confirmations" : 0, 49 | "value" : 0.00100000, 50 | "scriptPubKey" : { 51 | "asm" : "OP_DUP OP_HASH160 a11418d3c144876258ba02909514d90e71ad8443 OP_EQUALVERIFY OP_CHECKSIG", 52 | "hex" : "76a914a11418d3c144876258ba02909514d90e71ad844388ac", 53 | "reqSigs" : 1, 54 | "type" : "pubkeyhash", 55 | "addresses" : [ 56 | "mvCfAJSKaoFXoJEvv8ssW7wxaqRPphQuSv" 57 | ] 58 | }, 59 | "version" : 1, 60 | "coinbase" : false 61 | } 62 | 63 | ''' 64 | -------------------------------------------------------------------------------- /utils/stripsigs: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # 3 | # scan trxs and remove input sigScript data 4 | # 5 | # - user at your own risk - this script may not work as expected 6 | # - can reduce blob file size by ~93% 7 | # - if sqlchaind isn't run with --no-sigs option then new blocks will have sig data 8 | # - raw trx api calls will return non-standard data because missing sig data 9 | # - this rewrites blobs.dat to blobs-nosigs.dat and then renames for sqlchaind use 10 | # - sqlchaind should be stopped while this is run, and the trxs table should be backed up 11 | # as it contains the only index to old blobs.dat if something goes wrong 12 | # 13 | import os, sys 14 | import MySQLdb as db 15 | from MySQLdb import cursors 16 | from struct import pack, unpack, unpack_from 17 | 18 | from sqlchain import util 19 | 20 | blobname = '/var/data/nosigs.dat' 21 | dbcfg = "localhost:btc:password:bitcoin" 22 | 23 | if not os.path.isfile(blobname): 24 | open(blobname, 'a').close() 25 | 26 | sqlR = db.connect(*dbcfg.split(':')) 27 | curR = sqlR.cursor(cursors.SSCursor) # we use a server side cursor so all >80 million trxs aren't buffered 28 | curR.execute("select count(*) from trxs;") 29 | rows, = curR.fetchone() 30 | 31 | sqlU = db.connect(*dbcfg.split(':')) 32 
| curU = sqlU.cursor() # we use a second connection normal cursor to do the updates 33 | curU.execute("create table txtmp ( `id` decimal(13) NOT NULL,`txdata` decimal(13,0) DEFAULT NULL ) ENGINE=MyISAM;") 34 | 35 | with open(blobname, 'r+b') as blobfile: 36 | count = 0 37 | curR.execute("select id,txdata,ins,outs from trxs;") 38 | for txid,blob,ins,outs in curR: 39 | hdr = getBlobHdr(int(blob)) 40 | if ins >= 192: 41 | ins = (ins & 63)*256 + hdr[1] 42 | pos = int(blob) + hdr[0] + ins*7 43 | buf = readBlob(int(blob), pos-int(blob)) 44 | for n in range(ins): 45 | vsz,off = decodeVarInt(readBlob(pos, 9)) if not hdr[7] else (0,0) 46 | pos += off+vsz 47 | buf += readBlob(pos, (0 if hdr[6] else 4)) 48 | pos += (0 if hdr[6] else 4) 49 | if outs >= 192: 50 | outs = (outs & 63)*256 + hdr[2] 51 | for n in range(outs): 52 | vsz,off = decodeVarInt(readBlob(pos, 9)) 53 | buf += readBlob(pos, off+vsz) 54 | pos += off+vsz 55 | txdata = blobfile.tell() 56 | blobfile.write(buf) 57 | curU.execute("insert into txtmp (id,txdata) value(%s,%s);", (txid, txdata)) 58 | count += 1 59 | if count % 100000 == 0: 60 | print "%2.1f" % count*100/rows 61 | 62 | print "Blob created. Now updating table to match new blob file." 63 | 64 | # disabled for safety 65 | #curU.execute("update trxs t inner join txtmp x on t.id=x.id set t.txdata=x.txdata;") 66 | print 'Done.' 67 | 68 | os.rename('/var/data/blobs.dat', '/var/data/blobs.bak') 69 | os.rename(blobname, '/var/data/blobs.dat') 70 | print 'Renamed.' 
71 | 72 | 73 | -------------------------------------------------------------------------------- /etc/dogecoin.sql: -------------------------------------------------------------------------------- 1 | -- create new database after install 2 | -- need to do these as mysql root user 3 | 4 | CREATE USER IF NOT EXISTS '{dbuser}'@'localhost'; 5 | ALTER USER '{dbuser}'@'localhost' IDENTIFIED BY '{dbpwd}'; 6 | GRANT ALL PRIVILEGES ON {dbname}.* TO '{dbuser}'@'localhost'; 7 | FLUSH PRIVILEGES; 8 | 9 | CREATE DATABASE IF NOT EXISTS {dbname}; 10 | USE {dbname}; 11 | 12 | CREATE TABLE IF NOT EXISTS `blocks` ( 13 | `id` int(11) NOT NULL, 14 | `hash` binary(32) NOT NULL, 15 | `coinbase` varbinary(100) NOT NULL, 16 | `chainwork` binary(32) NOT NULL, 17 | `blksize` int(11) NOT NULL, 18 | PRIMARY KEY (`id`) 19 | ) ENGINE={dbeng} DEFAULT CHARSET=latin1; 20 | 21 | CREATE TABLE IF NOT EXISTS `address` ( 22 | `id` decimal(13) NOT NULL, 23 | `addr` binary(20) NOT NULL, 24 | PRIMARY KEY (`id`) 25 | ) ENGINE={dbeng} DEFAULT CHARSET=latin1; 26 | 27 | CREATE TABLE IF NOT EXISTS `bech32` ( 28 | `id` decimal(13) NOT NULL, 29 | `addr` binary(32) NOT NULL, 30 | PRIMARY KEY (`id`) 31 | ) ENGINE={dbeng} DEFAULT CHARSET=latin1; 32 | 33 | CREATE TABLE IF NOT EXISTS `trxs` ( 34 | `id` decimal(13) NOT NULL, 35 | `hash` binary(32) NOT NULL, 36 | `ins` tinyint unsigned NOT NULL, 37 | `outs` tinyint unsigned NOT NULL, 38 | `txsize` smallint unsigned NOT NULL, 39 | `txdata` decimal(13) DEFAULT NULL, 40 | `block_id` decimal(13) DEFAULT NULL, 41 | PRIMARY KEY (`id`), 42 | KEY `block` (`block_id`) 43 | ) ENGINE={dbeng} DEFAULT CHARSET=latin1; 44 | 45 | CREATE TABLE IF NOT EXISTS `outputs` ( 46 | `id` decimal(16) NOT NULL, 47 | `value` decimal(18) DEFAULT NULL, 48 | `addr_id` decimal(13) DEFAULT NULL, 49 | `tx_id` decimal(13) DEFAULT NULL, 50 | PRIMARY KEY (`id`), 51 | KEY `addr` (`addr_id`) 52 | ) ENGINE={dbeng} DEFAULT CHARSET=latin1; 53 | 54 | CREATE TABLE IF NOT EXISTS `mempool` ( 55 | `id` decimal(13) 
NOT NULL, 56 | `sync_id` int(11) DEFAULT NULL, 57 | PRIMARY KEY (`id`), 58 | KEY `sync` (`sync_id`) 59 | ) ENGINE={dbeng} DEFAULT CHARSET=latin1; 60 | 61 | CREATE TABLE IF NOT EXISTS `orphans` ( 62 | `sync_id` int(11) NOT NULL, 63 | `block_id` int(11) NOT NULL, 64 | `hash` binary(32) NOT NULL, 65 | `hdr` binary(80) NOT NULL, 66 | `coinbase` varbinary(100) NOT NULL, 67 | KEY (`sync_id`) 68 | ) ENGINE={dbeng} DEFAULT CHARSET=latin1; 69 | 70 | CREATE TABLE IF NOT EXISTS `blkdat` ( 71 | `id` int(11) NOT NULL, 72 | `hash` binary(32) NOT NULL, 73 | `prevhash` binary(32) NOT NULL, 74 | `filenum` int(11) NOT NULL, 75 | `filepos` int(11) NOT NULL, 76 | UNIQUE KEY `filenum` (`filenum`,`filepos`), 77 | KEY `id` (`id`), 78 | KEY `hash` (`hash`) 79 | ) ENGINE={dbeng} DEFAULT CHARSET=latin1; 80 | 81 | CREATE TABLE IF NOT EXISTS `info` ( 82 | `class` varbinary(12) NOT NULL, 83 | `key` varbinary(32) NOT NULL, 84 | `value` varchar(64) DEFAULT NULL, 85 | PRIMARY KEY `class` (`class`,`key`) 86 | ) ENGINE={dbeng} DEFAULT CHARSET=latin1; 87 | 88 | --dummy row so API will properly return null outputs 89 | INSERT IGNORE INTO `address` (`id`, `addr`) VALUES (0,''); 90 | -------------------------------------------------------------------------------- /etc/sqlchain.sql: -------------------------------------------------------------------------------- 1 | -- create new database after install 2 | -- need to do these as mysql root user 3 | 4 | CREATE USER IF NOT EXISTS '{dbuser}'@'localhost'; 5 | ALTER USER '{dbuser}'@'localhost' IDENTIFIED BY '{dbpwd}'; 6 | GRANT ALL PRIVILEGES ON {dbname}.* TO '{dbuser}'@'localhost'; 7 | FLUSH PRIVILEGES; 8 | 9 | CREATE DATABASE IF NOT EXISTS {dbname}; 10 | USE {dbname}; 11 | 12 | CREATE TABLE IF NOT EXISTS `blocks` ( 13 | `id` int(11) NOT NULL, 14 | `hash` binary(32) NOT NULL, 15 | `coinbase` varbinary(100) NOT NULL, 16 | `chainwork` binary(32) NOT NULL, 17 | `blksize` int(11) NOT NULL, 18 | PRIMARY KEY (`id`) 19 | ) ENGINE={dbeng} DEFAULT CHARSET=latin1; 20 
| 21 | CREATE TABLE IF NOT EXISTS `address` ( 22 | `id` decimal(13) NOT NULL, 23 | `addr` binary(20) NOT NULL, 24 | PRIMARY KEY (`id`) 25 | ) ENGINE={dbeng} DEFAULT CHARSET=latin1; 26 | 27 | CREATE TABLE IF NOT EXISTS `bech32` ( 28 | `id` decimal(13) NOT NULL, 29 | `addr` binary(32) NOT NULL, 30 | PRIMARY KEY (`id`) 31 | ) ENGINE={dbeng} DEFAULT CHARSET=latin1; 32 | 33 | CREATE TABLE IF NOT EXISTS `trxs` ( 34 | `id` decimal(13) NOT NULL, 35 | `hash` binary(32) NOT NULL, 36 | `ins` tinyint unsigned NOT NULL, 37 | `outs` tinyint unsigned NOT NULL, 38 | `txsize` smallint unsigned NOT NULL, 39 | `txdata` decimal(13) DEFAULT NULL, 40 | `block_id` decimal(11) DEFAULT NULL, 41 | PRIMARY KEY (`id`), 42 | KEY `block` (`block_id`) 43 | ) ENGINE={dbeng} DEFAULT CHARSET=latin1; 44 | 45 | CREATE TABLE IF NOT EXISTS `outputs` ( 46 | `id` decimal(16) NOT NULL, 47 | `value` decimal(16) DEFAULT NULL, 48 | `addr_id` decimal(13) DEFAULT NULL, 49 | `tx_id` decimal(13) DEFAULT NULL, 50 | PRIMARY KEY (`id`), 51 | KEY `addr` (`addr_id`) 52 | ) ENGINE={dbeng} DEFAULT CHARSET=latin1; 53 | 54 | CREATE TABLE IF NOT EXISTS `mempool` ( 55 | `id` decimal(13) NOT NULL, 56 | `sync_id` int(11) DEFAULT NULL, 57 | PRIMARY KEY (`id`), 58 | KEY `sync` (`sync_id`) 59 | ) ENGINE={dbeng} DEFAULT CHARSET=latin1; 60 | 61 | CREATE TABLE IF NOT EXISTS `orphans` ( 62 | `sync_id` int(11) NOT NULL, 63 | `block_id` int(11) NOT NULL, 64 | `hash` binary(32) NOT NULL, 65 | `hdr` binary(80) NOT NULL, 66 | `coinbase` varbinary(100) NOT NULL, 67 | KEY (`sync_id`) 68 | ) ENGINE={dbeng} DEFAULT CHARSET=latin1; 69 | 70 | CREATE TABLE IF NOT EXISTS `blkdat` ( 71 | `id` int(11) NOT NULL, 72 | `hash` binary(32) NOT NULL, 73 | `prevhash` binary(32) NOT NULL, 74 | `filenum` int(11) NOT NULL, 75 | `filepos` int(11) NOT NULL, 76 | UNIQUE KEY `filenum` (`filenum`,`filepos`), 77 | KEY `id` (`id`), 78 | KEY `hash` (`hash`) 79 | ) ENGINE={dbeng} DEFAULT CHARSET=latin1; 80 | 81 | CREATE TABLE IF NOT EXISTS `info` ( 82 | `class` 
varbinary(12) NOT NULL, 83 | `key` varbinary(32) NOT NULL, 84 | `value` varchar(64) DEFAULT NULL, 85 | PRIMARY KEY `class` (`class`,`key`) 86 | ) ENGINE={dbeng} DEFAULT CHARSET=latin1; 87 | 88 | --dummy row so API will properly return null outputs 89 | INSERT IGNORE INTO `address` (`id`, `addr`) VALUES (0,''); 90 | -------------------------------------------------------------------------------- /tests/README.md: -------------------------------------------------------------------------------- 1 | ### sqlChain Test Suite 2 | 3 | ##### Recent Updates (v0.2.5) 4 | 5 | Initial test suite commit. This is just a beginning in trying to make everything more robust. I hope to expand it's completeness over time. 6 | 7 | The current test suite is based on using the [pytest](https://docs.pytest.org/en/latest/contents.html) module. The live tests also require the [deepdiff](https://github.com/seperman/deepdiff) module. 8 | 9 | There are 3 classes of test here: 10 | 11 | - tests that can run standalone on the static codebase 12 | 13 | These try to assess whether module functions behave as expected independently and should be run before release. So far they only focus on the sqlchain.util module but I hope to extend wherever it may be worthwhile. 14 | 15 | - tests that depend on a transient memory-only version of the main database 16 | 17 | These still typically test on a unit basis but require some database interaction. To run they require the mysqldb module and a running mysql server plus a valid admin level user and password. The default user/pwd is hard coded as "root:root", making it a little easier for test runs in non-secure environments. A `--dbuser` option allows setting custom values for a test system, eg. `pytest --dbuser adm:hoochiedoll`, assuming you've created this user/pwd previously for testing. 18 | 19 | - live tests that depend on a installed and working system (to at least some extent). 
20 | 21 | These test actual operation and try to detect errors or discrepancies under (close to) real operating conditions. They also include some profiling functions to assess performance. So far this is focusing on sqlchain-api tests comparing sqlchain returned values to a sqlite database of reference results collected from other api-server resources. 22 | 23 | The `mklivetestdb` utility can be used to collect random data from other servers that is stored in livetest.db (a sqlite3 db file). This data is used as for comparison in tracking down abherant behaviour. Live tests are skipped by default and can be enabled with the `--runlive` option. Two other options add more flexibility: `--server` for setting the live server to connect with, and `--append` to disable the default cleaning of previous test data for each new run. eg. `--server localhost:8085/api` (the default, note it includes api path). 24 | 25 | ##### Command Summary 26 | 27 | These assume sqlchain is installed with setup.py or pip, and you are in the sqlchain root directory of a cloned repository. 
28 | 29 | `pytest` (will run any test cases it finds in the tests directory, skips any that cannot run for "reasons") 30 | 31 | `pytest -rs` (run tests and show reasons why if some are skipped) 32 | 33 | `pytest --dbuser root:mysecretpwd` (run db test cases) 34 | 35 | `pytest --runlive` (run live api tests with clean test results db) 36 | 37 | `pytest --runlive --server mytestrig.info:8989/rig-api/` (run live api tests against remote server) 38 | 39 | `pytest --runlive --append` (run live api tests but keep old data) 40 | 41 | -------------------------------------------------------------------------------- /etc/reddcoin.sql: -------------------------------------------------------------------------------- 1 | -- reddcoin sql def changes: 2 | -- * increase output value amounts to 1 digits because the first few blocks 3 | -- actually have values that require it, and larger colum takes same byte count (8) 4 | -- * increase block_id to allow more than 5,000,000 blocks 5 | -- reddcoin already up to 2,000,000 with ~1 minute blocks 6 | 7 | CREATE USER IF NOT EXISTS '{dbuser}'@'localhost'; 8 | ALTER USER '{dbuser}'@'localhost' IDENTIFIED BY '{dbpwd}'; 9 | GRANT ALL PRIVILEGES ON {dbname}.* TO '{dbuser}'@'localhost'; 10 | FLUSH PRIVILEGES; 11 | 12 | CREATE DATABASE IF NOT EXISTS {dbname}; 13 | USE {dbname}; 14 | 15 | CREATE TABLE IF NOT EXISTS `blocks` ( 16 | `id` int(11) NOT NULL, 17 | `hash` binary(32) NOT NULL, 18 | `coinbase` varbinary(100) NOT NULL, 19 | `chainwork` binary(32) NOT NULL, 20 | `blksize` int(11) NOT NULL, 21 | PRIMARY KEY (`id`) 22 | ) ENGINE={dbeng} DEFAULT CHARSET=latin1; 23 | 24 | CREATE TABLE IF NOT EXISTS `address` ( 25 | `id` decimal(13) NOT NULL, 26 | `addr` binary(20) NOT NULL, 27 | PRIMARY KEY (`id`) 28 | ) ENGINE={dbeng} DEFAULT CHARSET=latin1; 29 | 30 | CREATE TABLE IF NOT EXISTS `bech32` ( 31 | `id` decimal(13) NOT NULL, 32 | `addr` binary(32) NOT NULL, 33 | PRIMARY KEY (`id`) 34 | ) ENGINE={dbeng} DEFAULT CHARSET=latin1; 35 | 36 | CREATE TABLE 
IF NOT EXISTS `trxs` ( 37 | `id` decimal(13) NOT NULL, 38 | `hash` binary(32) NOT NULL, 39 | `ins` tinyint unsigned NOT NULL, 40 | `outs` tinyint unsigned NOT NULL, 41 | `txsize` smallint unsigned NOT NULL, 42 | `txdata` decimal(13) DEFAULT NULL, 43 | `block_id` decimal(13) DEFAULT NULL, 44 | PRIMARY KEY (`id`), 45 | KEY `block` (`block_id`) 46 | ) ENGINE={dbeng} DEFAULT CHARSET=latin1; 47 | 48 | CREATE TABLE IF NOT EXISTS `outputs` ( 49 | `id` decimal(16) NOT NULL, 50 | `value` decimal(18) DEFAULT NULL, 51 | `addr_id` decimal(13) DEFAULT NULL, 52 | `tx_id` decimal(13) DEFAULT NULL, 53 | PRIMARY KEY (`id`), 54 | KEY `addr` (`addr_id`) 55 | ) ENGINE={dbeng} DEFAULT CHARSET=latin1; 56 | 57 | CREATE TABLE IF NOT EXISTS `mempool` ( 58 | `id` decimal(13) NOT NULL, 59 | `sync_id` int(11) DEFAULT NULL, 60 | PRIMARY KEY (`id`), 61 | KEY `sync` (`sync_id`) 62 | ) ENGINE={dbeng} DEFAULT CHARSET=latin1; 63 | 64 | CREATE TABLE IF NOT EXISTS `orphans` ( 65 | `sync_id` int(11) NOT NULL, 66 | `block_id` int(11) NOT NULL, 67 | `hash` binary(32) NOT NULL, 68 | `hdr` binary(80) NOT NULL, 69 | `coinbase` varbinary(100) NOT NULL, 70 | KEY (`sync_id`) 71 | ) ENGINE={dbeng} DEFAULT CHARSET=latin1; 72 | 73 | CREATE TABLE IF NOT EXISTS `blkdat` ( 74 | `id` int(11) NOT NULL, 75 | `hash` binary(32) NOT NULL, 76 | `prevhash` binary(32) NOT NULL, 77 | `filenum` int(11) NOT NULL, 78 | `filepos` int(11) NOT NULL, 79 | UNIQUE KEY `filenum` (`filenum`,`filepos`), 80 | KEY `id` (`id`), 81 | KEY `hash` (`hash`) 82 | ) ENGINE={dbeng} DEFAULT CHARSET=latin1; 83 | 84 | CREATE TABLE IF NOT EXISTS `info` ( 85 | `class` varbinary(12) NOT NULL, 86 | `key` varbinary(32) NOT NULL, 87 | `value` varchar(64) DEFAULT NULL, 88 | PRIMARY KEY `class` (`class`,`key`) 89 | ) ENGINE={dbeng} DEFAULT CHARSET=latin1; 90 | 91 | --dummy row so API will properly return null outputs 92 | INSERT IGNORE INTO `address` (`id`, `addr`) VALUES (0,''); 93 | 
-------------------------------------------------------------------------------- /sqlchain/overlay/reddcoin.py: -------------------------------------------------------------------------------- 1 | # 2 | # Override Block and Tx decoding for Reddcoin (Proof of Stake) 3 | # 4 | # Changes as per reddcoin source core.h 5 | # 6 | # CTransaction - if version > POW_TX_VERSION then unsigned int nTime follows nLockTime 7 | # CBlock - if version > POW_BLOCK_VERSION then BlockSig string follows tx array 8 | # Transactions can be CoinStake, then Block gets marked as PoSV 9 | # 10 | 11 | import hashlib 12 | 13 | from struct import unpack, unpack_from 14 | from sqlchain.util import decodeVarInt, decodeScriptPK 15 | 16 | POW_BLOCK_VERSION = 2 17 | POW_TX_VERSION = 1 18 | 19 | # raw data decoding stuff 20 | def decodeBlock(data): 21 | hdr = ['version','previousblockhash','merkleroot', 'time', 'bits', 'nonce'] 22 | hv = unpack_from(' 0: 31 | tx = decodeTx(data[off:]) 32 | block['tx'].append(tx) 33 | off += tx['size'] 34 | txcnt -= 1 35 | if block['version'] > POW_BLOCK_VERSION: 36 | block['blocksig'] = '' 37 | block['height'] = 0 38 | block['coinbase'] = block['tx'][0]['vin'][0]['coinbase'] 39 | block['coinstake'] = txcnt > 1 and 'coinstake' in block['tx'][0] # mark as posv when first tx is CoinStake 40 | if block['version'] > 1 and block['height'] >= 227836 and block['coinbase'][0] == '\x03': 41 | block['height'] = unpack(' 0: 51 | txid,vout = unpack_from('<32sI', data, off) 52 | sigsz,soff = decodeVarInt(data[off+36:off+36+9]) 53 | off += soff+36 54 | seq, = unpack_from(' 1 and vicnt > 0 and emptyTXO(tx['vout'][0]) and not 'coinbase' in tx['vin'][0]: # mark as coinstake 72 | tx['coinstake'] = True 73 | tx['locktime'], = unpack_from(' POW_TX_VERSION: 75 | tx['time'], = unpack_from(' " % sys.argv[0] 74 | sys.exit(0) 75 | 76 | democvt(sys.argv[1], sys.argv[2], sys.argv[3]) 77 | -------------------------------------------------------------------------------- /tests/test_live_api.py: 
-------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # 3 | # live api test - unit test module 4 | # - supports multiple coins by selecting the db based on cointype 5 | # - read api urls from livetest db and call live api 6 | # - compare json returned to livetest db json 7 | # 8 | 9 | import sys, time, requests, json 10 | 11 | try: 12 | from deepdiff import DeepDiff 13 | except ImportError: 14 | print "Cannot run database tests without deepdiff module" 15 | 16 | try: 17 | import sqlite3 as db 18 | except ImportError: 19 | print "Cannot run database tests without sqlite3 module" 20 | 21 | import pytest 22 | 23 | millis = lambda: int(round(time.time() * 1000)) 24 | server = None 25 | 26 | live = pytest.mark.skipif(not pytest.config.getoption("--runlive"), reason = "need --runlive option to run") 27 | nosigs = pytest.mark.skipif(pytest.config.getoption("--nosigs"), reason = "cannot test with nosigs db") 28 | 29 | # livetest db created by mklivetestdb.py 30 | @pytest.fixture(scope="module") 31 | def testdb(request): 32 | global server 33 | if 'sqlite3' not in sys.modules: 34 | pytest.skip("sqlite3 module not available.") 35 | return None 36 | server = request.config.getoption("--server") 37 | coin = request.config.getoption("--coin") 38 | cwd = str(request.fspath.join('..')) 39 | sql = db.connect(cwd+'/livetest.%s.db' % coin,isolation_level=None) 40 | cur = sql.cursor() 41 | cur.execute("select name from sqlite_master where name='calls';") 42 | if cur.fetchone() is None: 43 | pytest.skip("livetest.%s.db not initialized." 
% coin) 44 | return None 45 | if not request.config.getoption("--append"): 46 | cur.execute("delete from tests;") 47 | return cur 48 | 49 | def api_call(url): 50 | try: 51 | call_ts = millis() 52 | r = requests.get('http://'+server+url) 53 | if r.status_code == requests.codes.ok: 54 | return r.json(),millis()-call_ts 55 | except requests.exceptions.ConnectionError: 56 | pytest.skip("requires api connection to run") 57 | return { "error": "Connect Error: http://"+server+url },0 58 | return { "error": r.status_code },0 59 | 60 | def api_diff(cur, sqlstr, **kwargs): 61 | log,diff = [],{} 62 | cur.execute("select url,result from calls where %s;" % sqlstr) 63 | for url,result in cur: 64 | rtn,rtt = api_call(url) 65 | if 'error' in rtn: 66 | return rtn 67 | diff = DeepDiff(json.loads(result), rtn, ignore_order=True, **kwargs) 68 | log.append((url,str(rtn),str(diff),rtt)) 69 | if diff != {}: 70 | break 71 | cur.executemany("insert into tests (url,result,diff,rtt) values (?,?,?,?);", log ) 72 | return diff 73 | 74 | @live 75 | def test_live_api_block(testdb): 76 | assert api_diff(testdb, "url like '/block/%'", exclude_paths={"root['confirmations']"}) == {} 77 | 78 | @live 79 | def test_live_api_block_index(testdb): 80 | assert api_diff(testdb, "url like '/block-index/%'") == {} 81 | 82 | @live 83 | @nosigs 84 | def test_live_api_rawblock(testdb): # not currently supported 85 | assert True 86 | 87 | @live 88 | def test_live_api_blocks(testdb): # not currently supported 89 | assert True 90 | 91 | @live 92 | def test_live_api_tx(testdb): 93 | assert api_diff(testdb, "url like '/tx/%'") == {} 94 | 95 | @live 96 | @nosigs 97 | def test_live_api_rawtx(testdb): 98 | assert api_diff(testdb, "url like '/rawtx/%'") == {} 99 | 100 | @live 101 | def test_live_api_addr(testdb): 102 | assert api_diff(testdb, "url like '/addr/%'") == {} 103 | 104 | @live 105 | def test_live_api_utxo(testdb): 106 | assert api_diff(testdb, "url like '/addr/%/utxo'") == {} 107 | 108 | @live 109 | def 
test_live_api_txs_block(testdb): 110 | assert api_diff(testdb, "url like '/txs/?block=%'") == {} 111 | 112 | @live 113 | def test_live_api_txs_addr(testdb): 114 | assert api_diff(testdb, "url like '/txs/?address=%'") == {} 115 | 116 | @live 117 | def test_live_api_addrs(testdb): 118 | assert api_diff(testdb, "url like '/addrs/%/utxo'") == {} 119 | 120 | @live 121 | def test_live_api_status(testdb): 122 | assert True 123 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | ### sqlChain - The Blockchain as a SQL Layer 2 | 3 | **sqlChain** is a *compact* SQL layer that runs on top of bitcoind (and some altcoins). It extends the query options on the blockchain with a priority placed on low storage overhead. It provides multiple API (compatible) interfaces: 4 | 5 | - Insight API (plus some extensions, like /api/closure) 6 | - Blockchain.info API (including websocket) 7 | - RPC via POST, GET urls 8 | - Web Interface (demo of integrating with API backend; only hints at what you can do) 9 | - Electrum Server (old, still untested, needs TLC) 10 | 11 | **sqlChain** currently consists of three daemon programs. 12 | 13 | - **sqlchaind** - monitors bitcoind and updates a mysql database 14 | - **sqlchain-api** - provides multiple API interfaces over the mysql database 15 | - **sqlchain-electrum** - runs a private electrum server over the sqlchain-api layer 16 | 17 | #### Recent Updates 18 | 19 | (v0.3.0) (In progress) Migration to Python3. 20 | 21 | Also now added a [Roadmap Project](https://github.com/neocogent/sqlchain/projects/1) to Github. So if you are interested in the future of this project and a renewed committment to taking it into the next decade then have a look. 22 | 23 | (v0.2.9) 24 | 25 | Made API modules config selectable or Insight compatible if none set. Now a supplied API can be customized and integrated by editing the config file. 
This makes it easier to build web apps that need specific API calls. sqlchain-config adds all 5 supplied APIs by default. If upgrading you may need to edit the config file to add ones you use. 26 | 27 | New config option called max_blks that limits how many blocks are kept in SQL db. This is useful when you primarily want to process realtime or not very old data. Used with a pruning node it indexes address/tx data back as far as max_blks, which is something you cannot do on a pruned node normally as txindex cannot be used. It also means you can start from a given block (set block in config) and not have to sync SQL db from Genesis; this allows a quick start up in some cases. Also fixed bugs in sync api. 28 | 29 | (v0.2.5) 30 | 31 | Database sync code updated for SQL transactional engines. Tested with MariaDB using the [RocksDB](https://en.wikipedia.org/wiki/MyRocks) engine. This engine has some nice features but the main ones of interest here are storage size reduction and indexing (instead of Btree) more suited to high entropy keys (tx,address ids). In my tests RocksDB was not much faster initially but didn't drop in speed so much as DB size grows. It's a bit early to fully recommend but initially it looks like a nice option. I'll update the install guide with RocksDB steps (soonish). 32 | 33 | Added **bech32** address support (p2wpkh and p2sh). This requires a database upgrade and along with other changes the best option is to re-sync the blockchain. sqlchain will stop if it detects an old db and if re-sync is not possible then reverting to pre v0.2.5 is best. 34 | 35 | Now supports multiple blockchains and testnet variants. Currently Litecoin, Dogecoin and Reddcoin have been added as test cases (with demo pages) and I hope to add a few more before long. Each coin requires it's own daemon process but sqlchain-config (sqlchain-init replacement) now takes advantage of systemd "instances" so that several can coexist. 
This means the systemctl commands are now like `systemctl start sqlchain@bitcoin`, and similarly for other coins. There is only one sqlchain@.service and it creates variant instances for each coin described by it's cfg. 36 | 37 | Upstart (Ubuntu 14.04) support has been removed - it probably works fine but the setup process now only automates Ubuntu 16.04 (systemd) and newer platforms. 38 | 39 | As part of new altcoin support there is now an "overlay" feature where custom modules can be loaded based on cointype or for extending an api. SQL schema can be overridden likewise based on cointype or db name. If you have a custom schema you can have it initialized by sqlchain-config simply by using a matching custom db name. Both these options allow customizing and extending code while easing the burden of merging updates. 40 | 41 | New unit tests have been added, see the README in the tests directory. Many bugs fixed and api behaviour improved as a result. 42 | 43 | See re-organized **docs** directory for more detailed info on adding new altcoins and running with alternative database engines. 
44 | 45 | #### Supported Features (with more tested history) 46 | 47 | - testnet and segwit - decodes and stores witness data but not much of an segwit api yet 48 | - pruning - since block data is parsing into mysql you can remove old blocks and save ~50% disk space with no loss in api 49 | - no-sig option - can drop signature and witness data to further reduce disk space for uses not requiring proofs and raw tx data 50 | - external blobs - most signature, witness and script data is offloaded to blobs exteral to mysql, giving finer control (losing indexibility) 51 | - split blobs, s3 ready - blobs are split in 5GB chunks, allows mapping older tx data to cheaper offline storage like Amazon s3 52 | - blkdat mode - direct reading of bitcoind block files allows much faster initial sync as sqlchain can read while bitcoind is syncing 53 | - blkbtc - utility to block on/off node network traffic to allow more cpu for sqlchain to catch up, or limit disk used by syncing 54 | - sqlchain-config - dialog based utility to ease setup and generate optimal config files 55 | 56 | sqlchain is still *Beta* software, under sporadic active development. 57 | 58 | sqlchain-electrum has not received much love over the last 2 years but I do plan to get it caught up and functioning again. 59 | 60 | #### Try It Out 61 | 62 | You can try it on Testnet and it doesn't take much time or resources. Even a 1vCPU (1.5 cents/hour) [Vultr](http://www.vultr.com/?ref=7087266) instance can run it quite well. You can snapshot the instance and only run as needed. On this VPS Testnet sync'd in 45 minutes and used ~ 12 GB. It takes ~1.5 days to sync mysql data to block 1156000. The first block with segwit txs seems to be 834624. 63 | 64 | #### TODO 65 | 66 | See the new [Roadmap Project](https://github.com/neocogent/sqlchain/projects/1). 67 | 68 | [Donations](https://www.neocogent.com/pages/btc-donation.html) supporting further development are welcome. 
69 | 70 | 71 | -------------------------------------------------------------------------------- /sqlchain/blkdat.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # 3 | # Scan blockchain blk*.dat files and build index table. 4 | # With the index, blocks can be read directly for processing. 5 | # 6 | # If you have a fully sync'd blockchain then this isn't useful as bitcoind 7 | # responds fast enough for sqlchaind with rpc calls. This is useful if you 8 | # want to sync sqlchain while bitcoind is still syncing (to save time on 9 | # slow systems) and bitcoind is basically unresponsive via rpc. 10 | # 11 | # Can run standalone to build blkdat table, or be called by sqlchaind 12 | # with less verbose logging info. 13 | # 14 | # Trails the main chain tip by 60 blocks to avoid reorg problems 15 | # 16 | import os 17 | 18 | from struct import unpack 19 | from time import sleep 20 | from hashlib import sha256 21 | from warnings import filterwarnings 22 | import MySQLdb as db 23 | 24 | from sqlchain.version import coincfg, BLKDAT_MAGIC 25 | from sqlchain.util import log 26 | 27 | todo = {} 28 | lastpos = (0,0) 29 | filterwarnings('ignore', category = db.Warning) # pylint:disable=no-member 30 | 31 | sqlmk=''' 32 | CREATE TABLE `blkdat` ( 33 | `id` int(11) NOT NULL, 34 | `hash` binary(32) NOT NULL, 35 | `prevhash` binary(32) NOT NULL, 36 | `filenum` int(11) NOT NULL, 37 | `filepos` int(11) NOT NULL, 38 | UNIQUE KEY `filenum` (`filenum`,`filepos`), 39 | KEY `id` (`id`), 40 | KEY `hash` (`hash`) 41 | ) ENGINE=MyISAM DEFAULT CHARSET=latin1;''' 42 | 43 | def BlkDatHandler(verbose = False): 44 | cur = initdb() 45 | blockpath = sqc.cfg['blkdat'] + "/blocks/blk%05d.dat" 46 | while not sqc.done.isSet(): 47 | blkhash = findBlocks(cur, blockpath, verbose) 48 | if blkhash: 49 | blk,blkhash = getBlkRPC(blkhash) 50 | if blk: 51 | log("Blkdat %d - %s" % (blk,blkhash[::-1].encode('hex')) ) 52 | linkMainChain(cur, blk, 
blkhash, verbose) 53 | 54 | def findBlocks(cur, blockpath, verbose): 55 | global lastpos # pylint:disable=global-statement 56 | filenum,pos = lastpos 57 | startpos = pos 58 | blkhash = None 59 | if filenum > 0: 60 | while not os.path.exists(blockpath % (filenum+2,)): # we trail by 2 blks file otherwise not reliable 61 | for _ in range(12): 62 | sleep(5) 63 | if sqc.done.isSet(): 64 | return None 65 | cur.execute("select 1;") # keepalive during long waits 66 | try: 67 | with open(blockpath % filenum, "rb") as fd: 68 | while not sqc.done.isSet(): 69 | fd.seek(pos) 70 | buf = fd.read(8) 71 | if len(buf) < 8: 72 | break 73 | magic,blksize = unpack(' 1e6: # skip large end gaps 76 | break 77 | pos += 1 78 | continue 79 | buf = fd.read(80) 80 | blkhash = sha256(sha256(buf).digest()).digest() 81 | prevhash = buf[4:36] 82 | if verbose: 83 | log("%05d:%d %s %s" % (filenum, pos, blkhash[::-1].encode('hex')[:32], prevhash[::-1].encode('hex')[:32]) ) 84 | cur.execute("insert ignore into blkdat (id,hash,prevhash,filenum,filepos) values(-1,%s,%s,%s,%s);", (blkhash,prevhash,filenum,pos)) 85 | pos += blksize 86 | startpos = pos 87 | lastpos = filenum+1,0 88 | return blkhash 89 | except IOError: 90 | print "No file:", blockpath % filenum 91 | lastpos = filenum,pos 92 | sqc.done.set() 93 | return None 94 | 95 | def linkMainChain(cur, highblk, blkhash, verbose): 96 | global todo # pylint:disable=global-statement 97 | todo[highblk] = blkhash 98 | if verbose: 99 | print "TODO", [ (blk,todo[blk][::-1].encode('hex')) for blk in todo ] 100 | tmp = {} 101 | for blk in todo: 102 | blkhash = todo[blk] 103 | while not sqc.done.isSet(): 104 | if verbose: 105 | log("%d - %s" % (blk, blkhash[::-1].encode('hex')) ) 106 | cur.execute("select id from blkdat where id=%s and hash=%s limit 1;", (blk, blkhash)) 107 | row = cur.fetchone() 108 | if row: 109 | break 110 | cur.execute("update blkdat set id=%s where hash=%s;", (blk, blkhash)) 111 | if cur.rowcount < 1: 112 | log("Blkdat hash miss for %d, 
requeued" % blk) 113 | tmp[blk] = blkhash 114 | break 115 | cur.execute("select prevhash from blkdat where id=%s limit 1;", (blk,)) 116 | row = cur.fetchone() 117 | if row: 118 | blkhash = row[0] 119 | blk -= 1 120 | if blk < 0: 121 | break 122 | todo = tmp 123 | 124 | def getBlkRPC(blkhash): 125 | blk = sqc.rpc.getblock(blkhash[::-1].encode('hex')) 126 | if blk is None: 127 | return 0,'' 128 | blkhash = sqc.rpc.getblockhash(blk['height']-120) # offset to avoid reorg, order problems 129 | return ( blk['height']-120,blkhash.decode('hex')[::-1] ) 130 | 131 | def initdb(): 132 | global todo,lastpos # pylint:disable=global-statement 133 | sql = db.connect(*sqc.cfg['db'].split(':')) 134 | sql.autocommit(True) 135 | cur = sql.cursor() 136 | cur.execute("show tables like 'blkdat';") 137 | if cur.rowcount == 0: 138 | cur.execute(sqlmk) # create table if not existing 139 | 140 | #queries separated for ubuntu 16 compatibility. 141 | cur.execute("select max(filenum) from blkdat;") # find any previous position 142 | maxFileNum = cur.fetchone()[0] 143 | cur.execute("select max(filepos) from blkdat where filenum=%s;", (maxFileNum,) ) 144 | maxFilePos = cur.fetchone()[0] 145 | row = (maxFileNum, maxFilePos) 146 | 147 | if row != (None,None): 148 | lastpos = row 149 | cur.execute("""select (select min(t3.id)-1 from blkdat t3 where t3.id > t1.id) as blk, 150 | (select prevhash from blkdat t4 where t4.id=blk+1) as blkhash from blkdat t1 where not exists 151 | (select t2.id from blkdat t2 where t2.id = t1.id + 1) having blk is not null;""") # scan for id gaps, set todo 152 | for (blk,blkhash) in cur: 153 | todo[blk] = blkhash 154 | return cur 155 | -------------------------------------------------------------------------------- /sqlchain/dbpool.py: -------------------------------------------------------------------------------- 1 | """ 2 | The MIT License 3 | 4 | Copyright (C) 2012 Gordon Chan 5 | 6 | Permission is hereby granted, free of charge, to any person obtaining a copy of 
this software 7 | and associated documentation files (the "Software"), to deal in the Software without restriction, 8 | including without limitation the rights to use, copy, modify, merge, publish, distribute, 9 | sublicense, and/or sell copies of the Software, and to permit persons to whom the Software 10 | is furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING 16 | BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 17 | NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, 18 | DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 19 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 20 | """ 21 | # Extended from gevent.db by neocogent 22 | # Original: https://github.com/gordonc/gevent-db/blob/master/db.py 23 | # 24 | # Modified for use with MySQLdb 25 | # - allowing it to take a connection list arg as well as string 26 | # - adding a cursor __iter__ to allow iteration like MySQLdb supports. 
27 | # - adding __enter__ and __exit_ to support using "with" context manager 28 | # - adding executemany on cursor 29 | # - original license copied into file to avoid confusion 30 | # 31 | import gevent.socket 32 | from gevent import queue 33 | 34 | # avoid socket monkey patching 35 | import imp 36 | fp, pathname, description = imp.find_module('socket') 37 | try: 38 | socket_ = imp.load_module('socket_', fp, pathname, description) 39 | finally: 40 | if fp: 41 | fp.close() 42 | 43 | import threading 44 | import logging 45 | 46 | KEEPALIVE_PERIOD = 1800 47 | 48 | class DBPool(): 49 | def __init__(self,connectionstring,poolsize,modulename='pyodbc'): 50 | self.conns = [DBConnection_(socket_.socketpair()) for x in xrange(poolsize)] 51 | self.threads = [threading.Thread(target=self.worker, args=(self.conns[x],)) for x in xrange(poolsize)] 52 | self.queue = queue.Queue(poolsize) 53 | for i in xrange(poolsize): 54 | self.threads[i].daemon = True 55 | self.threads[i].start() 56 | self.conns[i].connect(connectionstring,modulename) 57 | self.queue.put(self.conns[i]) 58 | if KEEPALIVE_PERIOD > 0: 59 | self.monitor = gevent.spawn(self.keepalive) 60 | 61 | def keepalive(self): 62 | while True: 63 | for n in range(len(self.conns)): 64 | self.get().cursor().execute("select 1;") 65 | gevent.sleep(KEEPALIVE_PERIOD) 66 | 67 | def worker(self,conn): 68 | while True: 69 | conn.pipe[1].recv(1) 70 | try: 71 | function = conn.state.function 72 | args = conn.state.args 73 | conn.state.ret = function(*args) 74 | conn.state.status = 0 75 | except Exception as inst: 76 | conn.state.error = inst 77 | conn.state.status = -1 78 | finally: 79 | conn.pipe[1].send('\0') 80 | 81 | def get(self, commit=True): 82 | c = self.queue.get() 83 | if callable(c.conn.autocommit): 84 | c.conn.autocommit(commit) 85 | else: 86 | c.conn.autocommit = commit 87 | return DBConnection(self,c) 88 | 89 | class DBConnection_(): 90 | class State(): 91 | pass 92 | 93 | def __init__(self,pipe): 94 | self.pipe = pipe 95 | 
self.state = self.State() 96 | 97 | def connect(self,connectionstring,modulename): 98 | self.conn = self.apply(__import__(modulename).connect,*(connectionstring,) if isinstance(connectionstring, basestring) else connectionstring) 99 | 100 | def __del__(): 101 | self.conn.close() 102 | 103 | def apply(self,function,*args): 104 | logging.info(args) 105 | 106 | self.state.function = function 107 | self.state.args = args 108 | gevent.socket.wait_write(self.pipe[0].fileno()) 109 | self.pipe[0].send('\0') 110 | gevent.socket.wait_read(self.pipe[0].fileno()) 111 | self.pipe[0].recv(1) 112 | if self.state.status != 0: 113 | raise self.state.error 114 | return self.state.ret 115 | 116 | class DBConnection(): 117 | def __init__(self,pool,conn_): 118 | self.pool = pool 119 | self.conn_ = conn_ 120 | 121 | def apply(self,function,*args): 122 | return self.conn_.apply(function,*args) 123 | 124 | def __del__(self): 125 | self.pool.queue.put(self.conn_) 126 | 127 | def cursor(self): 128 | return DBCursor(self,self.conn_.apply(self.conn_.conn.cursor)) 129 | 130 | class DBCursor(): 131 | def __init__(self,conn,cursor): 132 | self.conn = conn 133 | self.cursor = cursor 134 | 135 | def __enter__(self): 136 | return self.cursor 137 | 138 | def __exit__(self, type, value, traceback): 139 | pass 140 | 141 | def __iter__(self,*args): 142 | return self.conn.apply(self.cursor.__iter__,*args) 143 | 144 | def execute(self,*args): 145 | return self.conn.apply(self.cursor.execute,*args) 146 | 147 | def executemany(self,*args): 148 | return self.conn.apply(self.cursor.executemany,*args) 149 | 150 | def fetchone(self,*args): 151 | return self.conn.apply(self.cursor.fetchone,*args) 152 | 153 | def fetchall(self,*args): 154 | return self.conn.apply(self.cursor.fetchall,*args) 155 | 156 | def fetchmany(self,*args): 157 | return self.conn.apply(self.cursor.fetchmany,*args) 158 | 159 | @property 160 | def description(self): 161 | return self.cursor.description 162 | 163 | import unittest 164 | import 
time 165 | 166 | class TestDBPool(unittest.TestCase): 167 | def percentile(self,timings,percent): 168 | idx = int((len(timings)-1) * percent) 169 | return timings[idx] 170 | 171 | def test_benchmark(self): 172 | requests = 1000 173 | concurrency = 10 174 | sql = 'SELECT 1' 175 | 176 | timings = [] 177 | def timer(pool,sql): 178 | conn = pool.get() 179 | t0 = time.time() 180 | cursor = conn.cursor() 181 | cursor.execute(sql) 182 | timings.append(time.time()-t0) 183 | 184 | pool = DBPool(':memory:',concurrency,'sqlite3') 185 | 186 | greenlets = [] 187 | for i in xrange(requests): 188 | greenlets.append(gevent.spawn(timer,pool,sql)) 189 | 190 | for g in greenlets: 191 | g.join() 192 | 193 | print '66%% %f' % self.percentile(timings,0.66) 194 | print '90%% %f' % self.percentile(timings,0.90) 195 | print '99%% %f' % self.percentile(timings,0.99) 196 | print '99.9%% %f' % self.percentile(timings,0.999) 197 | print '100%% %f' % self.percentile(timings,1.00) 198 | 199 | if __name__ == '__main__': 200 | unittest.main() 201 | -------------------------------------------------------------------------------- /utils/sqlchain-upgrade-db: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # 3 | # sqlchain - upgrade database to support bech32 table and use revised addr id encoding 4 | # 5 | # This program should be run detached from console (as it is long running) and with output piped to a log file. 6 | # eg. sudo sqlchain-upgrade-db /etc/bitcoin/sqlchaind.cfg &>>upgrade.log & disown 7 | # 8 | # Old: addr_id columns had P2SH encoded as odd values, regular addresses as even. 9 | # New: addr_id columns has P2SH encoded with high bit 41, and high bit 42 for bech32. 10 | # This may also result in better division of address indices since p2sh are not interleaved with p2pkh. 
11 | # 12 | # Max addr id values and future high bit flags: 13 | # currently addr ids are decimal(13) which takes 6 bytes and has a maximum value of 9,999,999,999,999 = 0x9184E729FFF 14 | # only 5 bytes are used from the hashed address so bits above that can be flags, 0x9xxxxxxxxxx allows 3 high bits 15 | # these are now assigned as: [ 0, bech32, p2sh-p2wsh ] 16 | # Max txs/block is doubled to 20,000: 17 | # block.id and trxs.block_id are both decimal(11) which means a max of 99,999,999,999/20,000 or 4,999,999 blocks 18 | # Max txo/tx is increased from 4096 (12 bits) to 16384 (16 bits): 19 | # output ids are decimal(16) and tx ids are decimal(13). Actual tx_ids are a 5 byte truncation of the txhash, shifted 3 bits right. 20 | # Max tx_id (37 bits) 137,438,953,500 giving a max output id 2,251,799,814,000,000 - still within decimal(16) and 7 byte blob id 21 | # 22 | # Long bech32 hashes (p2wsh) are stored in a new table (bech32) but the short ones (p2wpkh) are in the existing address table. 23 | # 24 | # This program checks if the bech32 table exists as indicator of past table upgrade which repeating would corrupt. 
25 | # 26 | # Overview: 27 | # fix address table ids - 2 pass 28 | # update addr_ids in output table to match - 2 pass 29 | # update block_ids for new limit 30 | # update outputs id for new limit 31 | # add new bech32 table 32 | # scan blocks since 481,824 (testnet 834,624)(first segwit) and insert any missing addresses, update outputs 33 | # (fixes missing bech32 and also pkh detection bug in v0.2.2) 34 | # 35 | 36 | import os, sys 37 | import MySQLdb as db 38 | 39 | from sqlchain.util import dotdict, loadcfg, savecfg, getBlobHdr, decodeVarInt, readBlob, insertAddress, decodeScriptPK, log 40 | from sqlchain.version import MAX_IO_TX, MAX_TX_BLK 41 | 42 | __builtins__.sqc = dotdict() # container for super globals 43 | 44 | if len(sys.argv) < 2: 45 | print "Usage: %s \n" % sys.argv[0] 46 | print "Suggest backing up db files first if space available, and then run detached like this:" 47 | print "\tsudo sqlchain-upgrade-db /etc/bitcoin/sqlchaind.cfg &>>upgrade.log & disown\n" 48 | sys.exit(0) 49 | 50 | sqc.cfg = {} 51 | loadcfg(sqc.cfg) 52 | sqc.cfg['cointype'] = 'testnet' if sqc.cfg['testnet'] else 'bitcoin' 53 | 54 | print "\nUpdating cfg for cointype: %s" % sqc.cfg['cointype'] 55 | savecfg(sqc.cfg) 56 | 57 | sql = db.connect(*sqc.cfg['db'].split(':')) 58 | cur = sql.cursor() 59 | sql2 = db.connect(*sqc.cfg['db'].split(':')) 60 | cur2 = sql2.cursor(db.cursors.SSCursor) 61 | 62 | cur.execute("show tables like 'bech32';") # test if old db version and skip upgrade steps if exists 63 | if cur.rowcount == 0: 64 | log("\nDo not interrupt these steps or ids will be corrupted.\n") 65 | 66 | log("Updating address table - id, step 1/8. 
") 67 | cur.execute("update address set id = if(id%2=0, (id div 2)|0x40000000000, (id div 2)|0x50000000000);") 68 | 69 | log("Updating address table - mask, step 2/8.") 70 | cur.execute("update address set id = (id & 0x1FFFFFFFFFF);") 71 | 72 | log("Updating outputs table - addr_id, step 3/8.") 73 | cur.execute("update outputs set addr_id = if(addr_id%2=0, (addr_id div 2)|0x40000000000, (addr_id div 2)|0x50000000000);") 74 | 75 | log("Updating outputs table - mask, step 4/8.") 76 | cur.execute("update outputs set addr_id = (addr_id & 0x1FFFFFFFFFF);") 77 | 78 | log("Updating trxs table - expand txs/block, step 5/8.") 79 | cur.execute("update trxs set block_id = (block_id div 10000*{0} + block_id%10000) where 1 order by block_id desc;".format(MAX_TX_BLK)) 80 | 81 | log("Updating outputs table - expand outputs/tx, step 6/8.") 82 | cur.execute("update outputs set id = (id div 4096*{0} + id%4096)|0x10000000000000;".format(MAX_IO_TX)) 83 | 84 | log("Updating outputs table - mask, step 7/8.") 85 | cur.execute("update outputs set id = id&0xFFFFFFFFFFFFF;") 86 | 87 | log("Adding new cols, and bech32 table, step 8/8.") 88 | cur.execute("ALTER TABLE blocks add `chainwork` binary(32), `blksize` int(11);") 89 | cur.execute("CREATE TABLE IF NOT EXISTS `bech32` (`id` decimal(13) NOT NULL, `addr` binary(32) NOT NULL, PRIMARY KEY (`id`)) ENGINE=MyISAM DEFAULT CHARSET=latin1;") 90 | else: 91 | log("Database already upgraded. 
Skipping to fix up - restart.") 92 | 93 | count,first_block = 1,834624 if sqc.cfg['testnet'] else 481824 # first segwit blocks 94 | cur.execute("select count(*) from blocks;") 95 | blks = cur.fetchone()[0] 96 | chunksz = (blks-first_block)/100 97 | 98 | try: 99 | with open('upgrade.state', 'r') as f: 100 | count,first_block,chunksz = [int(x) for x in next(f).split()] 101 | except IOError: 102 | pass 103 | 104 | log("\nUnknown outputs (like bech32) were stored as zero id.") 105 | log("Scanning zero ids to fix up with new address ids.") 106 | log("\nThis (lengthy) process can be killed/restarted without data corruption.") 107 | log("\nStarting at %d, %d chunks of %d blocks:" % (first_block,101-count,chunksz)) 108 | while first_block <= blks: 109 | fixlist = [] 110 | last_block = first_block + chunksz 111 | cur2.execute("select o.id,t.txdata,t.ins,t.outs from trxs t left join outputs o on t.id=(o.id div %s) where o.addr_id=0 and t.block_id >= %s and t.block_id < %s;", (MAX_IO_TX, first_block*MAX_TX_BLK, last_block*MAX_TX_BLK)) 112 | for oid,txdata,ins,outs in cur2: 113 | tx_n = oid % MAX_IO_TX 114 | hdr = getBlobHdr(txdata, sqc.cfg['path']) 115 | if ins >= 0xC0: 116 | ins = ((ins&0x3F)<<8) + hdr[1] 117 | if outs >= 0xC0: 118 | outs = ((outs&0x3F)<<8) + hdr[2] 119 | vpos = int(txdata) + hdr[0] + ins*7 120 | for n in range(ins): 121 | vsz,off = decodeVarInt(readBlob(vpos, 9, sqc.cfg)) if not hdr[7] else (0,0) # no-sigs flag 122 | vpos += off+vsz+(0 if hdr[6] else 4) 123 | for n in range(outs): 124 | vsz,off = decodeVarInt(readBlob(vpos, 9, sqc.cfg)) 125 | if n == tx_n: 126 | spk = decodeScriptPK( readBlob(vpos+off, vsz, sqc.cfg) ) 127 | if 'addr' in spk: 128 | addr_id = insertAddress(cur, spk['addr']) 129 | fixlist.append((addr_id,oid)) 130 | break 131 | vpos += off+vsz 132 | 133 | log("CHK %02d: %d-%d, %d fixed." 
% (count,first_block,last_block,len(fixlist))) 134 | cur.executemany("update outputs set addr_id=%s where id=%s limit 1;", fixlist) 135 | first_block = last_block 136 | count += 1 137 | 138 | with open('upgrade.state', 'w') as f: 139 | f.write('%d %d %d' % (count,first_block,chunksz)) 140 | 141 | log("Upgrade complete.") 142 | os.unlink('upgrade.state') 143 | -------------------------------------------------------------------------------- /docs/INSTALL.md: -------------------------------------------------------------------------------- 1 | ### INSTALLATION GUIDE 2 | 3 | At this time sqlChain is only tested on Linux servers. It may work on other platforms but I don't have them to test and probably won't put effort into that in the near future. 4 | 5 | There is a new **sqlchain-config** script that handles most of the configuration and DB init details. 6 | 7 | Tested on a clean Ubuntu 16.04 [Vultr.com](http://www.vultr.com/?ref=7087266) (1vCPU,2GB,40GB SSD) instance. If you try them then please use my [affiliate link](http://www.vultr.com/?ref=7087266) - gives me some much needed server credit for testing. 8 | 9 | ### Getting Started - Step by Step 10 | 11 | First, you need Bitcoin Core and some standard Ubuntu packages for MySQL and Python. 12 | 13 | ``` 14 | sudo apt-get install software-properties-common python-software-properties libev-dev libevent-dev # may not need but won't hurt 15 | sudo add-apt-repository ppa:bitcoin/bitcoin 16 | sudo apt-get update 17 | sudo apt-get install bitcoind mysql-server libmysqlclient-dev python-pip python-dev build-essential dialog 18 | ``` 19 | 20 | If you want to use mariadb instead (fork of mysql that is community based instead of corporate) then you can replace part of last line with `mariadb-server`. This is usually what I use for testing. Check out this [MariaDB page](https://downloads.mariadb.org/mariadb/repositories/) for commands to add a repository for keeping it updated. 
21 | 22 | Then you can install sqlChain from PyPi the easy way, includes dependencies and demo API web pages. 23 | 24 | sudo pip install --upgrade pip # may need this, won't hurt 25 | sudo pip install setuptools # likewise 26 | 27 | sudo pip install sqlchain 28 | 29 | That creates binaries in /usr/local/bin and puts python stuff where it should normally go. 30 | 31 | The easy way to create the DB and configure and co-ordinate both bitcoin core and sqlchain daemons: 32 | 33 | sudo sqlchain-config 34 | 35 | This is a terminal dialog based tool that will create a user, mysql db and config files with correct permissions in locations you indicate. There are defaults for everything so you can get by with selecting (8) Update on the settings menu. It will create some demo api/web server files which you can build upon. 36 | 37 | Finally, try starting the daemons, one at a time at first, and check they're running with `htop` or `ps afx`: 38 | 39 | ``` 40 | sudo systemctl start bitcoin 41 | sudo systemctl start sqlchain@bitcoin 42 | sudo systemctl start sqlchain-api@bitcoin 43 | ``` 44 | 45 | If the process doesn't seem to start you can check what happened with the usual systemctl status commands: 46 | 47 | ``` 48 | sudo systemctl status bitcoin 49 | sudo systemctl status sqlchain@bitcoin 50 | sudo systemctl status sqlchain-api@bitcoin 51 | ``` 52 | 53 | It's a good idea to add your normal user to the sqlchain/bitcoin group (by default "btc"), 54 | 55 | sudo adduser `whoami` btc 56 | 57 | That allows you to use the config files easily, such as when using bitcoin-cli. It's also useful to have some aliases for common tasks. You can throw these in your .bash_aliases so they are present on login. 
Just the most basic ones to build on: 58 | 59 | ``` 60 | alias btc='bitcoin-cli -conf=/etc/sqlchain/bitcoin.conf' 61 | alias sqdlog='sudo less /var/data/sqlchain/bitcoin/daemon.log' 62 | ``` 63 | 64 | ### Updating 65 | 66 | If you want to update sqlchain as I implement or fix stuff, 67 | 68 | sudo pip install --upgrade sqlchain 69 | 70 | should do the trick. Bitcoin will get updated by the Ubuntu PPA package system (unless you do a custom build for manual pruning). 71 | 72 | You can also install git and create a "bare init" repo. There are post-receive and deploy scripts in the docs directory as an example of automating updates. This allows simply "pushing" any updates to the repo and they get installed to correct locations. 73 | 74 | ### Pruning Mode 75 | 76 | If you select "pruning" mode in the sqlchain-config options then it will send rpc calls to bitcoin to let it know when a block is processed. Bitcoin prunes in "block file units", each one being ~128MB. So when sqlchaind has completed all blocks in a given block file it is deleted. The pruning only works in manual mode and this is available in bitcoind >= 0.14.1 (otherwise you need to custom build bitcoind). 77 | 78 | ### Testnet / Other blockchains (as of version 0.2.5) 79 | 80 | sqlChain and the sqlchain-config script now support multiple blockchains. Currently bitcoin, testnet, litecoin and reddcoin have config settings and have been tested. I expect to add more coins as I get time to install and test. 81 | 82 | ### Alternate Database Engines 83 | 84 | I've tested RocksDB and TokuDB using MariaDB 10.2. Both worked well and saved around 50% space compared to MyISAM, and probably a lot more compared to InnoDB (which I have not yet tested). RocksDB syncs faster than others and is probably my recommended best choice for now, though it admitedly needs more testing. You can now set the engine type in the sqlchain-config DB options. 
85 | 86 | ### Other Details 87 | 88 | You should probably create an optimized my.cnf override file in /etc/mysql/conf.d/bitcoin.cnf which has values suited to your system. For example I use below with 8 GB RAM and it seems to improve speed (but I don't claim this is optimal). The latest versions of MySQL seem to also add a mysql.conf.d directory and order of configuration is non-deterministic so you may need to play with the cnf location and name while checking how variables get set. This bizarre and confusing cnf loading wasn't a problem for me in earlier versions; call it progress. 89 | 90 | #optimized for bitcoin database using values from mysqltuner.pl adjusted for other uses 91 | [mysqld] 92 | ignore-builtin-innodb 93 | default-storage-engine = myisam 94 | key_buffer_size=6000M 95 | query_cache_type=0 96 | 97 | tmp_table_size=32M 98 | max_heap_table_size=32M 99 | 100 | By default the API server (sqlchain-api) listens on localhost:8085 but of course a simple edit of sqlchain-api.cfg allows changing that. For example, to listen on the normal public port, assuming root privileges (when started as root it will drop to chosen user after): 101 | 102 | "listen":"0.0.0.0:80", 103 | 104 | You can also set an SSL certificate file to use if you want to serve https. I would suggest for public access it's better to use nginx as a reverse proxy. It's fairly easy to setup and better for several reasons, one being secure certificate handling. But, you can edit the sqlchain-api.cfg and add: 105 | 106 | "ssl":"path/to/full-chain-certificate-file", 107 | "key":"path/to/private-key-file", (optional: don't set if you use a combined key+cert in above) 108 | 109 | This could be a file with concatenated private key and certificate blocks, or separate files. It should have read-only permissions; due to how python handles ssl it needs to be readable by the running user. 110 | 111 | A simple proxy config for nginx is below. 
You could have several api servers behind nginx and it can load balance across them. 112 | 113 | server { 114 | listen 80; 115 | listen [::]:80; 116 | listen 443 ssl http2; 117 | listen [::]:443 ssl http2; 118 | server_name example.com www.example.com; 119 | 120 | ssl_certificate /etc/letsencrypt/live/example.com/fullchain.pem; 121 | ssl_certificate_key /etc/letsencrypt/live/example.com/privkey.pem; 122 | ssl_protocols TLSv1 TLSv1.1 TLSv1.2; 123 | 124 | location / { 125 | proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; 126 | proxy_set_header Host $http_host; 127 | proxy_set_header X-Forwarded-Proto $scheme; 128 | proxy_pass http://127.0.0.1:8085; 129 | } 130 | } 131 | 132 | It is also probably better for serving static content and only passing api calls to sqlchain-api, in which case use a blank www in sqlchain-api.cfg to disable root files. eg. 133 | 134 | "www":"", 135 | 136 | The sqlchain-api "dbinfo" cfg option sets whether db state queries are run and at what interval: -1 = never, 0 = at start only, >0 minute interval. The db info output is available as an API call. So the following will refresh info every 10 minutes. 137 | 138 | "dbinfo":10, 139 | 140 | Any questions or suggestions - post issues on GitHub. 
141 | 142 | 143 | 144 | -------------------------------------------------------------------------------- /tests/mklivetestdb: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # 3 | # Takes an API URL and gets random test data to populate livetest db 4 | # 5 | # Some example test urls to run with: 6 | # 7 | # 8 | # 9 | import sqlite3 as db 10 | import sys, time, signal, random, requests 11 | 12 | millis = lambda: int(round(time.time() * 1000)) 13 | 14 | tx_per_block = 4 15 | site_count,call_count,err_count = 0,0,0 16 | call_ts = millis() 17 | siteID = None 18 | siteurl = None 19 | 20 | sqlmk=''' 21 | CREATE TABLE IF NOT EXISTS calls ( id INTEGER PRIMARY KEY, site INTEGER, url TEXT, result TEXT, rtt INTEGER ); 22 | CREATE TABLE IF NOT EXISTS tests ( id INTEGER PRIMARY KEY, call INTEGER, url TEXT, result TEXT, diff TEXT, rtt INTEGER ); 23 | CREATE TABLE IF NOT EXISTS sites ( site INTEGER PRIMARY KEY, url TEXT ); 24 | ''' 25 | 26 | def clean_exit(_signo, _stack_frame=None): 27 | sql.close() 28 | sys.exit(_signo) 29 | 30 | def api_call(cur, sid, url, apistr, save=True): #pylint:disable=redefined-outer-name 31 | global call_count,err_count,call_ts #pylint:disable=global-statement 32 | cur.execute("select id from calls where url=?;", (apistr,)) 33 | if cur.fetchone() is not None: 34 | print "%03d DUPLICATE CANCEL %s" % (call_count,apistr) 35 | return None 36 | if sid is None: 37 | modestr = "order by random()" if len(sys.argv) > 2 and sys.argv[2] != 'all' else "where site=%d" % (((call_count+err_count) % site_count)+1) 38 | cur.execute("select site,url from sites %s limit 1;" % modestr) 39 | sid,url = cur.fetchone() 40 | if call_ts+tick-millis() > 0: 41 | time.sleep(float(call_ts+tick-millis())/1000) 42 | call_ts = millis() 43 | try: 44 | r = requests.get(url+apistr) 45 | if 'X-RateLimit-Limit' in r.headers and call_count % 30 < site_count: 46 | print "LIMIT:",r.headers['X-RateLimit-Remaining'], url 47 | if 
r.status_code == requests.codes.ok: #pylint:disable=no-member 48 | if save and cur is not None: 49 | rtt = millis()-call_ts 50 | cur.execute("insert into calls (site,url,result,rtt) values (?,?,?,?);", (sid, apistr, r.text, rtt)) 51 | print "%03d" % call_count,url+apistr 52 | call_count += 1 53 | return r.json() 54 | else: 55 | print "===",r.status_code,url+apistr 56 | except requests.exceptions.ConnectionError: 57 | print "Connect Error:",url+apistr 58 | err_count += 1 59 | return None 60 | 61 | if __name__ == '__main__': 62 | 63 | if len(sys.argv) < 2: 64 | print "Usage: %s [all|random|] [req-count] [limit/min]\nMakes random api calls to site and populates livetest db." % sys.argv[0] 65 | print " required, use btc,ltc,tbc... just selects db varaint." 66 | print "Site URL like: https://someplace.com/api (include any path prefix)\nDefaults: req-count is 300, rate limit is 60 call/min." 67 | print "all=round-robin (default), random=cycle randomly, all past sites in db." 68 | print "You cannot skip args if using a later one.\n" 69 | sys.exit(0) 70 | signal.signal(signal.SIGINT, clean_exit) 71 | coin = sys.argv[1][:4] 72 | reqcount = 300 if len(sys.argv) < 4 else int(sys.argv[3]) 73 | rate = 60 if len(sys.argv) < 5 else int(sys.argv[4]) 74 | tick = 60000/rate 75 | 76 | sql = db.connect('livetest.%s.db' % coin,isolation_level=None) 77 | cur = sql.cursor() 78 | for line in sqlmk.split(';'): 79 | cur.execute(line) 80 | 81 | if len(sys.argv) < 3 or sys.argv[2].lower() == 'all': 82 | cur.execute("select count(*) from sites;") 83 | site_count = cur.fetchone()[0] 84 | if site_count == 0: 85 | print "No sites in db. 
Must provide at least one" 86 | clean_exit(1) 87 | elif sys.argv[2].lower() != 'random' and sys.argv[2][:7].lower() == 'http://': 88 | siteurl = sys.argv[2] if sys.argv[2][-1] != '/' else sys.argv[2][:-1] 89 | cur.execute("select site from sites where url=?;", (siteurl,)) 90 | row = cur.fetchone() 91 | if row is not None: 92 | siteID = row[0] 93 | print "Using site id:",siteID 94 | else: 95 | cur.execute("insert into sites (url) values (?);", (siteurl,)) 96 | siteID = cur.lastrowid 97 | print "Adding site id:",siteID 98 | else: 99 | print "Unknown site/method: %s" % sys.argv[2] 100 | clean_exit(2) 101 | 102 | # get current block count as upper limit for random blocks 103 | blks = api_call(cur, siteID, siteurl, '/blocks?limit=1', save=False) 104 | if blks is None: 105 | print "REQ",siteurl+'/blocks?limit=1',"FAILED" 106 | clean_exit(2) 107 | lastblk = blks['blocks'][0]['height'] 108 | print "Last Block", lastblk 109 | 110 | # get random blocks 111 | while True: 112 | if call_count > reqcount: 113 | break 114 | bn = random.randint(1,lastblk) 115 | data = api_call(cur, siteID, siteurl, '/block-index/%d' % bn) 116 | if data is None: 117 | continue 118 | blkhash = data['blockHash'] 119 | 120 | # get raw blocks for ~10% 121 | if bn % 10 == 0: 122 | api_call(cur, siteID, siteurl, '/rawblock/%s' % blkhash) 123 | 124 | # get txs for blocks for ~10% 125 | if bn % 10 == 1: 126 | api_call(cur, siteID, siteurl, '/txs/?block=%s' % blkhash) 127 | 128 | # get json block data to drill down on 129 | blkdata = api_call(cur, siteID, siteurl, '/block/%s' % blkhash) 130 | if blkdata is None: 131 | continue 132 | 133 | # get random txs in block ~ half of per_block reqs 134 | txsBlk = len(blkdata['tx']) 135 | if txsBlk > 1: 136 | # get random txs 137 | for tn in random.sample(xrange(0,txsBlk), min(tx_per_block,txsBlk//2)): 138 | txdata = api_call(cur, siteID, siteurl, '/tx/%s' % blkdata['tx'][tn]) 139 | if txdata is None: 140 | break 141 | vinN,voutN = 
len(txdata['vin']),len(txdata['vout']) 142 | 143 | # get raw tx data for ~10% 144 | if tn % 10 == 0: 145 | api_call(cur, siteID, siteurl, '/rawtx/%s' % blkdata['tx'][tn]) 146 | 147 | # get input address for ~50% 148 | if tn % 10 >= 5 and vinN > 1: 149 | addr = txdata['vin'][random.randint(0,len(txdata['vin'])-1)]['addr'] 150 | api_call(cur, siteID, siteurl, '/addr/%s' % addr) 151 | 152 | # get output address for ~30% with ?noTxList=1 153 | if tn % 10 >= 3 and voutN > 1: 154 | n = random.randint(0,len(txdata['vout'])-1) 155 | if 'addresses' in txdata['vout'][n]['scriptPubKey']: 156 | addr = txdata['vout'][n]['scriptPubKey']['addresses'][0] 157 | api_call(cur, siteID, siteurl, '/addr/%s?noTxList=1' % addr) 158 | 159 | # get output address for ~50% with random property 160 | if tn % 10 <= 5: 161 | n = random.randint(0,len(txdata['vout'])-1) 162 | if 'addresses' in txdata['vout'][n]['scriptPubKey']: 163 | prop = ['balance','totalReceived','totalSent','unconfirmedBalance'][random.randint(0,3)] 164 | addr = txdata['vout'][n]['scriptPubKey']['addresses'][0] 165 | api_call(cur, siteID, siteurl, '/addr/%s/%s' % (addr,prop)) 166 | 167 | # get unspent outputs for input address ~20% 168 | if tn % 10 >= 8 and vinN > 1: 169 | addr = txdata['vin'][random.randint(0,len(txdata['vin'])-1)]['addr'] 170 | api_call(cur, siteID, siteurl, '/addr/%s/utxo' % addr) 171 | 172 | # get unspent outputs for multiple input addresses ~10%, max 5 173 | if tn % 10 >= 7 and vinN > 4: 174 | addrs = ','.join(set([vinx['addr'] for vinx in txdata['vin'] if vinx['addr'] is not None][:5])) 175 | api_call(cur, siteID, siteurl, '/addrs/%s/utxo' % addrs) 176 | 177 | # get txs for output address for ~10% 178 | if tn % 10 == 6: 179 | n = random.randint(0,len(txdata['vout'])-1) 180 | if 'addresses' in txdata['vout'][n]['scriptPubKey']: 181 | addr = txdata['vout'][n]['scriptPubKey']['addresses'][0] 182 | api_call(cur, siteID, siteurl, '/txs/?address=%s' % addr) 183 | 184 | print "Done - %d calls, %d errors" % 
(call_count,err_count) 185 | sql.close() 186 | -------------------------------------------------------------------------------- /sqlchain/bci.py: -------------------------------------------------------------------------------- 1 | # 2 | # Blockchain.info compatible API module 3 | # 4 | import urlparse, json 5 | 6 | from string import hexdigits 7 | from struct import unpack 8 | 9 | from sqlchain.insight import apiTx, addrUTXOs 10 | from sqlchain.util import is_address, mkaddr, gethdr, addr2id, txh2id, is_BL32, readBlob, getBlobHdr, log 11 | from sqlchain.version import MAX_TX_BLK, MAX_IO_TX 12 | from sqlchain.rpc import do_RPC 13 | 14 | 15 | def do_BCI(env, send_resp): 16 | args = env['PATH_INFO'].split('/')[2:] 17 | if args[0] == 'q': 18 | env['PATH_INFO'] = '/rpc/'+args[1] 19 | return do_RPC(env, send_resp) 20 | 21 | get,cur = urlparse.parse_qs(env['QUERY_STRING']), sqc.dbpool.get().cursor() 22 | send_resp('200 OK', [('Content-Type', 'application/json')]) 23 | if args[0] == "block-height": 24 | return json.dumps(bciHeight(cur, args[1])) 25 | if args[0] == "rawblock": 26 | if all(c in hexdigits for c in args[1]): 27 | return json.dumps(bciBlock(cur, args[1])) 28 | if args[0] == "rawtx": 29 | if all(c in hexdigits for c in args[1]): 30 | return json.dumps(apiTx(cur, args[1], ['raw']) if 'format' in get and get['format'][0] =='hex' else bciTx(cur, args[1])) 31 | if args[0] in ["address","unspent"]: 32 | addrs = get['active'][0].split('|') if 'active' in get else args[1].split(',') 33 | return json.dumps(bciAddr(cur, addrs, args[0] == "unspent", get)) 34 | return [] 35 | 36 | def bciHeight(cur, blk): 37 | if blk.isdigit(): 38 | cur.execute("select hash from blocks where id=%s limit 1;", (blk,)) 39 | else: 40 | cur.execute("select hash from blocks order by id desc limit 1;") 41 | for blkhash, in cur: 42 | return { 'blocks': [ bciBlock(cur, blkhash[::-1].encode('hex')) ] } 43 | return [] 44 | 45 | def bciBlockWS(cur, block): # inconsistent websocket sub has different 
labels 46 | data = { 'height': int(block), 'tx':[], 'txIndexes':[] } 47 | cur.execute("select hash from blocks where id=%s limit 1;", (block,)) 48 | for data['hash'], in cur: 49 | data['hash'] = data['hash'][::-1].encode('hex') 50 | hdr = gethdr(data['height'], sqc.cfg) 51 | data['blockIndex'] = data['height'] 52 | data['version'] = hdr['version'] 53 | data['time'] = hdr['time'] 54 | data['prevBlockIndex'] = data['height']-1 55 | data['mrklRoot'] = hdr['merkleroot'][::-1].encode('hex') 56 | data['nonce'] = hdr['nonce'] 57 | data['bits'] = hdr['bits'] 58 | cur.execute("select hash from trxs where block_id>=%s and block_id<%s;", (block*MAX_TX_BLK, block*MAX_TX_BLK+MAX_TX_BLK)) 59 | for txhash, in cur: 60 | data['tx'].append(bciTx(cur, txhash[::-1].encode('hex'))) 61 | data['txIndexes'].append(txhash[::-1].encode('hex')) 62 | data['nTx'] = len(data['tx']) 63 | data['reward'] = 0 64 | for out in data['tx'][0]['out']: 65 | data['reward'] += out['value'] 66 | data['totalBTCSent'] = 0 67 | for tx in data['tx']: 68 | for out in tx['out']: 69 | data['totalBTCSent'] += out['value'] 70 | del data['tx'] 71 | return data 72 | return None 73 | 74 | def bciBlock(cur, blkhash): 75 | data = { 'hash':blkhash, 'tx':[] } 76 | cur.execute("select id from blocks where hash=%s limit 1;", (blkhash.decode('hex')[::-1],)) 77 | for blkid, in cur: 78 | data['height'] = data['block_index'] = int(blkid) 79 | hdr = gethdr(data['height'], sqc.cfg) 80 | data['ver'] = hdr['version'] 81 | data['time'] = hdr['time'] 82 | data['prev_block'] = hdr['previousblockhash'][::-1].encode('hex') 83 | data['mrkl_root'] = hdr['merkleroot'][::-1].encode('hex') 84 | data['nonce'] = hdr['nonce'] 85 | data['bits'] = hdr['bits'] 86 | data['main_chain'] = True 87 | cur.execute("select hash from trxs where block_id>=%s and block_id<%s;", (blkid*MAX_TX_BLK, blkid*MAX_TX_BLK+MAX_TX_BLK)) 88 | for txhash, in cur: 89 | data['tx'].append(bciTx(cur, txhash[::-1].encode('hex'))) 90 | data['n_tx'] = len(data['tx']) 91 | 
data['fee'] = -(5000000000 >> (data['height'] / 210000)) 92 | for out in data['tx'][0]['out']: 93 | data['fee'] += out['value'] 94 | #log('DEBUG:'+str(data)) 95 | return data 96 | return None 97 | 98 | def bciAddr(cur, addrs, utxo, get=None): 99 | data,tops = [],[] 100 | single = (len(addrs) == 1) 101 | for addr in addrs: 102 | if is_address(addr): 103 | addr_id = addr2id(addr, cur) 104 | if addr_id: 105 | if utxo: 106 | data.extend(addrUTXOs(cur, addr_id, addr)) 107 | else: 108 | hdr,txs = bciAddrTXs(cur, addr_id, addr, get) 109 | data.extend(txs) 110 | tops.append(hdr) 111 | if not utxo and single: 112 | tops[0].update({'txs':data}) 113 | return { 'unspent_outputs':data } if utxo else tops[0] if single else { 'addresses':tops, 'txs':data } 114 | 115 | def bciAddrTXs(cur, addr_id, addr, *args): 116 | return {'recd':0},['asasas'] # todo finish this call 117 | 118 | def isTxAddrs(tx, addrs): 119 | for vi in tx['inputs']: 120 | if 'addr' in vi['prev_out'] and vi['prev_out']['addr'] in addrs: 121 | return True 122 | for vo in tx['out']: 123 | if vo['addr'] in addrs: 124 | return True 125 | return False 126 | 127 | def bciTxWS(cur, txhash): # reduced data for websocket subs 128 | data = bciTx(cur, txhash) 129 | if data: 130 | del data['block_height'] 131 | del data['lock_time'] 132 | for vi in data['inputs']: 133 | if 'prev_out' in vi: 134 | del vi['prev_out']['tx_index'] 135 | del vi['prev_out']['n'] 136 | del vi['prev_out']['spent'] 137 | for vo in data['out']: 138 | del vo['tx_index'] 139 | del vo['n'] 140 | return data 141 | return {} 142 | 143 | def bciTx(cur, txhash): 144 | data = { 'hash':txhash } 145 | txh = txhash.decode('hex')[::-1] 146 | cur.execute("select id,txdata,(block_id div {0}),ins,txsize from trxs where id>=%s and hash=%s limit 1;".format(MAX_TX_BLK), (txh2id(txh), txh)) 147 | for txid,blob,blkid,ins,txsize in cur: 148 | hdr = getBlobHdr(int(blob), sqc.cfg) 149 | data['tx_index'] = int(txid) 150 | data['block_height'] = int(blkid) 151 | 
data['ver'],data['lock_time'] = hdr[4:6] # pylint:disable=unbalanced-tuple-unpacking 152 | data['inputs'],data['vin_sz'] = bciInputs(cur, int(blob), int(ins)) 153 | data['out'],data['vout_sz'] = bciOutputs(cur, int(txid), int(blob)) 154 | data['time'] = gethdr(data['block_height'], sqc.cfg, 'time') if int(blkid) > -1 else 0 155 | data['size'] = txsize if txsize < 0xFF00 else (txsize&0xFF)<<16 + hdr[3] 156 | return data 157 | return None 158 | 159 | def bciInputs(cur, blob, ins): 160 | data = [] 161 | hdr = getBlobHdr(blob, sqc.cfg) # hdrsz,ins,outs,size,version,locktime,stdSeq,nosigs 162 | if ins >= 0xC0: 163 | ins = ((ins&0x3F)<<8) + hdr[1] 164 | if ins == 0: # no inputs 165 | return [{}],ins # only sequence and script here 166 | else: 167 | buf = readBlob(blob+hdr[0], ins*7, sqc.cfg) 168 | if len(buf) < ins*7 or buf == '\0'*ins*7: # means missing blob data 169 | return [{ 'error':'missing data' }],ins 170 | for n in range(ins): 171 | in_id, = unpack('=%s*{0} and o.id<%s*{0};".format(MAX_IO_TX), (txid,txid+1)) 184 | outs = cur.fetchall() 185 | for in_id,n,value,aid in outs: 186 | cur.execute("select addr from {0} where id=%s limit 1;".format('bech32' if is_BL32(int(aid)) else 'address'), (aid,)) 187 | for addr, in cur: 188 | vout = { 'n':int(n), 'value':int(value), 'addr':mkaddr(addr,int(aid)), 'type':0, 'tx_index':txid } 189 | if in_id: 190 | vout['spent'] = True 191 | data.append(vout) 192 | return data,len(outs) 193 | -------------------------------------------------------------------------------- /sqlchain-electrum: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | from time import sleep 4 | from urllib2 import urlopen, URLError 5 | 6 | import os, sys, getopt, signal, json, daemon 7 | 8 | from gevent import socket, monkey, spawn 9 | from gevent.server import StreamServer 10 | from gevent.queue import Queue 11 | 12 | from sqlchain.version import version 13 | from sqlchain.util import dotdict, 
loadcfg, savecfg, drop2user, logts, getChunk 14 | 15 | __builtins__.sqc = dotdict() # container for super globals 16 | 17 | sqc.cfg = { 'log':sys.argv[0]+'.log', 'listen':'localhost:8081', 'www':'www', 'api':'http://localhost:8085/api', 18 | 'banner':'docs/electrum.banner', 'path':'/var/data/sqlchain' } 19 | 20 | srvinfo = { 'version':version, 'banner':'', 'block':0, 'header':{} } 21 | subs = { 'numblocks':{}, 'headers':{}, 'address':{}, '_ip_':{} } 22 | 23 | def ReqHandler(): 24 | while True: 25 | #resp = None 26 | fp,req = sqc.reqQ.get() 27 | print 'REQ', req 28 | args = req['method'].split('.') 29 | val = req['params'][0] if len(req['params']) > 0 else 1 30 | if args[-1] == 'subscribe': 31 | if args[1] in subs and not getSubs(args[1], val, fp): 32 | addSub(args[1], val, fp) 33 | respSub(args[1], fp, req) 34 | elif args[0] == 'server': 35 | sqc.respQ.put((fp, req['id'], srvinfo[args[1]] if args[1] in srvinfo else {})) 36 | elif req['method'] in reqFuncs: 37 | spawn(reqFuncs[req['method']], fp, req) 38 | else: 39 | logts("Bad Req %s:%d - %s" % (subs['_ip_'][fp][0]+(req['method'],))) 40 | 41 | def RespHandler(): 42 | while True: 43 | fp,reqid,resp = sqc.respQ.get() 44 | resp = json.dumps({ 'id':reqid, 'result':resp } if resp is None or not 'error' in resp else { 'id':reqid,'error':resp['error'] }) 45 | print "RESP", reqid, resp 46 | fp.write(resp+'\n') 47 | fp.flush() 48 | 49 | def SyncHandler(): 50 | sync_id = 0 51 | while True: 52 | resp = apicall('/sync/'+str(sync_id)) 53 | if resp and 'error' in resp: 54 | sleep(30) 55 | elif resp: 56 | if resp['block'] != srvinfo['block']: 57 | srvinfo['block'] = resp['block'] 58 | pubSubs('numblocks', msg=resp['block']) 59 | hdr = apicall('/block-index/'+str(resp['block'])+'/electrum') 60 | if hdr != srvinfo['header']: 61 | srvinfo['header'] = hdr 62 | pubSubs('headers', msg=hdr) 63 | if len(resp['txs']) > 0: 64 | for tx in resp['txs']: 65 | pubSubs('address', addrs=getAddrs(tx)) 66 | sync_id = resp['sync_id'] 67 | 68 | def 
TcpHandler(sock, address): 69 | fp = sock.makefile() 70 | addSub('_ip_', address, fp) 71 | while True: 72 | line = fp.readline() 73 | if line: 74 | sqc.reqQ.put((fp, json.loads(line))) 75 | else: 76 | break 77 | delSubs(fp) 78 | sock.shutdown(socket.SHUT_WR) 79 | sock.close() 80 | 81 | def pubSubs(sub, msg=None, addrs=None): 82 | if addrs: 83 | for addr in addrs: 84 | fps = getSubs(sub, addr) 85 | if len(fps) > 0: 86 | data = apicall('/history/'+addr+'/status') 87 | for fp in fps: 88 | sqc.respQ.put((fp, None, data)) 89 | if msg: 90 | for fp in getSubs(sub): 91 | sqc.respQ.put((fp, None, msg)) 92 | 93 | def getSubs(sub, val=1, key=None): 94 | if key: 95 | return key in subs[sub] and val in subs[sub][key] 96 | if val == 1: 97 | return subs[sub].keys() 98 | fps = [] 99 | for k in subs[sub]: 100 | if val in subs[sub][k]: 101 | fps.append(k) 102 | return fps 103 | 104 | def addSub(sub, val, key): 105 | if key in subs[sub]: 106 | subs[sub][key].add(val) 107 | else: 108 | subs[sub][key] = set(val) 109 | 110 | def delSubs(key): 111 | for sub in subs: 112 | if key in subs[sub]: 113 | del subs[sub][key] 114 | 115 | def respSub(to, fp, req): 116 | if to == 'address': 117 | spawn(addrHistory, fp, req, '/status') 118 | elif to == 'numblocks': 119 | sqc.respQ.put((fp, req['id'], srvinfo['block'])) 120 | elif to == 'headers': 121 | sqc.respQ.put((fp, req['id'], srvinfo['header'])) 122 | else: 123 | sqc.respQ.put((fp, req['id'], [])) 124 | 125 | def addrHistory(fp, req, args=''): 126 | data = apicall('/history/'+req['params'][0] + args) 127 | sqc.respQ.put((fp, req['id'], data if args else data['txs'] if len(data['txs']) > 0 else None )) 128 | 129 | def addrBalance(fp, req): 130 | sqc.respQ.put((fp, req['id'], apicall('/history/'+req['params'][0]+'/balance'))) 131 | 132 | def addrMemPool(fp, req): 133 | sqc.respQ.put((fp, req['id'], apicall('/history/'+req['params'][0]+'/uncfmd'))) 134 | 135 | def addrUnspent(fp, req): 136 | sqc.respQ.put((fp, req['id'], 
apicall('/history/'+req['params'][0]+'/utxo'))) 137 | 138 | def addrProof(fp, req): # pylint:disable=unused-argument 139 | pass 140 | 141 | def blkHeader(fp, req): 142 | sqc.respQ.put((fp, req['id'], apicall('/block-index/'+req['params'][0]+'/electrum') )) 143 | 144 | def blkChunk(fp, req): 145 | sqc.respQ.put((fp, req['id'], getChunk(int(req['params'][0]), sqc.cfg).encode('hex') )) 146 | 147 | def utxoAddress(fp, req): 148 | sqc.respQ.put((fp, req['id'], apicall('/tx/'+req['params'][0]+'/output/%d' % req['params'][1]) )) 149 | 150 | def txGet(fp, req): 151 | sqc.respQ.put((fp, req['id'], apicall('/tx/'+req['params'][0]+'/raw') )) 152 | 153 | def txSend(fp, req): 154 | #logts("Tx Sent: %s" % txid) 155 | sqc.respQ.put((fp, req['id'], apicall('/tx/send', {'rawtx':req['params'][0]}) )) 156 | 157 | def txMerkle(fp, req): 158 | sqc.respQ.put((fp, req['id'], apicall('/merkle/'+req['params'][0]) )) 159 | 160 | def feeEstimate(fp, req): 161 | sqc.respQ.put((fp, req['id'], apicall('/util/estimatefee/'+req['params'][0]) )) 162 | 163 | reqFuncs = { 'blockchain.address.get_history':addrHistory, 'blockchain.address.get_balance':addrBalance, 164 | 'blockchain.address.get_mempool':addrMemPool, 'blockchain.address.get_proof':addrProof, 165 | 'blockchain.address.listunspent':addrUnspent, 'blockchain.utxo.get_address':utxoAddress, 166 | 'blockchain.block.get_header':blkHeader, 'blockchain.block.get_chunk':blkChunk, 167 | 'blockchain.transaction.broadcast':txSend, 'blockchain.transaction.get_merkle':txMerkle, 168 | 'blockchain.transaction.get':txGet, 'blockchain.estimatefee':feeEstimate } 169 | 170 | def getAddrs(tx): 171 | addrs = [] 172 | for vi in tx['inputs']: 173 | if 'addr' in vi['prev_out']: 174 | addrs.append(vi['prev_out']['addr']) 175 | for vo in tx['out']: 176 | addrs.append(vo['addr']) 177 | return addrs 178 | 179 | def options(cfg): # pylint:disable=too-many-branches 180 | try: 181 | opts,_ = getopt.getopt(sys.argv[1:], "hvb:p:c:d:l:w:p:s:a:u:b:", 182 | ["help", 
"version", "debug", "db=", "log=", "listen=", "www=", "user=", "banner=", "defaults" ]) 183 | except getopt.GetoptError: 184 | usage() 185 | for opt,arg in opts: 186 | if opt in ("-h", "--help"): 187 | usage() 188 | elif opt in ("-v", "--version"): 189 | sys.exit(sys.argv[0]+': '+version) 190 | elif opt in ("-d", "--db"): 191 | cfg['db'] = arg 192 | elif opt in ("-l", "--log"): 193 | cfg['log'] = arg 194 | elif opt in ("-w", "--www"): 195 | cfg['www'] = arg 196 | elif opt in ("-p", "--path"): 197 | cfg['path'] = arg 198 | elif opt in ("-s", "--listen"): 199 | cfg['listen'] = arg 200 | elif opt in ("-a", "--api"): 201 | cfg['api'] = arg 202 | elif opt in ("-u", "--user"): 203 | cfg['user'] = arg 204 | elif opt in ("-b", "--banner"): 205 | cfg['banner'] = arg 206 | elif opt in "--defaults": 207 | savecfg(cfg) 208 | sys.exit("%s updated" % (sys.argv[0]+'.cfg')) 209 | elif opt in "--debug": 210 | cfg['debug'] = True 211 | 212 | def usage(): 213 | print """Usage: {0} [options...][cfg file]\nCommand options are:\n-h,--help\tShow this help info\n-v,--version\tShow version info 214 | --debug\t\tRun in foreground with logging to console 215 | --defaults\tUpdate cfg and exit\nDefault files are {0}.cfg, {0}.log 216 | \nThese options get saved in cfg file as defaults. 
217 | -s,--listen\tSet host:port for Electrum server\n-w,--www\tWeb server root directory\n-u,--user\tSet user to run as 218 | -p,--path\tSet path for header data files (/var/data/sqlchain) 219 | -b,--banner\tSet file path for banner text\n-a,--api\tSet host:port for API connection\n-l,--log\tSet log file path""".format(sys.argv[0]) 220 | sys.exit(2) 221 | 222 | def apicall(url, post=None): 223 | try: 224 | data = urlopen(sqc.cfg['api']+url, post).read() 225 | except URLError: 226 | logts("Error: sqlchain-api not at %s" % sqc.cfg['api']) 227 | return { 'error':'No api connection' } 228 | return json.loads(data) 229 | 230 | def sigterm_handler(_signo, _stack_frame): 231 | logts("Shutting down.") 232 | if not sqc.cfg['debug']: 233 | os.unlink(sqc.cfg['pid'] if 'pid' in sqc.cfg else sys.argv[0]+'.pid') 234 | sys.exit(0) 235 | 236 | def sighup_handler(_signo, _stack_frame): 237 | path = sqc.cfg['log'] if 'log' in sqc.cfg else sys.argv[0]+'.log' 238 | sys.stdout.close() 239 | sys.stdout=open(path,'a') 240 | sys.stderr.close() 241 | sys.stderr=open(path,'a') 242 | logts("SIGHUP Log reopened") 243 | 244 | def run(): 245 | monkey.patch_socket() 246 | 247 | with open(sqc.cfg['banner']) as bf: 248 | srvinfo['banner'] = bf.read() 249 | 250 | hdr = apicall('/block-index/latest/electrum') 251 | if 'error' in hdr: 252 | sys.exit(1) 253 | srvinfo['block'],srvinfo['header'] = hdr['block_height'],hdr 254 | 255 | sqc.reqQ = Queue() 256 | sqc.respQ = Queue() 257 | spawn(ReqHandler) 258 | spawn(RespHandler) 259 | spawn(SyncHandler) 260 | 261 | logts("Starting on %s" % sqc.cfg['listen']) 262 | host,port = sqc.cfg['listen'].split(':') 263 | cert = {'certfile':sqc.cfg['ssl']} if ('ssl' in sqc.cfg) and (sqc.cfg['ssl'] != '') else {} 264 | server = StreamServer((host, int(port)), TcpHandler, **cert) 265 | 266 | drop2user(sqc.cfg, chown=True) 267 | 268 | server.serve_forever() 269 | 270 | if __name__ == '__main__': 271 | 272 | loadcfg(sqc.cfg) 273 | options(sqc.cfg) 274 | 275 | if 
sqc.cfg['debug']: 276 | signal.signal(signal.SIGINT, sigterm_handler) 277 | run() 278 | else: 279 | logpath = sqc.cfg['log'] if 'log' in sqc.cfg else sys.argv[0]+'.log' 280 | pidpath = sqc.cfg['pid'] if 'pid' in sqc.cfg else sys.argv[0]+'.pid' 281 | with daemon.DaemonContext(working_directory='.', umask=0002, stdout=open(logpath,'a'), stderr=open(logpath,'a'), 282 | signal_map={signal.SIGTERM:sigterm_handler, signal.SIGHUP:sighup_handler } ): 283 | with file(pidpath,'w') as f: 284 | f.write(str(os.getpid())) 285 | run() 286 | -------------------------------------------------------------------------------- /sqlchain-api: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | from collections import OrderedDict 4 | from importlib import import_module 5 | from datetime import datetime 6 | from time import time, sleep 7 | import threading, mimetypes, json 8 | import os, sys, getopt, cgi, signal, daemon 9 | 10 | from geventwebsocket import WebSocketServer, WebSocketApplication, Resource 11 | 12 | from sqlchain.version import version, P2SH_FLAG, BECH32_FLAG 13 | from sqlchain.rpc import do_RPC 14 | from sqlchain.bci import isTxAddrs, bciBlockWS, bciTxWS 15 | from sqlchain.insight import apiStatus 16 | from sqlchain.dbpool import DBPool 17 | from sqlchain.util import dotdict, loadcfg, savecfg, drop2user, getssl, log, logts 18 | 19 | __builtins__.sqc = dotdict() # container for super globals 20 | 21 | sqc.cfg = { 'log':sys.argv[0]+'.log', 'listen':'localhost:8085', 'www':'www', 'block':0, 22 | 'pool':4, 'dbinfo-ts':datetime.now().strftime('%s'), 23 | 'dbinfo':-1, 'path':'/var/data/sqlchain', 'cointype':'bitcoin' } 24 | 25 | sqc.server = {} 26 | sqc.clients = {} # active websockets we publish to 27 | sqc.syncTxs,sqc.lastBlk = [],{} # current sync data shared for every sync/subscription 28 | sqc.sync = threading.Condition() 29 | sqc.sync_id = 0 30 | 31 | def do_Root(env, send_resp): 32 | try: 33 | path = 
env['PATH_INFO'] 34 | if env['REQUEST_METHOD'] == 'POST': # POST 35 | if path == '/': # the /rpc api is mirrored here as form params 36 | form = cgi.FieldStorage(fp=env['wsgi.input'], environ=env, keep_blank_values=True) 37 | env['PATH_INFO'] = "/rpc/%s/%s" % (form['method'].value, "/".join(form.getlist('params'))) 38 | return do_RPC(env, send_resp) 39 | elif sqc.cfg['www']: # GET static website files, if path configured 40 | path = '/main.html' if path in ['', '/'] else path 41 | if os.path.isfile(sqc.cfg['www']+path): 42 | _,ext = os.path.splitext(path) 43 | filesize = str(os.path.getsize(sqc.cfg['www']+path)) 44 | with open(sqc.cfg['www']+path) as fd: 45 | send_resp('200 OK', [('Content-Type', mimetypes.types_map[ext]), ('Content-Length', filesize), 46 | ('Expires', datetime.utcfromtimestamp(time()+3600).strftime("%a, %d %b %Y %H:%M:%S %ZGMT"))]) 47 | return [ fd.read() ] 48 | send_resp('404 - File Not Found: %s' % path, [("Content-Type", "text/html")], sys.exc_info()) 49 | if not sqc.cfg['www']: 50 | return [] 51 | with open(sqc.cfg['www']+'/404.html') as fd: 52 | return [ fd.read() ] 53 | except IOError: 54 | pass 55 | 56 | class BCIWebSocket(WebSocketApplication): 57 | remote = None 58 | def on_open(self, *args, **kwargs): 59 | self.remote = self.ws.environ['REMOTE_ADDR'] 60 | logts("WS Client connected from %s" % self.remote) 61 | sqc.clients[self.ws.handler.active_client] = { 'subs':[], 'addrs':set() } 62 | 63 | def on_message(self, msg, *args, **kwargs): # pylint:disable=arguments-differ 64 | if msg: 65 | msg = json.loads(msg) 66 | if msg['op'] in [ 'blocks_sub', 'unconfirmed_sub' ]: 67 | sqc.clients[self.ws.handler.active_client]['subs'].append(msg['op']) 68 | if msg['op'] == 'addr_sub' and 'addr' in msg: 69 | sqc.clients[self.ws.handler.active_client]['addrs'].add(msg['addr']) 70 | if msg['op'] == 'ping_block': 71 | self.ws.send({ 'op': 'block', 'x': sqc.lastBlk }) 72 | if msg['op'] == 'ping_tx': 73 | if 'lasttx' in 
sqc.clients[self.ws.handler.active_client]: 74 | self.ws.send(json.dumps({ 'op': 'utx', 'x': sqc.clients[self.ws.handler.active_client]['lasttx'] })) 75 | 76 | def on_close(self, *args, **kwargs): 77 | logts("WS Client disconnected %s %s" % (self.remote, ''.join(args))) 78 | del sqc.clients[self.ws.handler.active_client] 79 | 80 | # monitor mempool, block, orphan changes - publish to websocket subscriptions, notify waiting sync connections 81 | def syncMonitor(): 82 | with sqc.dbpool.get().cursor() as cur: 83 | cur.execute("select greatest(ifnull(m,0),ifnull(o,0)) from (select max(sync_id) as m from mempool) m,(select max(sync_id) as o from orphans) o;") 84 | sqc.sync_id = cur.fetchone()[0] 85 | cur.execute("select ifnull(max(id),0) from blocks;") 86 | sqc.cfg['block'] = cur.fetchone()[0] 87 | if sqc.cfg['dbinfo'] == 0: 88 | sqc.dbwrk = threading.Thread(target = mkDBInfo) 89 | sqc.dbwrk.start() 90 | while not sqc.done.isSet(): 91 | with sqc.dbpool.get().cursor() as cur: 92 | txs,lastsync = [],0 93 | cur.execute("select hash,sync_id from mempool m, trxs t where m.sync_id > %s and t.id=m.id;", (sqc.sync_id,)) 94 | for txhash,sync_id in cur: 95 | txs.append(bciTxWS(cur, txhash[::-1].encode('hex'))) 96 | lastsync = max(lastsync, sync_id) 97 | if len(txs) > 0: 98 | sqc.syncTxs = txs 99 | cur.execute("select count(*) from orphans where sync_id > %s;", (sqc.sync_id,)) 100 | new_orphans = cur.fetchone()[0] > 0 101 | cur.execute("select max(id) from blocks;") 102 | block = cur.fetchone()[0] 103 | cur.execute("replace into info (class,`key`,value) values('info','ws-clients',%s),('info','connections',%s);", (len(sqc.clients), len(sqc.server.pool) if sqc.server.pool else 0)) 104 | if block != sqc.cfg['block'] or new_orphans or len(txs) > 0: 105 | do_Sync(block, lastsync) 106 | if sqc.cfg['dbinfo'] > 0 and (datetime.now() - datetime.fromtimestamp(int(sqc.cfg['dbinfo-ts']))).total_seconds() > sqc.cfg['dbinfo']*60: 107 | sqc.dbwrk = threading.Thread(target = mkDBInfo) 108 | 
sqc.dbwrk.start() 109 | sleep(sqc.cfg['sync'] if 'sync' in sqc.cfg else 5) 110 | if sqc.dbwrk: 111 | sqc.dbwrk.join() 112 | 113 | def do_Sync(block, lastsync): 114 | if block != sqc.cfg['block']: 115 | sqc.cfg['block'] = min(block, sqc.cfg['block']+1) 116 | with sqc.dbpool.get().cursor() as cur: 117 | sqc.lastBlk = bciBlockWS(cur, block) 118 | for client in sqc.server.clients.values(): 119 | if 'blocks_sub' in sqc.clients[client]['subs']: 120 | client.ws.send(json.dumps({ 'op': 'block', 'x': sqc.lastBlk })) 121 | sqc.sync_id = lastsync 122 | with sqc.sync: 123 | sqc.sync.notifyAll() 124 | if len(sqc.syncTxs) > 0: 125 | for client in sqc.server.clients.values(): 126 | for tx in sqc.syncTxs: 127 | if 'unconfirmed_sub' in sqc.clients[client]['subs'] or (sqc.clients[client]['addrs'] and isTxAddrs(tx, sqc.clients[client]['addrs'])): 128 | client.ws.send(json.dumps({ 'op': 'utx', 'x': tx })) 129 | sqc.clients[client]['lasttx'] = tx 130 | 131 | def mkDBInfo(): 132 | with sqc.dbpool.get().cursor() as cur: 133 | logts("Updating server db info") 134 | sqc.cfg['dbinfo-ts'] = datetime.now().strftime('%s') 135 | savecfg(sqc.cfg) 136 | apiStatus(cur, 'db') 137 | cur.execute("select count(*) from address where (id & %s = %s);", (P2SH_FLAG,P2SH_FLAG)) 138 | cur.execute("replace into info (class,`key`,value) values('db','address:p2sh',%s);", (cur.fetchone()[0], )) 139 | cur.execute("select count(*) from address where (id & %s = %s);", (BECH32_FLAG,BECH32_FLAG)) 140 | cur.execute("replace into info (class,`key`,value) values('db','address:p2wpkh',%s);", (cur.fetchone()[0], )) 141 | cur.execute("select count(*) from bech32 where 1;") 142 | cur.execute("replace into info (class,`key`,value) values('db','address:p2wsh',%s);", (cur.fetchone()[0], )) 143 | cur.execute("select count(*) from address where cast(conv(hex(reverse(unhex(substr(sha2(addr,0),1,10)))),16,10) as unsigned) != floor(id);") 144 | cur.execute("replace into info (class,`key`,value) 
values('db','address:id-collisions',%s);", (cur.fetchone()[0], )) 145 | cur.execute("select count(*) from trxs where strcmp(reverse(unhex(hex(id*8))), left(hash,5)) > 0;") 146 | cur.execute("replace into info (class,`key`,value) values('db','trxs:id-collisions',%s);", (cur.fetchone()[0], )) 147 | cur.execute("select count(*) from outputs where addr_id=0;") 148 | cur.execute("replace into info (class,`key`,value) values('db','outputs:non-std',%s);", (cur.fetchone()[0], )) 149 | cur.execute("select count(*) from outputs where tx_id is null;") 150 | cur.execute("replace into info (class,`key`,value) values('db','outputs:unspent',%s);", (cur.fetchone()[0], )) 151 | cur.execute("replace into info (class,`key`,value) values('db','all:updated',now());") 152 | logts("DB info update complete") 153 | sqc.dbwrk = None 154 | 155 | def options(cfg): # pylint:disable=too-many-branches 156 | try: 157 | opts,_ = getopt.getopt(sys.argv[1:], "hvb:p:c:d:l:w:h:p:r:u:i:", 158 | ["help", "version", "debug", "db=", "log=", "www=", "listen=", "path=", "rpc=", "user=", "dbinfo=", "defaults" ]) 159 | except getopt.GetoptError: 160 | usage() 161 | for opt,arg in opts: 162 | if opt in ("-h", "--help"): 163 | usage() 164 | elif opt in ("-v", "--version"): 165 | sys.exit(sys.argv[0]+': '+version) 166 | elif opt in ("-d", "--db"): 167 | cfg['db'] = arg 168 | elif opt in ("-l", "--log"): 169 | cfg['log'] = arg 170 | elif opt in ("-w", "--www"): 171 | cfg['www'] = arg 172 | elif opt in ("-p", "--path"): 173 | cfg['path'] = arg 174 | elif opt in ("-h", "--listen"): 175 | cfg['listen'] = arg 176 | elif opt in ("-r", "--rpc"): 177 | cfg['rpc'] = arg 178 | elif opt in ("-u", "--user"): 179 | cfg['user'] = arg 180 | elif opt in ("-i","--dbinfo"): 181 | cfg['dbinfo'] = int(arg) 182 | elif opt in "--defaults": 183 | savecfg(cfg) 184 | sys.exit("%s updated" % (sys.argv[0]+'.cfg')) 185 | elif opt in "--debug": 186 | cfg['debug'] = True 187 | 188 | def usage(): 189 | print """Usage: {0} [options...][cfg 
file]\nCommand options are:\n-h,--help\tShow this help info\n-v,--version\tShow version info 190 | --debug\t\tRun in foreground with logging to console 191 | --defaults\tUpdate cfg and exit\nDefault files are {0}.cfg, {0}.log 192 | \nThese options get saved in cfg file as defaults. 193 | -p,--path\tSet path for blob and header data files (/var/data/sqlchain) 194 | -h,--listen\tSet host:port for web server\n-w,--www\tWeb server root directory\n-u,--user\tSet user to run as 195 | -d,--db \tSet mysql db connection, "host:user:pwd:dbname"\n-l,--log\tSet log file path 196 | -r,--rpc\tSet rpc connection, "http://user:pwd@host:port" 197 | -i,--dbinfo\tSet db info update period in minutes (default=180, 0=at start, -1=never) """.format(sys.argv[0]) 198 | sys.exit(2) 199 | 200 | def sigterm_handler(_signo, _stack_frame): 201 | logts("Shutting down.") 202 | sqc.done.set() 203 | if sqc.syncd: 204 | sqc.syncd.join() 205 | if not sqc.cfg['debug']: 206 | os.unlink(sqc.cfg['pid'] if 'pid' in sqc.cfg else sys.argv[0]+'.pid') 207 | sys.exit(0) 208 | 209 | def sighup_handler(_signo, _stack_frame): 210 | path = sqc.cfg['log'] if 'log' in sqc.cfg else sys.argv[0]+'.log' 211 | sys.stdout.close() 212 | sys.stdout=open(path,'a') 213 | sys.stderr.close() 214 | sys.stderr=open(path,'a') 215 | logts("SIGHUP Log reopened") 216 | 217 | def run(): 218 | sqc.done = threading.Event() 219 | sqc.dbpool = DBPool(sqc.cfg['db'].split(':'), sqc.cfg['pool'], 'MySQLdb') 220 | 221 | mimetypes.init() 222 | mimetypes.add_type('application/x-font-woff', '.woff') 223 | mimetypes.add_type('application/x-font-woff2', '.woff2') 224 | mimetypes.add_type('application/x-font-woff', '.ttf') 225 | 226 | logts("Starting on %s" % sqc.cfg['listen']) 227 | host,port = sqc.cfg['listen'].split(':') 228 | sqc.server = WebSocketServer((host, int(port)), APIs, spawn=10000, **getssl(sqc.cfg)) 229 | sqc.server.start() 230 | 231 | if 'sync' not in sqc.cfg or sqc.cfg['sync'] > 0: 232 | log("Sync monitoring at %d second intervals" 
% (sqc.cfg['sync'] if 'sync' in sqc.cfg else 5,)) 233 | sqc.syncd = threading.Thread(target = syncMonitor) 234 | sqc.syncd.daemon = True 235 | sqc.syncd.start() 236 | else: 237 | log("Sync monitor disabled") 238 | 239 | drop2user(sqc.cfg, chown=True) 240 | 241 | sqc.server.serve_forever() 242 | 243 | if __name__ == '__main__': 244 | 245 | loadcfg(sqc.cfg) 246 | options(sqc.cfg) 247 | 248 | if 'apis' not in sqc.cfg: 249 | apis = [("/api", getattr(import_module("sqlchain.insight"),"do_API"))] 250 | else: 251 | apis = [] 252 | for api in sqc.cfg['apis']: 253 | log("Adding api at %s" % api[0]) 254 | apis.append((api[0], getattr( import_module(api[1]) if api[1] else sys.modules[__name__], api[2]))) 255 | APIs = Resource(OrderedDict(apis)) 256 | 257 | if sqc.cfg['debug']: 258 | signal.signal(signal.SIGINT, sigterm_handler) 259 | run() 260 | else: 261 | logpath = sqc.cfg['log'] if 'log' in sqc.cfg else sys.argv[0]+'.log' 262 | pidpath = sqc.cfg['pid'] if 'pid' in sqc.cfg else sys.argv[0]+'.pid' 263 | with daemon.DaemonContext(working_directory='.', umask=0002, stdout=open(logpath,'a'), stderr=open(logpath,'a'), 264 | signal_map={signal.SIGTERM:sigterm_handler, signal.SIGHUP:sighup_handler } ): 265 | with file(pidpath,'w') as f: 266 | f.write(str(os.getpid())) 267 | run() 268 | -------------------------------------------------------------------------------- /www/main.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | sqlChain Demo - Bitcoin SQL Blockchain Explorer 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 19 | 20 | 21 | 22 | 44 | 45 |
46 |
47 | 48 | 49 |
50 | Overview 51 | sqlChain is a compact SQL layer that runs on top of bitcoind. It extends the query options on the 52 | blockchain with a priority towards lower storage demands. 53 | This demonstration server provides multiple API (compatible) interfaces:
54 |
    55 |
  • Insight API (with some extensions)
  • 56 |
  • Blockchain.info API (with WebSocket)
  • 57 |
  • RPC via POST, GET urls
  • 58 |
  • Electrum server
  • 59 |
60 | The Demo API page above documents the queries supported and differences to the original sites.

61 | Three daemon programs are provided:
62 |
    63 |
  • sqlchaind updates the mysql backend from bitcoind.
  • 64 |
  • sqlchain-api provides the API and web interfaces for querying the database.
  • 65 |
  • sqlchain-electrum adds a layer over sqlchain-api supporting Electrum clients.
  • 66 |
67 | Using sqlChain over a pruning node as a compact alternative, a web socket interface, and other ideas, are currently under development. 68 | Status and blockchain analysis information below is updated periodically from this server. The open source Python code is 69 | available in the neoCogent Github and please check out 70 | my neoCogent blog. 71 | Server StatusThis information is provided by the /api/status/db/html call. 72 |
73 |
74 | 75 |
76 |
77 |
78 |
79 |
80 | 81 |
82 | 83 |
84 |
85 |
86 |
87 |
88 | Insight 89 |
    90 |
  • /api/block/0000000000001271efd5d9f7e539909160a181b2c0a2b8c164d6f8159e5c7dd9
  • 91 |
  • /api/block-index/123432
  • 92 |
  • /api/tx/23bb66ef300714042085d0ed2d05100531e80d5239020545887df059c0178b56
  • 93 |
  • /api/rawtx/23bb66ef300714042085d0ed2d05100531e80d5239020545887df059c0178b56
  • 94 |
  • /api/addr/1JK6pUCAXfnvcbXEpdVSxhVZ8W6kxQ4VEH?noTxList=1
  • 95 |
    Spelling mistake on txApperances is maintained for compatibility.
    96 |
  • /api/addr/1JK6pUCAXfnvcbXEpdVSxhVZ8W6kxQ4VEH/balance
  • 97 |
  • /api/addr/1JK6pUCAXfnvcbXEpdVSxhVZ8W6kxQ4VEH/totalReceived
  • 98 |
  • /api/addr/1JK6pUCAXfnvcbXEpdVSxhVZ8W6kxQ4VEH/totalSent
  • 99 |
  • /api/addr/1JK6pUCAXfnvcbXEpdVSxhVZ8W6kxQ4VEH/unconfirmedBalance
  • 100 |
  • /api/addr/1FvzCLoTPGANNjWoUo6jUGuAG3wg1w4YjR/utxo
  • 101 |
  • /api/addrs/1FvzCLoTPGANNjWoUo6jUGuAG3wg1w4YjR,1CmTtsKEqPxZsW3YjGYXbPSY89xrzkhy94/utxo
  • 102 |
    Also supports POST at '/api/addrs/utxo' with param 'addrs'.
    103 |
  • /api/addrs/17pfg6L3hT1ZPBASPt7DCQZfy9jWeMGq1W,1CmTtsKEqPxZsW3YjGYXbPSY89xrzkhy94/txs?from=0&to=1
  • 104 |
    Also supports POST at '/api/addrs/txs' with params 'addrs,from,to'.
    105 |
  • /api/txs?block=0000000000001271efd5d9f7e539909160a181b2c0a2b8c164d6f8159e5c7dd9
  • 106 |
  • /api/txs?address=1JK6pUCAXfnvcbXEpdVSxhVZ8W6kxQ4VEH
  • 107 |
  • /api/utils/estimatefee?nbBlocks=2
  • 108 |
  • /api/tx/send
  • 109 |
    Send raw transaction by POST method with param 'rawtx'.
    110 |
111 | Blockchain.info 112 |
    113 |
  • /bci/block-height/123432
  • 114 |
    ...
    115 |
  • /bci/rawblock/0000000000001271efd5d9f7e539909160a181b2c0a2b8c164d6f8159e5c7dd9
  • 116 |
  • /bci/rawtx/23bb66ef300714042085d0ed2d05100531e80d5239020545887df059c0178b56?format=hex
  • 117 |
  • /bci/address/1JK6pUCAXfnvcbXEpdVSxhVZ8W6kxQ4VEH
  • 118 |
  • /bci/unspent/1JK6pUCAXfnvcbXEpdVSxhVZ8W6kxQ4VEH
  • 119 |
  • /bci/latestblock
  • 120 |
  • /bci/q/getblockcount
  • 121 |
  • ws://api-host/ws
  • 122 |
    Blockchain.info compatible websocket interface
    123 |
124 | RPC 125 | This api also available via POST with [method,params] args. 126 |
    127 |
  • /rpc/getinfo
  • 128 |
  • /rpc/getdifficulty
  • 129 |
  • /rpc/getblock/0000000000001271efd5d9f7e539909160a181b2c0a2b8c164d6f8159e5c7dd9
  • 130 |
  • /rpc/getblockhash/123432
  • 131 |
  • /rpc/getblockcount
  • 132 |
  • /rpc/getrawtransaction/23bb66ef300714042085d0ed2d05100531e80d5239020545887df059c0178b56
  • 133 |
  • /rpc/gettxout/23bb66ef300714042085d0ed2d05100531e80d5239020545887df059c0178b56/1
  • 134 |
  • /rpc/getmempoolinfo
  • 135 |
  • /rpc/getrawmempool
  • 136 |
137 | Electrum Extensions to support sqlchain-electrum server. 138 |
    139 |
  • /api/history/1JK6pUCAXfnvcbXEpdVSxhVZ8W6kxQ4VEH
  • 140 |
  • /api/history/1JK6pUCAXfnvcbXEpdVSxhVZ8W6kxQ4VEH/status
  • 141 |
  • /api/history/1JK6pUCAXfnvcbXEpdVSxhVZ8W6kxQ4VEH/balance
  • 142 |
  • /api/history/1JK6pUCAXfnvcbXEpdVSxhVZ8W6kxQ4VEH/uncfmd
  • 143 |
  • /api/history/1FvzCLoTPGANNjWoUo6jUGuAG3wg1w4YjR/utxo
  • 144 |
  • /api/block-index/167324/electrum
  • 145 |
  • /api/tx/23bb66ef300714042085d0ed2d05100531e80d5239020545887df059c0178b56/output/1
  • 146 |
  • /api/tx/23bb66ef300714042085d0ed2d05100531e80d5239020545887df059c0178b56/raw
  • 147 |
  • /api/tx/23bb66ef300714042085d0ed2d05100531e80d5239020545887df059c0178b56/addrs
  • 148 |
  • /api/merkle/2acba2c6916cdfdbf3584dfdd32534af5031ab076029ff275167fa5181dee0a8
  • 149 |
150 | Extras Extensions, and status / debugging. 151 |
    152 |
  • /api/auto/123432
  • 153 |
    Auto detect value and return appropriate data.
    Supports 4 modes: block-index, block hash, address, tx hash.
    154 | Also available as POST method with 'data' parameter, as demonstrated above with "Find".
    155 |
  • /api/closure/1M8s2S5bgAzSSzVTeL7zruvMPLvzSkEAuv
  • 156 |
    Compute the closure of an address. That is, the addresses likely to be owned by the same entity.
    157 |
  • /api/status
  • 158 |
  • /api/status/db/html
  • 159 |
    Returns database information like row counts, disk space used.
    Advanced info like multi-sig address count, 160 | id collision counts, non-std and unspent output counts are updated periodically due to slow query time. An html 161 | version returns a table that can be used in web pages with styling.
    162 |
  • /api/tx/23bb66ef300714042085d0ed2d05100531e80d5239020545887df059c0178b56/html
  • 163 |
    An html table formatted version of raw transaction data with hex values and op codes.
    164 |
165 |
166 |
167 | 168 |
169 | Roll Your Own Blockchain Web App 170 | It's easy to create a web app using sqlChain on an Ubuntu server.

See the full install guide on github, but briefly: 171 |
    172 |
  1. Install base packages:
    173 | # may not need but won't hurt
    sudo apt-get install python-software-properties libev-dev libevent-dev
  2. 174 | sudo apt install software-properties-common python-software-properties libev-dev libevent-dev 175 | sudo apt install mariadb-server libmysqlclient20 176 | sudo apt install bitcoind libmysqlclient-dev python-pip python-dev build-essential 177 |
  3. Install sqlchain from PyPi:
    178 | sudo pip install sqlchain
  4. 179 |
  5. Run the init script to setup the mysql/maria db, users and config files:
    180 | sudo sqlchain-init
  6. 181 |
  7. Start the daemons, as needed:
    182 | sudo systemctl start bitcoin
    sudo systemctl start sqlchain
    sudo systemctl start sqlchain-api
  8. 183 |
  9. Modify the /var/www source files as your own app.
  10. 184 |
  11. Scale to serve the world:
    185 | run nginx as front end and mysql replication behind.
  12. 186 |
187 |
188 | 189 |
190 | Support 191 | Huh, What support?

192 | More seriously, I put a lot of work into creating this. If you want to support continued effort by donating 193 | that would be cool. Visit my blog donation page.

194 | I do freelance programming work, and I'm available to build on this commercially for you or work on other Bitcoin related 195 | projects. My rates are very reasonable, given my expertise, because I live in a downright life-is-cheap country. 196 | So if donating rubs you the wrong way then you can always hire me.<br/>

197 | Give <a href="http://www.vultr.com/?ref=7087266">Vultr.com</a> a try. I've been very happy with them for development and testing sqlchain. 198 | Use my referral link and I get funded for my testing without costing you a penny extra. You can start up and run VPS servers by the 199 | hour using a simple control panel, and in my tests they've been both faster and cheaper than Amazon AWS. 200 |
201 |
202 |

203 |
204 |
205 | 206 | 207 | 208 | 209 | 210 | -------------------------------------------------------------------------------- /tests/test_utils_bitcoin.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # 3 | # sqlchain.utils - unit test module 4 | # 5 | 6 | import os, sys 7 | from struct import unpack 8 | 9 | try: 10 | import MySQLdb as db 11 | except ImportError: 12 | print "Cannot run database tests without MySQLdb module" 13 | 14 | import pytest 15 | 16 | from sqlchain.version import ADDR_ID_FLAGS, P2SH_FLAG, BECH32_FLAG, BECH32_LONG 17 | from sqlchain.util import dotdict, is_address, addr2pkh, mkaddr, addr2id, decodeScriptPK, mkOpCodeStr, decodeVarInt, encodeVarInt 18 | from sqlchain.util import txh2id, insertAddress, findTx 19 | 20 | __builtins__['sqc'] = dotdict() # container for super globals 21 | sqc.cfg = { 'cointype':'bitcoin' } 22 | 23 | # memory based test db with same schema 24 | # remains after test run for inspection, cleared at start of each run 25 | # does not survive mysql restart or os reboot 26 | @pytest.fixture(scope="module") 27 | def testdb(request): 28 | if 'MySQLdb' not in sys.modules: 29 | pytest.skip("requires MySQLdb to run") 30 | return None 31 | dbuser,dbpwd = request.config.getoption("--dbuser").split(':') 32 | try: 33 | sql = db.connect('localhost',dbuser,dbpwd,'') 34 | except db.OperationalError: 35 | pytest.skip("requires mysql running + admin user/pwd to run") 36 | return None 37 | cur = sql.cursor() 38 | cur.execute("set sql_notes=0;") 39 | cur.execute("show databases like 'unittest';") 40 | if cur.rowcount > 0: 41 | print "\nClearing test db" 42 | cur.execute("drop database unittest;") 43 | sqlsrc = open('/usr/local/share/sqlchain/docs/sqlchain.sql').read() 44 | sqlcode = '' 45 | for k,v in [('{dbeng}','Memory'),('{dbname}','unittest'),('{dbpwd}',dbpwd),('{dbuser}',dbuser)]: 46 | sqlsrc = sqlsrc.replace(k, v) 47 | for line in sqlsrc.splitlines(): 48 | if line != '' and 
line[:2] != '--': 49 | sqlcode += line 50 | for stmnt in sqlcode.split(';'): 51 | if stmnt: 52 | cur.execute(stmnt) 53 | return cur 54 | 55 | def test_is_address(): 56 | #p2pkh 57 | assert is_address('1FomKJy8btcmuyKBFXeZmje94ibnQxfDEf') 58 | assert is_address('1EWpTBe9rE27NT9boqg8Zsc643bCFCEdbh') 59 | assert is_address('1MBxxUgVF27trqqBMnoz8Rr7QATEoz1u2Y') 60 | assert not is_address('1MBxxUgVF27trqqCMnoz8Rr7QATEoz1u2Y') 61 | assert not is_address('1EWpTBe9rE27NT9boqg8Zsc643bCFCEdbc') 62 | assert not is_address('3EWpTBe9rE27NT9boqg8Zsc643bCFCEdbh') 63 | #p2sh 64 | assert is_address('3De5zB9JKmwU4zP85EEazYS3MEDVXSmvvm') 65 | assert is_address('3MixsgkBB8NBQe5GAxEj4eGx5YPxvbaSk9') 66 | assert is_address('3HQR7C1Ag53BoaxKDeaA97wTh9bpGuUpgg') 67 | assert not is_address('3HQR7C1Ag53BoaxKDeaA97wTh7bpGuUpgg') 68 | assert not is_address('2MixsgkBB8NBQe5GAxEj4eGx5YPxvbaSk9') 69 | #p2wpkh 70 | assert is_address('bc1qs5d7gy4l7k7nm5rqzda8qruh7kqzhjdhgn7upf') 71 | assert is_address('bc1qvee24y274ymfxx0luvl2jsr6mfxmewd22jfvwd') 72 | assert is_address('bc1qxn8tc5kmuu2sevvjz0xxcz4dm2c42pxd9ea0dt') 73 | assert is_address('BC1QW508D6QEJXTDG4Y5R3ZARVARY0C5XW7KV8F3T4') 74 | assert is_address('BC1SW50QA3JX3S') 75 | assert is_address('bc1zw508d6qejxtdg4y5r3zarvaryvg6kdaj') 76 | assert not is_address('bc1qw508d6qejxtdg4y5r3zarvary0c5xw7kv8f3t5') 77 | assert not is_address('bc1qw508d6qejxtdg4y5r3zarvary0c5xw7kv8f3t5') 78 | assert not is_address('BC13W508D6QEJXTDG4Y5R3ZARVARY0C5XW7KN40WF2') 79 | assert not is_address('cb1zw508d6qejxtdg4y5r3zarvaryvg6kdaj') 80 | #p2wsh 81 | assert is_address('bc1qrp33g0q5c5txsp9arysrx4k6zdkfs4nce4xj0gdcccefvpysxf3qccfmv3') 82 | assert is_address('bc1ql0y3lcuy6937hauw7ur304dd9fmw4ca7tt4kr99uda7cg7walvystw4gyu') 83 | assert is_address('bc1qadvzzmf5fzh7546n2ms76vkl0rd65wlg753dq4ds0v30urtpxlasf5lc7a') 84 | assert is_address('bc1pw508d6qejxtdg4y5r3zarvary0c5xw7kw508d6qejxtdg4y5r3zarvary0c5xw7k7grplx') 85 | assert not 
is_address('tb1qrp33g0q5c5txsp9arysrx4k6zdkfs4nce4xj0gdcccefvpysxf3q0sL5k7') 86 | assert not is_address('bc10w508d6qejxtdg4y5r3zarvary0c5xw7kw508d6qejxtdg4y5r3zarvary0c5xw7kw5rljs90') 87 | assert not is_address('bc1rw5uspcuh') 88 | 89 | def test_addr2pkh(): 90 | #p2pkh 91 | assert addr2pkh('127RhwC5vQJN4cJ6UaHc1A9NCSpz1e9B4i') =='0c2f3eb0fa5f65269236658bc361187dfaa964bb'.decode('hex') 92 | assert addr2pkh('1JS2xvSfG2hD3rnMGd3xxEeYSoBs8r7eKh') =='bf362d4dda191483e789ccf3059d6447cd64bb9c'.decode('hex') 93 | assert addr2pkh('1DK2kyHNMUx8XoWZm9t2GWqJGzqBNxUYuv') =='870a76dd469ab77084229a61984db634abaafb8b'.decode('hex') 94 | #p2sh 95 | assert addr2pkh('34H8pSTwFNEngG5xfadqctdQykcGgRmSgf') =='1c6426545908803de2a4ed61caf805ccc282900f'.decode('hex') #2of2 96 | assert addr2pkh('3KKXcGTmxvedJr9GrzWayA8GVnS5AXm8tj') =='c161e4848786150e2add1a93f084fa94a7259b97'.decode('hex') #2of3 97 | assert addr2pkh('38oAwJnDWRTWf1GUg7FJ112bjVRoMjvCmV') =='4df2e66aeb640a642c8476185f63e433ad074220'.decode('hex') #3of4 98 | #p2wpkh-p2sh 99 | assert addr2pkh('3F2YodB6PAzbov1rAkYVMNu6KBB1g9AHrG') =='924b50fdfc0e0afab1b1d12acae31c3b4a215154'.decode('hex') 100 | assert addr2pkh('36LF9sFUJQAzGgxKtrVFDcXqmTF9yyVeow') =='32eaeff4e7e856e74dcf0926724d04324320eb75'.decode('hex') 101 | assert addr2pkh('35FowTfm9qpeKGX9VQuuSrcgDiBd9SczAi') =='271c19a61825788201434354d2a3a6b03d23e316'.decode('hex') 102 | assert addr2pkh('3Pux8TuPxZHm7RsBvAP9zjkF3jCcw9K7wL') =='f3c501dd6b3086911f7b9e7eea0dade0de025287'.decode('hex') 103 | #p2wsh-p2sh - unavailble 104 | #p2wpkh 105 | assert addr2pkh('BC1QW508D6QEJXTDG4Y5R3ZARVARY0C5XW7KV8F3T4').encode('hex') == '751e76e8199196d454941c45d1b3a323f1433bd6' # bip173 106 | assert addr2pkh('bc1q5lz8xffjt4azkzm4hled45qpgcu46thhl6j7vm').encode('hex') == 'a7c47325325d7a2b0b75bff2dad00146395d2ef7' # electrum 107 | assert addr2pkh('bc1qzlc8mvcyww95ycfgf520y7yvu64qhta6uqxada').encode('hex') == '17f07db304738b4261284d14f2788ce6aa0bafba' # electrum 108 | assert 
addr2pkh('bc1qtcpsntfzjx7mj6ljqy480sdufnh2nuwqhtsz8g').encode('hex') == '5e0309ad2291bdb96bf2012a77c1bc4ceea9f1c0' # electrum 109 | assert addr2pkh('bc1q0yrdw9t2pyev94jfeyq9mm4a0smfdswfweht6t').encode('hex') == '7906d7156a0932c2d649c9005deebd7c3696c1c9' # electrum 110 | #p2wsh 111 | assert addr2pkh('bc1qm7fcgs9ugg66rw5tg2w7sy0m0afttnnucr59hcmpa87sezd769vsac7pmy') \ 112 | =='df938440bc4235a1ba8b429de811fb7f52b5ce7cc0e85be361e9fd0c89bed159'.decode('hex') #2of2 electrum 113 | assert addr2pkh('bc1q5gp20lfuhz2avvqwau6sgwmakrp5r2qv66x56rfr9t30halv4vfs283f6e') \ 114 | =='a202a7fd3cb895d6300eef35043b7db0c341a80cd68d4d0d232ae2fbf7ecab13'.decode('hex') #2of2 electrum 115 | assert addr2pkh('bc1qs5vep8zczr6rfskq3euz44zjnv05zmhkp84jhkufufsdy2ygfr7qr8x759') \ 116 | =='8519909c5810f434c2c08e782ad4529b1f416ef609eb2bdb89e260d2288848fc'.decode('hex') #2of2 electrum 117 | #bech32, future versions from bip173 spec. 118 | assert addr2pkh('bc1pw508d6qejxtdg4y5r3zarvary0c5xw7kw508d6qejxtdg4y5r3zarvary0c5xw7k7grplx') \ 119 | == '751e76e8199196d454941c45d1b3a323f1433bd6751e76e8199196d454941c45d1b3a323f1433bd6'.decode('hex') # witver 0x51 120 | assert addr2pkh('BC1SW50QA3JX3S') == '751e'.decode('hex') # witver 0x60 121 | assert addr2pkh('bc1zw508d6qejxtdg4y5r3zarvaryvg6kdaj') =='751e76e8199196d454941c45d1b3a323'.decode('hex') # witver 0x52 122 | 123 | def test_mkaddr(): 124 | #p2pkh 125 | assert mkaddr('0c2f3eb0fa5f65269236658bc361187dfaa964bb'.decode('hex')) == '127RhwC5vQJN4cJ6UaHc1A9NCSpz1e9B4i' 126 | assert mkaddr('bf362d4dda191483e789ccf3059d6447cd64bb9c'.decode('hex')) == '1JS2xvSfG2hD3rnMGd3xxEeYSoBs8r7eKh' 127 | assert mkaddr('870a76dd469ab77084229a61984db634abaafb8b'.decode('hex')) == '1DK2kyHNMUx8XoWZm9t2GWqJGzqBNxUYuv' 128 | #p2sh 129 | assert mkaddr('1c6426545908803de2a4ed61caf805ccc282900f'.decode('hex'),p2sh=True) == '34H8pSTwFNEngG5xfadqctdQykcGgRmSgf' 130 | assert mkaddr('c161e4848786150e2add1a93f084fa94a7259b97'.decode('hex'),p2sh=True) == 
'3KKXcGTmxvedJr9GrzWayA8GVnS5AXm8tj' 131 | assert mkaddr('4df2e66aeb640a642c8476185f63e433ad074220'.decode('hex'),p2sh=True) == '38oAwJnDWRTWf1GUg7FJ112bjVRoMjvCmV' 132 | #p2wpkh-p2sh 133 | assert mkaddr('924b50fdfc0e0afab1b1d12acae31c3b4a215154'.decode('hex'),p2sh=True) == '3F2YodB6PAzbov1rAkYVMNu6KBB1g9AHrG' 134 | assert mkaddr('32eaeff4e7e856e74dcf0926724d04324320eb75'.decode('hex'),p2sh=True) == '36LF9sFUJQAzGgxKtrVFDcXqmTF9yyVeow' 135 | assert mkaddr('271c19a61825788201434354d2a3a6b03d23e316'.decode('hex'),p2sh=True) == '35FowTfm9qpeKGX9VQuuSrcgDiBd9SczAi' 136 | #p2wsh-p2sh - unavailble 137 | #p2wpkh 138 | assert mkaddr('751e76e8199196d454941c45d1b3a323f1433bd6'.decode('hex'),bech32=True) == 'BC1QW508D6QEJXTDG4Y5R3ZARVARY0C5XW7KV8F3T4'.lower() 139 | assert mkaddr('a7c47325325d7a2b0b75bff2dad00146395d2ef7'.decode('hex'),bech32=True) == 'bc1q5lz8xffjt4azkzm4hled45qpgcu46thhl6j7vm' 140 | assert mkaddr('7906d7156a0932c2d649c9005deebd7c3696c1c9'.decode('hex'),bech32=True) == 'bc1q0yrdw9t2pyev94jfeyq9mm4a0smfdswfweht6t' 141 | #p2wsh 142 | assert mkaddr('df938440bc4235a1ba8b429de811fb7f52b5ce7cc0e85be361e9fd0c89bed159'.decode('hex'),bech32=True) \ 143 | == 'bc1qm7fcgs9ugg66rw5tg2w7sy0m0afttnnucr59hcmpa87sezd769vsac7pmy' 144 | assert mkaddr('a202a7fd3cb895d6300eef35043b7db0c341a80cd68d4d0d232ae2fbf7ecab13'.decode('hex'),bech32=True) \ 145 | == 'bc1q5gp20lfuhz2avvqwau6sgwmakrp5r2qv66x56rfr9t30halv4vfs283f6e' 146 | assert mkaddr('8519909c5810f434c2c08e782ad4529b1f416ef609eb2bdb89e260d2288848fc'.decode('hex'),bech32=True) \ 147 | == 'bc1qs5vep8zczr6rfskq3euz44zjnv05zmhkp84jhkufufsdy2ygfr7qr8x759' 148 | 149 | def test_addr2id(): 150 | assert addr2id('127RhwC5vQJN4cJ6UaHc1A9NCSpz1e9B4i') & ADDR_ID_FLAGS == 0 151 | assert addr2id('34H8pSTwFNEngG5xfadqctdQykcGgRmSgf') & ADDR_ID_FLAGS == P2SH_FLAG 152 | assert addr2id('3Pux8TuPxZHm7RsBvAP9zjkF3jCcw9K7wL') & ADDR_ID_FLAGS == P2SH_FLAG 153 | assert addr2id('BC1QW508D6QEJXTDG4Y5R3ZARVARY0C5XW7KV8F3T4') & ADDR_ID_FLAGS == 
BECH32_FLAG 154 | assert addr2id('bc1q0yrdw9t2pyev94jfeyq9mm4a0smfdswfweht6t') & ADDR_ID_FLAGS == BECH32_FLAG 155 | assert addr2id('bc1qm7fcgs9ugg66rw5tg2w7sy0m0afttnnucr59hcmpa87sezd769vsac7pmy') & ADDR_ID_FLAGS == BECH32_LONG 156 | assert addr2id('bc1pw508d6qejxtdg4y5r3zarvary0c5xw7kw508d6qejxtdg4y5r3zarvary0c5xw7k7grplx') & ADDR_ID_FLAGS == BECH32_LONG 157 | 158 | assert addr2id('127RhwC5vQJN4cJ6UaHc1A9NCSpz1e9B4i', rtnPKH=True) == (369302191541,'0c2f3eb0fa5f65269236658bc361187dfaa964bb'.decode('hex')) 159 | assert addr2id('34H8pSTwFNEngG5xfadqctdQykcGgRmSgf', rtnPKH=True) == (1260639692375,'1c6426545908803de2a4ed61caf805ccc282900f'.decode('hex')) 160 | assert addr2id('3Pux8TuPxZHm7RsBvAP9zjkF3jCcw9K7wL', rtnPKH=True) == (1905635504253,'f3c501dd6b3086911f7b9e7eea0dade0de025287'.decode('hex')) 161 | assert addr2id('BC1QW508D6QEJXTDG4Y5R3ZARVARY0C5XW7KV8F3T4', rtnPKH=True) \ 162 | == (2239906766591,'751e76e8199196d454941c45d1b3a323f1433bd6'.decode('hex')) 163 | assert addr2id('bc1q0yrdw9t2pyev94jfeyq9mm4a0smfdswfweht6t', rtnPKH=True) \ 164 | == (2322962910768,'7906d7156a0932c2d649c9005deebd7c3696c1c9'.decode('hex')) 165 | assert addr2id('bc1qm7fcgs9ugg66rw5tg2w7sy0m0afttnnucr59hcmpa87sezd769vsac7pmy', rtnPKH=True) \ 166 | == (3310624892327,'df938440bc4235a1ba8b429de811fb7f52b5ce7cc0e85be361e9fd0c89bed159'.decode('hex')) 167 | assert addr2id('bc1pw508d6qejxtdg4y5r3zarvary0c5xw7kw508d6qejxtdg4y5r3zarvary0c5xw7k7grplx', rtnPKH=True) \ 168 | == (4041402476188,'751e76e8199196d454941c45d1b3a323f1433bd6751e76e8199196d454941c45d1b3a323f1433bd6'.decode('hex')) 169 | 170 | data = [ 171 | ['76a9140c2f3eb0fa5f65269236658bc361187dfaa964bb88ac','p2pkh','','127RhwC5vQJN4cJ6UaHc1A9NCSpz1e9B4i', 172 | 'OP_DUP OP_HASH160 0c2f3eb0fa5f65269236658bc361187dfaa964bb OP_EQUALVERIFY OP_CHECKSIG'], # p2pkh 173 | 174 | ['a9141c6426545908803de2a4ed61caf805ccc282900f87','p2sh','','34H8pSTwFNEngG5xfadqctdQykcGgRmSgf', 175 | 'OP_HASH160 1c6426545908803de2a4ed61caf805ccc282900f OP_EQUAL'], # 
p2sh 176 | 177 | ['210298d26fa24aca4b1fdf7bc0d73bf875c3e10b198fb47de414cff39c7229dbacc6AC','p2pk', 178 | '210298d26fa24aca4b1fdf7bc0d73bf875c3e10b198fb47de414cff39c7229dbacc6AC','1G7AYiSCXMKyVeSVcPUe8PqgfygiqxZyeX', 179 | '0298d26fa24aca4b1fdf7bc0d73bf875c3e10b198fb47de414cff39c7229dbacc6 OP_CHECKSIG'], # p2pk compressed 180 | 181 | ['4104E9A095A6A5790BC82FEADE07EE6FC77B05BC4DE7F3790C36D2ECC886D9EC0AC0E44402759C51ED0D3BA2F53E749B30A6D1772F0DAE1E3F465E8C8828DF899FE2AC','p2pk', 182 | '4104E9A095A6A5790BC82FEADE07EE6FC77B05BC4DE7F3790C36D2ECC886D9EC0AC0E44402759C51ED0D3BA2F53E749B30A6D1772F0DAE1E3F465E8C8828DF899FE2AC', 183 | '1JGTdegLcK8N9mqwhXmGjeUgbQNugii3rm', # p2pk uncompressed 184 | '04e9a095a6a5790bc82feade07ee6fc77b05bc4de7f3790c36d2ecc886d9ec0ac0e44402759c51ed0d3ba2f53e749b30a6d1772f0dae1e3f465e8c8828df899fe2 OP_CHECKSIG'], 185 | 186 | ['a914924b50fdfc0e0afab1b1d12acae31c3b4a21515487','p2sh','','3F2YodB6PAzbov1rAkYVMNu6KBB1g9AHrG', 187 | 'OP_HASH160 924b50fdfc0e0afab1b1d12acae31c3b4a215154 OP_EQUAL'], # p2sh(p2wpkh) 188 | 189 | ['0014a7c47325325d7a2b0b75bff2dad00146395d2ef7','p2wpkh','','bc1q5lz8xffjt4azkzm4hled45qpgcu46thhl6j7vm', 190 | 'OP_0 a7c47325325d7a2b0b75bff2dad00146395d2ef7'], # p2wpkh 191 | 192 | ['0020a202a7fd3cb895d6300eef35043b7db0c341a80cd68d4d0d232ae2fbf7ecab13','p2wsh','', 193 | 'bc1q5gp20lfuhz2avvqwau6sgwmakrp5r2qv66x56rfr9t30halv4vfs283f6e', 194 | 'OP_0 a202a7fd3cb895d6300eef35043b7db0c341a80cd68d4d0d232ae2fbf7ecab13'], # p2wsh 195 | 196 | ['5128751e76e8199196d454941c45d1b3a323f1433bd6751e76e8199196d454941c45d1b3a323f1433bd6','other', 197 | '5128751e76e8199196d454941c45d1b3a323f1433bd6751e76e8199196d454941c45d1b3a323f1433bd6', # future bech32, witver 0x51 198 | 199 | 'bc1pw508d6qejxtdg4y5r3zarvary0c5xw7kw508d6qejxtdg4y5r3zarvary0c5xw7k7grplx', 200 | 'OP_1 751e76e8199196d454941c45d1b3a323f1433bd6751e76e8199196d454941c45d1b3a323f1433bd6'], 201 | ['6002751e','other','6002751e','BC1SW50QA3JX3S','OP_16 751e'] # future bech32, witver 0x60 202 | ] 
203 | 204 | def test_decodeScriptPK(): 205 | for row in data: 206 | r = decodeScriptPK(row[0].decode('hex')) 207 | assert r['type'] == row[1] 208 | assert r['data'].encode('hex').lower() == row[2].lower() 209 | if 'addr' in r: 210 | assert r['addr'] == row[3] 211 | 212 | def test_mkOpCodeStr(): 213 | for row in data: 214 | assert mkOpCodeStr(row[0].decode('hex'), sepPUSH=' ') == row[4] 215 | 216 | def test_VarInt(): 217 | values = [ (1, [ 0,1,2,55,192,192,234,252]), 218 | (3, [ 253, 255,256,257,4000,16500,47654,2**16-1]), 219 | (5, [2**16,2**16+1,2**16+2,2*2**16,2**24,2**32-1]), 220 | (9, [2**32,2**32+1,2**32+2,2**40+234,2**42,2**44+2**24-5,2**48 ]) ] 221 | for (L,grp) in values: 222 | for N in grp: 223 | assert decodeVarInt(encodeVarInt(N)) == ( N,L ) 224 | 225 | def test_insertAddress(testdb, monkeypatch): 226 | addrs = [ '1FomKJy8btcmuyKBFXeZmje94ibnQxfDEf','1EWpTBe9rE27NT9boqg8Zsc643bCFCEdbh','1MBxxUgVF27trqqBMnoz8Rr7QATEoz1u2Y', 227 | '1EWpTBe9rE27NT9b1qg8Zsc643bCFCEdbh','3EWpTBe9rE27NT9boqg8Zsc643bCFCEdbh','3De5zB9JKmwU4zP85EEazYS3MEDVXSmvvm', 228 | '3MixsgkBB8NBQe5GAxEj4eGx5YPxvbaSk9','3HQR7C1Ag53BoaxKDeaA97wTh9bpGuUpgg','2MixsgkBB8NBQe5GAxEj4eGx5YPxvbaSk9', 229 | 'bc1q5lz8xffjt4azkzm4hled45qpgcu46thhl6j7vm','bc1q0yrdw9t2pyev94jfeyq9mm4a0smfdswfweht6t', 230 | '1EWpTBe9rE27NT9boqg8Zsc643bCFCEdbh', # duplicate, should not add row 231 | 'bc1q5gp20lfuhz2avvqwau6sgwmakrp5r2qv66x56rfr9t30halv4vfs283f6e' ] # bech32 table, should not add row 232 | 233 | def fake_id(addr, cur=None, rtnPKH=False): # forces collisions by always returning same id 234 | x = monkeypatch._setattr[0][2](addr, cur, rtnPKH) 235 | return ((x[0]&ADDR_ID_FLAGS)|123456,x[1]) if isinstance(x, tuple) else (x&ADDR_ID_FLAGS)|123456 # keep flags 236 | monkeypatch.setattr("sqlchain.util.addr2id", fake_id) 237 | 238 | for addr in addrs: 239 | insertAddress(testdb, addr) 240 | testdb.execute("select count(1) from address where id !=0;") 241 | assert testdb.fetchone()[0] == 11 # 13 minus 2 addresses not 
inserted 242 | 243 | def test_findTx(testdb): 244 | trxs = [] 245 | tx1 = bytearray(os.urandom(32)) 246 | for x in range(16): 247 | tx1[-1] = chr((int(tx1[-1])+x)&0xFF) # use sequential tx hashes to test collisions 248 | tid,new = findTx(testdb, tx1, True) 249 | testdb.execute("insert ignore into trxs (id,hash,ins,outs,txsize) values (%s,%s,0,0,0);", (tid,tx1)) 250 | trxs.append(tid) 251 | assert len(set(trxs)) == len(trxs) # all ids should be unique 252 | 253 | for x in range(1000): 254 | tx1 = os.urandom(32) 255 | assert txh2id(tx1) == unpack('> 3 # test 1000 randoms hashes match, just for heck of it 256 | -------------------------------------------------------------------------------- /sqlchaind: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # 3 | # sqlchaind - daemon to update sql blockchain db 4 | # 5 | # pylint:disable=no-member 6 | from Queue import Queue, Empty 7 | from datetime import datetime 8 | from struct import pack, unpack 9 | 10 | import os, sys, socket, getopt, time, signal, threading, daemon 11 | import MySQLdb as db 12 | 13 | from sqlchain.version import version, coincfg, BLKDAT_NEAR_SYNC, BLKDAT_MAGIC, MAX_IO_TX, MAX_TX_BLK 14 | from sqlchain.util import dotdict, sqlchain_overlay, loadcfg, savecfg, drop2user, rpcPool, blockwork, int2bin32, log, logts 15 | from sqlchain.util import encodeVarInt, decodeBlock, decodeTx, findTx, insertAddress, mkBlobHdr, insertBlob, puthdr, gethdr 16 | from sqlchain.blkdat import BlkDatHandler 17 | 18 | __builtins__.sqc = dotdict() # container for super globals 19 | 20 | sqc.cfg = { 'log':sys.argv[0]+'.log', 'queue':8, 'no-sigs':False, 'db':'', 'rpc':'', 'path':'/var/data/sqlchain', 'cointype':'bitcoin' } 21 | sqc.bestblk = 120000 22 | sqc.zmq = True 23 | blksecs = [] 24 | memPool = set() 25 | 26 | def getBlocks(blk): 27 | sql = db.connect(*sqc.cfg['db'].split(':')) 28 | sql.autocommit(True) # only mempool, most data inserted in handlers 29 | cur = 
sql.cursor() 30 | cur.execute("show tables like 'bech32';") # test if old db version and abort with log msg 31 | if cur.rowcount == 0: 32 | log("sqlChain Database upgrade required for this daemon version.\nCannot continue. Run sqlchain-upgrade-db.") 33 | sqc.done.set() 34 | return 0 35 | blkinfo = sqc.rpc.getblockchaininfo() # wait for node to be ready 36 | if blkinfo is None: 37 | return 0 38 | if blk == 0: 39 | cur.execute('select ifnull(max(id), -1) from blocks') 40 | blk = int(cur.fetchone()[0] + 1) 41 | cur.execute("select hex(chainwork) from blocks where id=%s;", (blk-1,)) 42 | row = cur.fetchone() 43 | chainwork = int(row[0],16) if row is not None else 0 44 | if blk == 0 and 'max_blks' in sqc.cfg: 45 | blk = blkinfo['blocks'] - sqc.cfg['max_blks'] 46 | log("Using block limit %d" % sqc.cfg['max_blks']) 47 | blkhash = sqc.rpc.getblockhash(blk) 48 | chainwork = int(sqc.rpc.getblockheader(blkhash)['chainwork'],16) 49 | else: 50 | blkhash = sqc.rpc.getblockhash(blk) 51 | chainwork = int(sqc.rpc.getblockheader(blkhash)['chainwork'],16) 52 | startblk = blk 53 | 54 | sqc.bestblk = blkinfo['headers'] if 'headers' in blkinfo else 0 55 | if 'blkdat' in sqc.cfg and (sqc.bestblk - startblk) > coincfg(BLKDAT_NEAR_SYNC): 56 | blk = getBlocksDirect(cur, blk, chainwork) # use direct file access for catch up 57 | if not sqc.done.isSet(): 58 | log("Using rpc mode. 
Monitoring blocks / mempool on " + sqc.cfg['cointype']) 59 | poll_delay = 0.05 60 | while not sqc.done.isSet(): 61 | blkinfo = sqc.rpc.getblockchaininfo() 62 | if blkinfo is None or blk > blkinfo['blocks']: 63 | if sqc.zmq and 'zmq' in sqc.cfg: 64 | blk = getBlocksZMQ(cur, blk, chainwork) # try to upgrade to ZMQ, more efficient 65 | else: 66 | checkMemPool(cur) 67 | time.sleep(5) 68 | continue 69 | if blockQ.qsize() >= sqc.cfg['queue']: 70 | time.sleep(min(poll_delay,5)) 71 | poll_delay *= 2 72 | continue 73 | poll_delay = 0.05 74 | if 'pruned' in blkinfo and blkinfo['pruned']: 75 | chkPruning(blk - blockQ.qsize()) 76 | rpcstart = time.time() 77 | blkhash = sqc.rpc.getblockhash(blk) 78 | if blkhash is not None: 79 | data = decodeBlock(sqc.rpc.getblock(blkhash, False).decode('hex')) 80 | data['height'] = blk 81 | chainwork += blockwork(data['bits']) 82 | data['chainwork'] = int2bin32(chainwork) 83 | data['rpc'] = time.time()-rpcstart 84 | blockQ.put(data) 85 | blk += 1 86 | return blk - startblk 87 | 88 | def getBlocksDirect(cur, blk, chainwork): 89 | blkscan = threading.Thread(target = BlkDatHandler, args=(True,)) 90 | blkscan.start() 91 | idle_count = 0 92 | log("Using blkdat mode: %s" % sqc.cfg['blkdat']) 93 | while not sqc.done.isSet(): 94 | if blockQ.qsize() >= sqc.cfg['queue']: 95 | time.sleep(0.01) 96 | continue 97 | chkPruning(blk - blockQ.qsize()) 98 | if (sqc.bestblk - blk) < coincfg(BLKDAT_NEAR_SYNC): 99 | logts("Near sync %d. Aborting direct mode" % blk) 100 | return blk 101 | if idle_count >= 60: 102 | logts("No blkdat activity, 3 minutes. 
Aborting direct mode") 103 | return blk 104 | cur.execute("select filenum,filepos from blkdat where id=%s limit 1;", (blk,)) 105 | row = cur.fetchone() 106 | if row: 107 | filenum,pos = row 108 | started = time.time() 109 | with open(sqc.cfg['blkdat'] + "/blocks/blk%05d.dat" % filenum, 'rb') as fd: 110 | fd.seek(pos) 111 | magic,blksize = unpack(' 120000 and blk % 100 == 0: 170 | blkinfo = sqc.rpc.getblockchaininfo() 171 | if blkinfo is not None and blkinfo['pruned']: 172 | keep = 20 if not 'prune' in sqc.cfg else max(sqc.cfg['prune'], 20) 173 | sqc.rpc.pruneblockchain(blk-keep) # keep at least 20 blocks for reorgs but can now config higher 174 | sqc.bestblk = blkinfo['headers'] 175 | 176 | def limitBlocks(cur, max_blks): 177 | cur.execute("select id from blocks order by id desc limit %s, 1", (max_blks,)) 178 | row = cur.fetchone() 179 | if row: 180 | blkid = row[0] 181 | cur.execute("select id from trxs where block_id < %s", ((blkid+1)*MAX_TX_BLK,)) 182 | txids = [ txid for txid, in cur ] 183 | for txid in txids: 184 | cur.execute("delete from outputs where id >= %s*{0} and id < %s*{0}".format(MAX_IO_TX), (txid,txid)) 185 | cur.execute("delete from trxs where id=%s", (txid,)) 186 | cur.execute("delete from blocks where id<=%s", (blkid,)) 187 | 188 | def BlockHandler(): 189 | sql = db.connect(*sqc.cfg['db'].split(':')) 190 | cur = sql.cursor() 191 | while not sqc.done.isSet(): 192 | try: 193 | insertBlock(cur, blockQ.get(True, 5)) 194 | sql.commit() 195 | if 'max_blks' in sqc.cfg: 196 | limitBlocks(cur, sqc.cfg['max_blks']) 197 | sql.commit() 198 | except Empty: 199 | pass 200 | 201 | def OutputHandler(): 202 | sql = db.connect(*sqc.cfg['db'].split(':')) 203 | cur = sql.cursor() 204 | cur.execute("select count(*) from mempool;") 205 | poolcnt = cur.fetchone()[0] 206 | ins,outs = [],[] 207 | while True: 208 | try: 209 | xo,xi = outQ.get(True, 3) 210 | sqc.flushed.clear() 211 | outs.extend(xo) 212 | ins.extend(xi) 213 | if len(outs) + len(ins) > (8192 if not poolcnt 
else 0): 214 | cur.executemany("insert ignore into outputs (id,addr_id,value) values(%s,%s,%s)", outs) 215 | cur.executemany("update outputs set tx_id=%s where id=%s limit 1", ins) 216 | sql.commit() 217 | ins,outs = [],[] 218 | except Empty: 219 | if len(outs) > 0 or len(ins) > 0: 220 | cur.executemany("insert ignore into outputs (id,addr_id,value) values(%s,%s,%s)", outs) 221 | cur.executemany("update outputs set tx_id=%s where id=%s limit 1", ins) 222 | sql.commit() 223 | sqc.flushed.set() 224 | if sqc.alldone.isSet(): 225 | print "Flushed outQ - outs %d - ins %d" % (len(outs), len(ins)) 226 | return 227 | ins,outs = [],[] 228 | 229 | def checkMemPool(cur): 230 | cur.execute("select ifnull(max(sync_id),0) from mempool;") 231 | sync_id = cur.fetchone()[0] 232 | if len(memPool) == 0: 233 | cur.execute("delete from mempool;") 234 | trxs = sqc.rpc.getrawmempool() 235 | if trxs is not None: 236 | for tx in trxs: 237 | txx = tx.decode('hex')[::-1][:8] # uses 1/4 space, only for detecting changes in mempool 238 | if txx not in memPool: 239 | rawtx = sqc.rpc.getrawtransaction(tx,0) 240 | if rawtx is not None: 241 | insertTxMemPool(cur, decodeTx(rawtx.decode('hex')), sync_id+1) 242 | memPool.add(txx) 243 | 244 | def addOrphan(cur, height): 245 | cur.execute("select ifnull(max(sync_id),0) from mempool;") 246 | sync_id = cur.fetchone()[0] 247 | hdr = gethdr(height, sqc.cfg, 'raw') 248 | cur.execute("select hash,coinbase from blocks where id=%s limit 1;", (height,)) 249 | for blkhash,coinbase in cur: 250 | cur.execute("insert into orphans (sync_id,block_id,hash,hdr,coinbase) values(%s,%s,%s,%s,%s);", (sync_id,height,blkhash,hdr,coinbase)) 251 | 252 | def checkReOrg(cur, data): 253 | if data['height'] == 0: 254 | return 255 | blkhash,height = data['previousblockhash'],data['height']-1 256 | while True: 257 | cur.execute("select id from blocks where hash=%s limit 1;", (blkhash,)) 258 | row = cur.fetchone() 259 | if row is None: 260 | log("No previous block %d - ok if first 
run, ReOrg aborted" % height) 261 | return 262 | if row and row[0] == height: # rewind until block in good chain 263 | break 264 | height -= 1 265 | blkhash = sqc.rpc.getblockhash(height).decode('hex')[::-1] 266 | if blkhash is None: 267 | log("Rewind failure during ReOrg - Check manually.") 268 | return 269 | height += 1 270 | if height < data['height']: 271 | sqc.flushed.wait() # make sure all outputs committed before we start re-org 272 | cur.execute("update trxs set block_id=-1 where block_id >= %s;", (height*MAX_TX_BLK,)) # set bad chain txs uncfmd 273 | logts("Block %d *** ReOrg: %d orphan(s), %d txs affected" % (height, data['height']-height, cur.rowcount)) 274 | doReOrg(cur, data, height) 275 | 276 | def doReOrg(cur, data, height): 277 | while height < data['height']: 278 | blkhash = sqc.rpc.getblockhash(height) 279 | if blkhash is None: 280 | log("Block failure during ReOrg - Check manually.") 281 | return 282 | data = decodeBlock(sqc.rpc.getblock(blkhash, False).decode('hex')) # get good chain blocks 283 | if data: 284 | for n,tx in enumerate(data['tx']): 285 | tx_id,found = findTx(cur, tx['txid'], mkNew=True) 286 | if found: 287 | cur.execute("update trxs set block_id=%s where id=%s limit 1;", (height*MAX_TX_BLK+n, tx_id)) 288 | else: 289 | insertTx(cur, tx, tx_id, height*MAX_TX_BLK + n) # occurs if tx wasn't in our mempool or orphan block 290 | addOrphan(cur, height) 291 | data['chainwork'] = sqc.rpc.getblockheader(data['hash'][::-1].encode('hex'))['chainwork'].decode('hex') 292 | cur.execute("update blocks set hash=%s,coinbase=%s,chainwork=%s,blksize=%s where id=%s;", (data['hash'], data['coinbase'], data['chainwork'], data['size'], height)) 293 | puthdr(data['height'], sqc.cfg, data['hdr']) 294 | height += 1 295 | 296 | def insertTxMemPool(cur, tx, sync_id): 297 | tx_id,found = findTx(cur, tx['txid'], mkNew=True) 298 | if not found: 299 | insertTx(cur, tx, tx_id, -1) # -1 means trx has no block 300 | cur.execute("insert ignore into mempool 
(id,sync_id) values(%s,%s);", (tx_id, sync_id)) 301 | 302 | def insertTx(cur, tx, tx_id, blk_id): # pylint:disable=too-many-locals 303 | inlist,outlist = [],[] 304 | in_ids,txdata = '','' 305 | tx['stdSeq'] = True 306 | for vin in tx['vin']: 307 | if vin['sequence'] != 0xffffffff: 308 | tx['stdSeq'] = False 309 | break 310 | for vin in tx['vin']: 311 | if 'coinbase' not in vin: 312 | in_id = findTx(cur, vin['txid']) 313 | if in_id and vin['vout'] < MAX_IO_TX: 314 | in_id = (in_id*MAX_IO_TX) + vin['vout'] 315 | inlist.append(( tx_id, in_id )) 316 | in_ids += pack(' 0: 346 | continue 347 | insertTx(cur, tx, tx_id, blk_id + n) 348 | 349 | cur.execute("insert ignore into blocks (id,hash,coinbase,chainwork,blksize) values (%s,%s,%s,%s,%s);", (data['height'], data['hash'], data['coinbase'], data['chainwork'], data['size'])) 350 | puthdr(data['height'], sqc.cfg, data['hdr']) 351 | 352 | blktime = time.time() - blkstart 353 | log("Block %d [ Q:%d %4d txs - %s - %3.0fms %2.1fs %3.0f tx/s]" % ( data['height'], blockQ.qsize(), 354 | len(data['tx']), datetime.fromtimestamp(data['time']).strftime('%d-%m-%Y'), data['rpc']*1000, blktime, len(data['tx'])/blktime) ) 355 | 356 | blksecs.append(blktime) 357 | if len(blksecs) > 18: # ~3 hour moving avg 358 | del blksecs[0] 359 | cur.execute("replace into info (class,`key`,value) value('info','avg-block-sync',%s);", ("%2.1f"%(sum(blksecs)/len(blksecs)), )) 360 | 361 | def options(cfg): # pylint:disable=too-many-branches 362 | try: 363 | opts,_ = getopt.getopt(sys.argv[1:], "hvd:l:r:w:p:q:u:b:f:", 364 | ["help", "version", "debug", "db=", "log=", "rpc=", "path=", "queue=", "user=", "block=", "blkdat=", "no-sigs", "defaults" ]) 365 | except getopt.GetoptError: 366 | usage() 367 | for opt,arg in opts: 368 | if opt in ("-h", "--help"): 369 | usage() 370 | elif opt in ("-v", "--version"): 371 | sys.exit(sys.argv[0]+': '+version) 372 | elif opt in ("-d", "--db"): 373 | cfg['db'] = arg 374 | elif opt in ("-l", "--log"): 375 | cfg['log'] = arg 
        elif opt in ("-r", "--rpc"):
            cfg['rpc'] = arg
        elif opt in ("-p", "--path"):
            cfg['path'] = arg
        elif opt in ("-q", "--queue"):
            cfg['queue'] = int(arg)
        elif opt in ("-u", "--user"):
            cfg['user'] = arg
        elif opt in "--no-sigs":  # NOTE(review): substring test, not tuple membership -- works but fragile
            cfg['no-sigs'] = True
        elif opt in "--defaults":
            savecfg(cfg)
            sys.exit("%s updated" % (sys.argv[0]+'.cfg'))
        elif opt in ("-b", "--block"):
            cfg['block'] = int(arg)
        elif opt in "--debug":
            cfg['debug'] = True
        elif opt in ("-f", "--blkdat"):
            cfg['blkdat'] = arg

def usage():
    # Print CLI help (Python 2 print statement) and exit with status 2.
    print """Usage: {0} [options...][cfg file]\nCommand options are:\n-h,--help\tShow this help info\n-v,--version\tShow version info
-b,--block\tStart at block number (instead of from last block done)
-f,--blkdat\tSet path to block data and use direct file access (no mempool/re-org)
--debug\t\tRun in foreground with logging to console
--defaults\tUpdate cfg and exit\nDefault files are {0}.cfg, {0}.log
\nThese options get saved in cfg file as defaults.
-p,--path\tSet path for blob and block header data file (/var/data/sqlchain)
-q,--queue\tSet block queue size (8)\n-u,--user\tSet user to run as\n-d,--db \tSet mysql db connection, "host:user:pwd:dbname"
-l,--log\tSet log file path\n-r,--rpc\tSet rpc connection, "http://user:pwd@host:port"
--no-sigs\tDo not store input sigScript data """.format(sys.argv[0])
    sys.exit(2)

def sigterm_handler(_signo, _stack_frame):
    # SIGTERM: request a clean shutdown of worker threads.
    sqc.done.set()
def sighup_handler(_signo, _stack_frame):
    # SIGHUP: reopen the log file (supports external log rotation).
    path = sqc.cfg['log'] if 'log' in sqc.cfg else sys.argv[0]+'.log'
    sys.stdout.close()
    sys.stdout=open(path,'a')
    sys.stderr.close()
    sys.stderr=open(path,'a')
    logts("SIGHUP Log reopened")

def run():
    # Main sync loop: start block/output worker threads, pull blocks until done
    # or interrupted, then shut the workers down in order.
    sqc.done = threading.Event()
    sqc.alldone = threading.Event()
    sqc.flushed = threading.Event()

    sqlchain_overlay(sqc.cfg['cointype'])  # load coin-specific overrides

    blkwrk = threading.Thread(target = BlockHandler)
    blkwrk.start()
    outwrk = threading.Thread(target = OutputHandler)
    outwrk.start()

    blksdone = None
    workstart = time.time()
    while not sqc.done.isSet():
        try:
            blksdone = getBlocks(sqc.cfg['block'] if 'block' in sqc.cfg else 0)
            break
        except socket.error:
            # rpc daemon not reachable yet; retry every 5s until shutdown
            log("Cannot connect to rpc")
            time.sleep(5)

    sqc.done.set()
    blkwrk.join()
    sqc.alldone.set()
    outwrk.join()
    if blksdone:
        log("Session %d blocks, %.2f blocks/s" % (blksdone, float(blksdone / (time.time() - workstart))) )

if __name__ == '__main__':

    loadcfg(sqc.cfg)
    options(sqc.cfg)
    drop2user(sqc.cfg)  # drop root privileges if configured

    sqc.rpc = rpcPool(sqc.cfg)
    blockQ = Queue()
    outQ = Queue(64)

    if sqc.cfg['debug']:
        # foreground mode: Ctrl-C behaves like SIGTERM
        signal.signal(signal.SIGINT, sigterm_handler)
        run()
    else:
        # daemon mode: detach, redirect stdio to the log, write a pid file
        logpath = sqc.cfg['log'] if 'log' in sqc.cfg else sys.argv[0]+'.log'
        pidpath = sqc.cfg['pid'] if 'pid' in sqc.cfg else sys.argv[0]+'.pid'
        with daemon.DaemonContext(working_directory='.', umask=0002, stdout=open(logpath,'a'), stderr=open(logpath,'a'),
                signal_map={signal.SIGTERM:sigterm_handler, signal.SIGHUP:sighup_handler} ):
            with file(pidpath,'w') as f:
                f.write(str(os.getpid()))
            run()
            os.unlink(pidpath)
-------------------------------------------------------------------------------- /sqlchain/insight.py: --------------------------------------------------------------------------------
#
# Insight compatible API module
#
import os, urlparse, cgi, json
from string import hexdigits
from struct import pack, unpack
from datetime import datetime
from hashlib import sha256

from bitcoinrpc.authproxy import AuthServiceProxy
from gevent import sleep

from sqlchain.version import version, MAX_TX_BLK, MAX_IO_TX
from sqlchain.util import is_address, mkaddr, addr2id, txh2id, mkSPK, getBlobData, getBlobsSize, is_BL32
from sqlchain.util import encodeVarInt, gethdr, coin_reward, bits2diff, mkOpCodeStr, logts

RESULT_ROW_LIMIT = 1000
zF = lambda x: int(x) if int(x) == x else x  # drop the .0 from whole-number floats

#main entry point for api calls
def do_API(env, send_resp): # pylint:disable=too-many-branches
    # WSGI dispatcher: routes /api/<cmd>/... paths to the handlers below and
    # returns a JSON string.  Always replies 200 with a JSON body.
    result = []
    get,args,cur = urlparse.parse_qs(env['QUERY_STRING']), env['PATH_INFO'].split('/')[2:], sqc.dbpool.get().cursor()
    send_resp('200 OK', [('Content-Type', 'application/json')])
    if args[0] == 'auto' or env['REQUEST_METHOD'] == 'POST':
        result = apiAuto(cur, env, args, get)
    elif args[0] == "block-index":
        result = json.dumps(apiHeader(cur, args[1], args[2:]))
    elif args[0] == "block":
        if len(args[1]) == 64 and all(c in hexdigits for c in args[1]):
            result = json.dumps(apiBlock(cur, args[1]))
    elif args[0] in ["tx","rawtx"]:
        if len(args[1]) == 64 and all(c in hexdigits for c in args[1]):
            result = json.dumps(apiTx(cur, args[1], args))
    elif args[0] == "txs":
        result = json.dumps({ 'pagesTotal':1, 'txs': apiTxs(cur, get) })
    elif args[0] in ["addr","addrs"]:
        result = json.dumps(apiAddr(cur, args[1].split(','), args[2:], get))
    elif args[0] == "history":
        result = json.dumps(addrHistory(cur, args[1], args[2:]))
    elif args[0] == "status":
        result = json.dumps(apiStatus(cur, *args[1:]))
    elif args[0] == "merkle":
        result = json.dumps(apiMerkle(cur, args[1]))
    elif args[0] == "utils":
        result = json.dumps(apiRPC(args[1], get['nbBlocks'][0] if 'nbBlocks' in get else args[2] if len(args) > 2 else 2))
    elif args[0] == "sync":
        result = json.dumps(apiSync(cur, *[int(x) if x.isdigit() else 0 for x in args[1:]]))
    elif args[0] == "closure":
        result = json.dumps(apiClosure(cur, args[1].split(',') ))
    return result

def apiAuto(cur, env, args, get):
    # Auto-detect the query type (block height, block hash, tx hash or address)
    # for the /auto endpoint and for POSTed forms.
    result = []
    form = cgi.FieldStorage(fp=env['wsgi.input'], environ=env, keep_blank_values=True)
    if args[0] == "auto":
        param = form['data'].value if 'data' in form else args[1]
        if param.isdigit() and int(param) <= sqc.cfg['block']:
            blkhash = apiHeader(cur, param, args[2:])
            result = json.dumps(apiBlock(cur, blkhash['blockHash'])) if blkhash else []
        elif len(param) == 64:
            if param[:8] == '00000000':
                result = json.dumps(apiBlock(cur, param))
            # NOTE(review): this unconditionally overwrites the block result
            # above -- looks like it should be an else/fallback; verify upstream
            result = json.dumps(apiTx(cur, param, args))
        elif is_address(param):
            result = json.dumps(apiAddr(cur, [ param ], args[2:], get))
    elif args[0] == "addrs":
        result = json.dumps(apiAddr(cur, form['addrs'].value.split(','), args[2:], get))
    elif args[0] == "tx" and args[1] == "send":
        result = apiRPC('send', form['rawtx'].value)
    return result

def apiHeader(cur, blk, args):
    # Return the block hash for a height (or the chain tip when blk is not
    # numeric); with 'electrum' in args, return electrum-style header fields.
    if blk.isdigit():
        cur.execute("select id,hash from blocks where id=%s limit 1;", (blk,))
    else:
        cur.execute("select id,hash from blocks order by id desc limit 1;")
    for blkid,blkhash in cur:
        hdr = gethdr(int(blkid), sqc.cfg)
        if 'electrum' in args:
            return { 'block_height':int(blkid), 'version':hdr['version'], 'time':hdr['time'], 'bits':hdr['bits'], 'nonce':hdr['nonce'],
                'merkle_root':hdr['merkleroot'][::-1].encode('hex'), 'prev_block_hash':hdr['previousblockhash'][::-1].encode('hex') }
        return { 'blockHash': blkhash[::-1].encode('hex') }
    return {}

def apiBlock(cur, blkhash):
    # Build the insight-style block detail object (header fields, tx hash list,
    # next/prev links) for a block hash; {} when not found.
    data = { 'hash':blkhash, 'tx':[] }
    cur.execute("select id,chainwork,blksize from blocks where hash=%s limit 1;", (blkhash.decode('hex')[::-1],))
    for blk,work,blksz in cur:
        data['height'] = int(blk)
        data['confirmations'] = sqc.cfg['block'] - data['height'] + 1
        data.update(gethdr(data['height'], sqc.cfg))
        data['previousblockhash'] = data['previousblockhash'][::-1].encode('hex')
        data['merkleroot'] = data['merkleroot'][::-1].encode('hex')
        data['difficulty'] = zF(int(bits2diff(data['bits'])*1e8)/1e8)
        data['bits'] = '%08x' % data['bits']
        data['reward'] = zF(coin_reward(data['height']))
        data['isMainChain'] = True
        data['size'] = blksz
        data['chainwork'] = work.encode('hex')
        data['poolInfo'] = {}
        # tx row ids are block_id*MAX_TX_BLK + position within the block
        cur.execute("select hash from trxs where block_id>=%s and block_id<%s;", (blk*MAX_TX_BLK, blk*MAX_TX_BLK+MAX_TX_BLK))
        for txhash, in cur:
            data['tx'].append(txhash[::-1].encode('hex'))
        cur.execute("select hash from blocks where id=%s limit 1;", (data['height']+1,))
        for txhash, in cur:
            data['nextblockhash'] = txhash[::-1].encode('hex')
        return data
    return {}

def apiAddr(cur, addrs, args, get):
    # Address summary (or utxo list) for one or more addresses; returns a bare
    # object for a single address, a list for several.
    data = []
    for addr in addrs:
        if is_address(addr):
            addr_id = addr2id(addr, cur)
            if addr_id:
                if 'utxo' in args:
                    data.append(addrUTXOs(cur, addr_id, addr, get))
                else:
                    data.append(addrTXs(cur, addr_id, addr, args, get))
    return data if len(data) != 1 else data[0]


def addrTXs(cur, addr_id, addr, args, get): # pylint:disable=too-many-locals
    # Tally totals (confirmed/unconfirmed x received/sent) for an address and
    # optionally collect its tx hash list; 'from'/'to' paginate, capped at
    # RESULT_ROW_LIMIT rows.
    incTxs = 'noTxList' not in get or get['noTxList'][0] == '0'
    offset = int(get['from'][0]) if 'from' in get else 0
    limit = min(int(get['to'][0])-offset, RESULT_ROW_LIMIT) if 'to' in get else RESULT_ROW_LIMIT
    txs = []
    sums = [[0,0],[0,0]]  # [confirmed,unconfirmed][received,sent]
    untxs = 0
    count = 0
    cur.execute("select value,t.id,tx_id,hash,block_id from trxs t left join outputs o on t.id=(o.id div {0}) or t.id=o.tx_id where addr_id=%s order by block_id desc;".format(MAX_IO_TX), (addr_id,))
    for value,tx_id,spend_id,txhash,blk in cur:
        uncfmd = 1 if blk < 0 else 0  # negative block_id marks mempool txs
        untxs += uncfmd
        spend = 1 if tx_id == spend_id else 0
        sums[uncfmd][spend] += value

        if count >= offset and count < offset+limit:
            txhash = txhash[::-1].encode('hex')
            if incTxs and txhash not in txs:
                txs.append(txhash)
            count += 1

    if 'balance' in args:
        return int(sums[0][0]-sums[0][1])
    if 'unconfirmedBalance' in args:
        return int(sums[1][0]-sums[1][1])
    if 'totalReceived' in args:
        return int(sums[0][0])
    if 'totalSent' in args:
        return int(sums[0][1])

    return { 'addrStr':addr, 'balanceSat':int(sums[0][0]-sums[0][1]), 'balance':float(sums[0][0]-sums[0][1])/1e8 or 0, 'totalReceivedSat':int(sums[0][0]),
        'totalReceived': float(sums[0][0])/1e8, 'totalSentSat':int(sums[0][1]), 'totalSent':float(sums[0][1])/1e8,
        'unconfirmedBalanceSat':int(sums[1][0]-sums[1][1]), 'unconfirmedBalance':float(sums[1][0]-sums[1][1])/1e8 or 0,
        'txApperances':len(txs), 'transactions':txs, 'unconfirmedTxApperances':untxs }

def addrUTXOs(cur, addr_id, addr, get):
    # List unspent outputs for an address (o.tx_id null means unspent).
    offset = int(get['from'][0]) if 'from' in get else 0
    limit = min(int(get['to'][0])-offset, RESULT_ROW_LIMIT) if 'to' in get else RESULT_ROW_LIMIT
    data = []
    # NOTE(review): MySQL LIMIT syntax is "LIMIT offset,row_count" but the
    # bound params are (limit,offset) -- these look swapped; verify upstream.
    cur.execute("select value,o.id,hash,block_id div {1} from trxs t left join outputs o on t.id=(o.id div {0}) and o.tx_id is null where addr_id=%s order by block_id limit %s,%s;".format(MAX_IO_TX,MAX_TX_BLK), (addr_id,limit,offset))
    for value,out_id,txhash,blk in cur:
        data.append({ 'address':addr, 'txid':txhash[::-1].encode('hex'), 'vout':int(out_id)%MAX_IO_TX, 'amount':float(value)/1e8,
            'confirmations':sqc.cfg['block']-int(blk)+1 if blk>=0 else 0, 'ts':gethdr(int(blk), sqc.cfg, 'time') if blk>=0 else 0 })
    return data

def addrHistory(cur, addr, args):
    # Electrum-style address history: returns balance totals, utxo/tx lists,
    # or a sha256 status digest depending on args.
    txt = ''
    data = { 'cfmd':0, 'uncfmd':0 } if 'balance' in args else { 'txs':[] }
    addr_id = addr2id(addr, cur)
    if addr_id:
        cur.execute("select value,t.id,o.tx_id,hash,block_id,o.id%%{0} from outputs o, trxs t where addr_id=%s and (t.id=(o.id div {0}) or t.id=o.tx_id) order by block_id;".format(MAX_IO_TX), (addr_id,))
        for value,tx_id,spent_id,txhash,blk,n in cur:
            value = int(value)
            blk = blk//MAX_TX_BLK if blk >= 0 else 0  # 0 height marks unconfirmed
            if 'balance' in args:
                if blk == 0:
                    data['uncfmd'] += value if tx_id == spent_id else -value
                else:
                    data['cfmd'] += value if tx_id == spent_id else -value
            elif 'utxo' in args and not spent_id:
                tmp = { 'tx_hash':txhash[::-1].encode('hex'), 'height':int(blk), 'value':value, 'n':int(n) }
            else:
                tmp = { 'tx_hash':txhash[::-1].encode('hex'), 'height':int(blk) }
            if 'status' in args:
                txt += tmp['tx_hash'] + ":%d:" % tmp['height']
            elif ('uncfmd' not in args or tmp['height'] == 0) and 'balance' not in args:
                data['txs'].append(tmp)
    return (sha256(txt).digest().encode('hex') if txt else None) if 'status' in args else data

def apiTxs(cur, get):
    # Full tx detail objects for either a block hash or an address query.
    txs = []
    if 'block' in get:
        blkhash = get['block'][0]
        if len(blkhash) == 64 and all(c in hexdigits for c in blkhash):
            txhashes = apiBlock(cur, blkhash)
            txhashes = txhashes['tx'] if 'tx' in txhashes else []
    elif 'address' in get:
        txhashes = apiAddr(cur, [ get['address'][0] ], {}, {})
        txhashes = txhashes['transactions'] if 'transactions' in txhashes else []
    # NOTE(review): txhashes is unbound if neither 'block' nor 'address' is
    # present -- would raise NameError; verify callers always pass one
    for txhash in txhashes:
        txs.append(apiTx(cur, txhash, []))
    return txs

def apiTx(cur, txhash, args):
    # Insight-style tx detail object; 'output'/'addrs'/'rawtx'/'html' in args
    # select alternate views.  {} when the tx is unknown.
    if 'output' in args:
        return txoAddr(cur, txhash, args[-1])
    if 'addrs' in args:
        return txAddrs(cur, txhash)
    data = { 'txid':txhash }
    txh = txhash.decode('hex')[::-1]
    cur.execute("select id,hash,txdata,block_id div {0},ins,outs,txsize from trxs where id>=%s and hash=%s limit 1;".format(MAX_TX_BLK), (txh2id(txh), txh))
    for tid,txh,txdata,blkid,ins,outs,txsize in cur:
        blob = getBlobData(txdata, ins, outs, txsize)
        if [i for i in ['rawtx','html'] if i in args]:
            return mkRawTx(cur, args, tid, blob, blkid)
        data['blockheight'] = blkid
        data['confirmations'] = sqc.cfg['block'] - blkid + 1 if blkid >= 0 else 0
        data['version'],data['locktime'] = blob['hdr'][4],blob['hdr'][5]
        data['valueIn'],data['vin'] = apiInputs(cur, blkid, blob['ins'])
        data['valueOut'],data['vout'] = apiOutputs(cur, int(tid), blob['outs'])
        data['fees'] = round(data['valueIn'] - data['valueOut'],8)
        data['size'] = blob['size']
        cur.execute("select hash from blocks where id=%s limit 1;", (blkid,))
        for txhash2, in cur:
            data['blockhash'] = txhash2[::-1].encode('hex')
            data['time'] = data['blocktime'] = gethdr(blkid, sqc.cfg, 'time')
        if 'coinbase' in data['vin'][0]:
            # coinbase txs have no meaningful input value or fee
            del data['valueIn']
            del data['fees']
            data['isCoinBase'] = True
        return data
    return {}

def apiInputs(cur, height, ins):
    # Resolve tx inputs to (total value, vin list); coinbase txs get a single
    # synthetic entry.  NOTE(review): the tail of this function and the head of
    # apiOutputs below were lost in the dump.
    total,data = 0,[]
    if len(ins) == 0:
        cur.execute("select coinbase from blocks where id=%s;", (height,))
        return 0,[{ 'n':0, 'coinbase':cur.fetchone()[0].encode('hex') }]
    else:
        for n,xin in enumerate(ins):
            cur.execute("select value,addr_id,hash from outputs o, trxs t where o.id=%s and t.id=o.id div %s limit 1;", (xin['outid'], MAX_IO_TX))
            rows = cur.fetchall()
            for value,aid,txhash in rows:
                cur.execute("select addr from {0} where id=%s limit 1;".format('bech32' if is_BL32(int(aid)) else 'address'), (aid,))
                for addr, in cur:
                    btc = float(value)/1e8
                    data.append({ 'n':n, 'vout':xin['outid']%MAX_IO_TX, 'value':round(btc,8), 'valueSat':int(value),
                        'txid':txhash[::-1].encode('hex'), 'addr':mkaddr(addr,int(aid)), 'sequence':unpack('
# [EXTRACTION GAP: end of apiInputs, def apiOutputs(...) and its select column
#  list were lost in the dump; the preserved tail of that query follows]
=%s*{0} and o.id<%s*{0};".format(MAX_IO_TX), (txid,txid+1))
    rows = cur.fetchall()
    for out_id,n,value,aid,in_id in rows:
        btc = float(value)/1e8
        total += btc
        vout = { 'n':int(n), 'value':"%1.8f" % btc, 'scriptPubKey':{} }
        if aid == 0:
            # non-standard output: raw script stored in the tx blob
            vout['scriptPubKey']['hex'] = outs[int(n)]
        else:
            cur.execute("select addr from {0} where id=%s limit 1;".format('bech32' if is_BL32(int(aid)) else 'address'), (aid,))
            for addr, in cur:
                vout['scriptPubKey']['addresses'] = [ mkaddr(addr,int(aid)) ]
                vout['scriptPubKey']['hex'] = mkSPK(addr,int(aid))[1]
        vout['scriptPubKey']['asm'] = mkOpCodeStr(vout['scriptPubKey']['hex'], sepPUSH=' ')
        vout['scriptPubKey']['hex'] = vout['scriptPubKey']['hex'].encode('hex')
        if in_id:
            vout.update(apiSpent(cur, int(in_id), int(out_id)))
        data.append(vout)
    return round(total,8),data

def apiSpent(cur, txid, out_id):
    # Find where an output was spent: scans the spending tx's input blob for
    # the matching composite output id.
    cur.execute("select txdata,hash,block_id div {0},ins from trxs where id=%s limit 1;".format(MAX_TX_BLK), (txid,))
    for txdata,txh,blk,ins in cur:
        blob = getBlobData(txdata, ins)
        for n,xin in enumerate(blob['ins']):
            if xin['outid'] == out_id:
                return { 'spentTxId':txh[::-1].encode('hex'), 'spentIndex':n, 'spentHeight':int(blk) }
    return {}

def txoAddr(cur, txhash, n):
    # Address of output n of the given tx, or None.
    txid = txh2id(txhash.decode('hex')[::-1])
    cur.execute("select addr_id from outputs o where o.id>=%s*{0} and o.id<%s*{0} and o.id%%{0}=%s limit 1;".format(MAX_IO_TX), (txid,txid+1,int(n)))
    aids = cur.fetchall()
    for aid, in aids:
        cur.execute("select addr from {0} where id=%s limit 1;".format('bech32' if is_BL32(int(aid)) else 'address'), (aid,))
        addr = cur.fetchone()[0]
        return mkaddr(addr,int(aid))
    return None

def txAddrs(cur, txhash):
    # All addresses touched by a tx: its outputs plus the outputs its inputs
    # spend.
    data = []
    txid = txh2id(txhash.decode('hex')[::-1])
    cur.execute("select addr_id from outputs o where o.id>=%s*{0} and o.id<%s*{0};".format(MAX_IO_TX), (txid,txid+1))
    for aid, in cur:
        cur.execute("select addr from {0} where id=%s limit 1;".format('bech32' if is_BL32(int(aid)) else 'address'), (aid,))
        addr = cur.fetchone()[0]
        data.append( mkaddr(addr,int(aid)) )
    cur.execute("select txdata,ins from trxs where id=%s limit 1;", (txid,))
    txins = cur.fetchall()
    for txdata,ins in txins:
        # NOTE(review): int(txdata) differs from apiTx/apiSpent which pass
        # txdata straight through to getBlobData -- verify which is right
        blob = getBlobData(int(txdata), ins)
        if ins > 0:
            for _,xin in enumerate(blob['ins']):
                cur.execute("select addr_id from outputs o where o.id=%s limit 1;", (xin['outid'],))
                aids = cur.fetchall()
                for aid, in aids:
                    cur.execute("select addr from {0} where id=%s limit 1;".format('bech32' if is_BL32(int(aid)) else 'address'), (aid,))
                    addr = cur.fetchone()[0]
                    data.append(mkaddr(addr,int(aid)))
    return data

def apiMerkle(cur, txhash):
    # Build the electrum-style merkle proof for a tx: sibling hashes from the
    # tx position up to the block's merkle root.
    txh = txhash.decode('hex')[::-1]
    cur.execute("select block_id from trxs where id>=%s and hash=%s limit 1", (txh2id(txh), txh))
    for blkid, in cur:
        blk,pos = divmod(int(blkid), MAX_TX_BLK)
        cur.execute("select hash from trxs where block_id>=%s and block_id<%s order by block_id;", (blk*MAX_TX_BLK, blk*MAX_TX_BLK+MAX_TX_BLK))
        mkt = [ tx for tx, in cur ]
        mkb,t = [],pos
        while len(mkt) > 1:
            if len(mkt) % 2 == 1:
                # odd level: bitcoin duplicates the last hash
                mkt.append(mkt[-1])
            mkb.append(mkt[t-1][::-1].encode('hex') if t % 2 == 1 else mkt[t+1][::-1].encode('hex'))
            mkt = [ sha256(sha256(mkt[i]+mkt[i+1]).digest()).digest() for i in range(0,len(mkt),2) ]
            t //= 2
        if mkt[0] != gethdr(blk, sqc.cfg, 'merkleroot'):
            logts("Panic! Merkle tree failure, tx %s" % txhash )
        return { "block_height": blk, "merkle": mkb, "pos": pos }
    return []

# Field label templates for the raw-tx hex dump views below.
rawTxHdr = [ 'version','# inputs','# outputs', 'locktime' ]
rawCbHdr = [ 'null txid','n','coinbase size','coinbase bytes','sequence' ]
rawInHdr = [ 'in txid #%d','n #%d','sigScript size #%d','sigScript bytes #%d','sequence #%d' ]
rawOutHdr = [ 'out value #%d','scriptPK size #%d','scriptPK bytes/asm #%d' ]

def rawHTML(out, vi, vo):
    # Render a raw tx as an HTML table of labelled hex fields.
    # NOTE(review): the HTML markup literals in this function were stripped by
    # the dump (tags swallowed); the surviving fragments are kept verbatim.
    outhex = [ x.encode('hex') for x in out ]
    tags = [ x for x in rawTxHdr ]
    for n in range(vo):
        tags[3:3] = [ s%(vo-n-1) for s in rawOutHdr ]
        outhex[3+5*vi+3*n+2] += "
# [EXTRACTION GAP: HTML tag literal lost]
 "+mkOpCodeStr(out[3+5*vi+3*n+2]).replace('\n', '
 ')+"
 "
    if vi == 0:
        tags[2:2] = rawCbHdr
    else:
        for n in range(vi):
            tags[2:2] = [ s%(vi-n-1) for s in rawInHdr ]
    return ""+"".join(['' % (k,v) for k,v in zip(tags,outhex) ])+"
 %s%s
 "

def mkRawTx(cur, args, txid, blob, blkid):
    # Serialize a tx back to raw wire format (optionally as HTML).
    # NOTE(review): this function's body (and the def line of the sync/long-
    # poll handler below) were lost in the dump.
    out = [ pack('
# [EXTRACTION GAP: remainder of mkRawTx and the header of the sync/long-poll
#  function (apiSync, per the do_API dispatcher) were lost; its preserved tail
#  follows]
>= sqc.sync_id:
        with sqc.sync:
            sqc.sync.wait(timeout) # long polling support for sync connections
        if sync_req >= sqc.sync_id:
            return None # timeout
    if sync_req == 0 or sync_req == sqc.sync_id:
        utxs = sqc.syncTxs
    else:
        utxs = []
        cur.execute("select hash from mempool m, trxs t where m.sync_id > %s and t.id=m.id;", (sync_req,))
        for txhash, in cur:
            utxs.append(bciTxWS(cur, txhash[::-1].encode('hex')))
    cur.execute("select min(block_id) from orphans where sync_id > %s;", (sync_req if sync_req > 0 else sqc.sync_id,))
    orphan = cur.fetchone()[0]
    return { 'block':sqc.cfg['block'] if orphan is None else orphan, 'orphan':(not orphan is None), 'txs':utxs, 'sync_id':sqc.sync_id }

# based on the closure code from
# https://github.com/sharkcrayon/bitcoin-closure
def apiClosure(cur, addrs):
    # Compute the "closure" of addresses linked to the given ones by common
    # multi-input spends, plus their combined utxo balance.
    closure,balance = [],0
    txDone = []
    while len(addrs) > 0: # pylint:disable=too-many-nested-blocks
        sleep(0)  # yield to gevent between addresses
        addr = addrs.pop(0)
        closure.append(addr)
        txs = apiTxs(cur, { 'address':[ addr ] })
        for tx in txs:
            if not tx['txid'] in txDone:
                if len(tx['vin']) == 1:
                    txDone.append(tx['txid'])
                else:
                    # multi-input tx: all co-spending addresses join the closure
                    in_addrs = [ vin['addr'] for vin in tx['vin'] ]
                    if addr in in_addrs:
                        txDone.append(tx['txid'])
                        for ain in in_addrs:
                            if not ain in closure and not ain in addrs:
                                addrs.append(ain)

    utxos = apiAddr(cur, closure, 'utxo', {})
    for addr in utxos:
        for utxo in addr:
            balance += utxo['amount']
    return { 'closure':closure, 'balance':balance }

def apiStatus(cur, cls='info', *args):
    # Return rows from the info table for a class; refreshes the cached info
    # values (and heavy db size stats for cls=='db') at most once per minute.
    data = {}
    cur.execute("select value from info where `class`='sys' and `key`='updated';")
    row = cur.fetchone()
    if not row or (datetime.now() - datetime.strptime(row[0],'%Y-%m-%d %H:%M:%S')).total_seconds() > 60:
        cur.execute("replace into info (class,`key`,value) values('info','block',%s);", (sqc.cfg['block'], ))
        cur.execute("replace into info (class,`key`,value) values('info','version',%s);", (version, ))
        cur.execute("replace into info (class,`key`,value) values('sys','updated',now());")
        # NOTE(review): nesting of the db-stats branch below was reconstructed
        # from a corrupted dump -- verify against upstream source
        if cls == 'db':
            total_bytes = 0
            cur.execute("show table status;")
            for tbl in cur:
                if tbl[0] not in ['blocks','trxs','address','outputs']:
                    continue
                # tbl[6]=data_length, tbl[8]=index_length; report MB below 1e9, GB above
                if tbl[6]+tbl[8] < 1e9:
                    cur.execute("replace into info (class,`key`,value) values('db','{0}:rows',%s),('db','{0}:data-MB',%s),('db','{0}:idx-MB',%s),('db','{0}:total-MB',%s),('db','{0}:total-bytes',%s);".format(tbl[0]),
                        (tbl[4], float("%.1f"%float(tbl[6]/1e6)), float("%.1f"%float(tbl[8]/1e6)), float("%.1f"%float(tbl[6]/1e6+tbl[8]/1e6)), tbl[6]+tbl[8]))
                else:
                    cur.execute("replace into info (class,`key`,value) values('db','{0}:rows',%s),('db','{0}:data-GB',%s),('db','{0}:idx-GB',%s),('db','{0}:total-GB',%s),('db','{0}:total-bytes',%s);".format(tbl[0]),
                        (tbl[4], float("%.1f"%float(tbl[6]/1e9)), float("%.1f"%float(tbl[8]/1e9)), float("%.1f"%float(tbl[6]/1e9+tbl[8]/1e9)), tbl[6]+tbl[8]))
                total_bytes += tbl[6]+tbl[8]
            blobs_size = getBlobsSize(sqc.cfg)
            cur.execute("replace into info (class,`key`,value) values('db','outputs:max-io-tx',%s);", (MAX_IO_TX, ))
            cur.execute("replace into info (class,`key`,value) values('db','blocks:hdr-data',%s);", (os.stat(sqc.cfg['path']+'/hdrs.dat').st_size, ))
            cur.execute("replace into info (class,`key`,value) values('db','trxs:blob-data',%s);", (blobs_size, ))
            cur.execute("replace into info (class,`key`,value) values('db','trxs:blob-GB',%s);", (float("%.1f"%float(blobs_size/1e9)), ))
            cur.execute("replace into info (class,`key`,value) values('db','trxs:max-tx-block',%s);", (MAX_TX_BLK, ))
            cur.execute("replace into info (class,`key`,value) values('db','all:total-bytes',%s);", (total_bytes, ))
            cur.execute("replace into info (class,`key`,value) values('db','all:total-GB',%s);", (float("%.1f"%float(total_bytes/1e9)), ))

    cur.execute("select `key`,value from info where class=%s;", (cls, ))
    for k,v in cur:
        if ':' in k:
            # "table:stat" keys nest into a sub-dict per table
            k1,k2 = k.split(':', 1)
            if k1 in data:
                data[k1].update({ k2:v })
            else:
                data[k1] = { k2:v }
        else:
            data[k] = v
    if 'html' in args:
        pass # todo wrap data as html table
    return data
--------------------------------------------------------------------------------