├── .dockerignore ├── .flake8 ├── .github └── workflows │ └── py-tests.yml ├── .gitignore ├── README.md ├── TODO.md ├── Vagrantfile ├── bmon ├── __init__.py ├── asgi.py ├── bitcoin │ ├── __init__.py │ ├── api.py │ └── rpc.py ├── bitcoind_monitor.py ├── bitcoind_tasks.py ├── conftest.py ├── hosts.py ├── logparse.py ├── mempool.py ├── migrations │ ├── 0001_initial.py │ ├── 0002_logprogress_alter_connectblockevent_version_and_more.py │ ├── 0003_mempoolaccept_processlineerror.py │ ├── 0004_connectblockdetails_created_at_and_more.py │ ├── 0005_blockconnectedevent_blockdisconnectedevent_peer_and_more.py │ ├── 0006_peer_bip152_hb_from_peer_bip152_hb_to_peer_relaytxes_and_more.py │ ├── 0007_alter_peer_bip152_hb_from_alter_peer_bip152_hb_to.py │ ├── 0008_alter_peer_servicesnames.py │ ├── 0009_mempoolreject.py │ ├── 0010_mempoolreject_reason_code_alter_mempoolreject_peer_and_more.py │ ├── 0011_host_mempoolreject_unique_reject_host_unique_host.py │ ├── 0012_rename_host_logprogress_hostname.py │ ├── 0013_remove_peer_unique_peer_blockconnectedevent_hostobj_and_more.py │ ├── 0014_rename_host_processlineerror_hostname.py │ ├── 0015_remove_mempoolreject_unique_reject_and_more.py │ ├── 0016_remove_mempoolreject_unique_reject_and_more.py │ ├── 0017_remove_mempoolreject_unique_reject_and_more.py │ ├── 0018_alter_host_name.py │ ├── 0019_peerstats.py │ ├── 0020_remove_host_unique_host_host_bitcoin_listen_and_more.py │ ├── 0021_alter_host_bitcoin_listen.py │ ├── 0022_host_disabled.py │ ├── 0023_host_bmon_host_name_88c4dd_idx_and_more.py │ ├── 0024_alter_mempoolreject_timestamp.py │ ├── 0025_blockdownloadtimeout.py │ ├── 0026_headertotipevent.py │ ├── 0027_mempoolreject_wtxid.py │ └── __init__.py ├── models.py ├── redis_util.py ├── server_monitor.py ├── server_tasks.py ├── settings.py ├── settings_test.py ├── static │ ├── ninepin.ttf │ └── style.css ├── templates │ └── tips.html ├── test_bitcoind.py ├── test_bitcoind_tasks.py ├── test_hosts.py ├── test_integration.py ├── test_logparse.py ├── test_mempool.py ├── test_models.py ├── testdata │ ├── block-timeouts.log │ ├── getpeerinfo.json │ ├── logs_badblock_cb_overspent_018.txt │ ├── logs_connectblock_010.txt │ ├── logs_connectblock_basic.txt │ ├── logs_gotblock_012.txt │ ├── logs_gotblock_013.txt │ ├── logs_gotblock_018.txt │ ├── logs_reorg_23.txt │ ├── mempool-accepts-log.txt │ └── new-header.log ├── urls.py ├── util.py ├── util_cli.py ├── views.py ├── views_api.py └── wsgi.py ├── dev ├── docker-compose.yml ├── docker ├── entrypoint.sh ├── py.Dockerfile └── wait-for ├── etc ├── alertmanager-template.yml ├── alertmanager.yaml ├── bitcoin │ └── bitcoin-template.conf ├── bitcoind-logrotate.conf ├── grafana-dashboards-template.yml ├── grafana-datasources-template.yml ├── grafana-template.ini ├── grafana │ └── dashboards │ │ └── bitcoind.json ├── loki-template.yml ├── postgres.conf ├── prom-alerts.yml ├── prom-template.yml ├── promtail-template.yml ├── server-nginx.conf ├── systemd-bitcoind-unit.service ├── systemd-server-sentry-unit.service └── systemd-server-unit.service ├── frontend-build ├── bundle-prod.e9a10db85a6bff80a378.js ├── bundle-prod.e9a10db85a6bff80a378.js.LICENSE.txt └── index.html ├── frontend ├── .eslintrc.js ├── .yarnrc ├── Dockerfile ├── docker_entrypoint.sh ├── package.json ├── src │ ├── assets │ │ └── styles │ │ │ ├── main.css │ │ │ └── style.js │ ├── index.html │ ├── index.js │ └── util.js ├── webpack.config.js └── yarn.lock ├── git-hooks └── pre-commit ├── images ├── bmon.png ├── netmon.png └── screenshot.png ├── infra ├── bmon_infra │ ├── 
__init__.py │ ├── config.py │ ├── getbitcoin.py │ └── infra.py ├── hosts_dev.yml ├── hosts_prod.yml └── pyproject.toml ├── manage.py ├── pyproject.toml └── pytest.ini /.dockerignore: -------------------------------------------------------------------------------- 1 | services/ 2 | -------------------------------------------------------------------------------- /.flake8: -------------------------------------------------------------------------------- 1 | [flake8] 2 | extend-ignore = 3 | # import at top of file 4 | E402, 5 | # line length 6 | E501, 7 | # linebreak operator 8 | W503, 9 | W504, 10 | # whitespace before index 11 | E203 12 | -------------------------------------------------------------------------------- /.github/workflows/py-tests.yml: -------------------------------------------------------------------------------- 1 | name: build 2 | 3 | on: [push, pull_request] 4 | 5 | jobs: 6 | test: 7 | runs-on: ubuntu-latest 8 | 9 | steps: 10 | - uses: actions/checkout@v3 11 | - uses: actions/setup-python@v4 12 | with: 13 | python-version: '3.11' 14 | 15 | - name: Pull test container 16 | run: docker pull docker.io/jamesob/bmon:test-latest 17 | 18 | - name: create config 19 | run: | 20 | pip install .[tests] 21 | pip install ./infra 22 | bmon-config 23 | 24 | - name: flake8 + pytest 25 | run: | 26 | ./dev test 27 | 28 | # - name: mypy 29 | # run: | 30 | # ./dev mypy 31 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .env 2 | services/ 3 | staticfiles-build/ 4 | frontend-build/bundle\.* 5 | 6 | # Byte-compiled / optimized / DLL files 7 | __pycache__/ 8 | *.py[cod] 9 | *$py.class 10 | 11 | # C extensions 12 | *.so 13 | 14 | # Distribution / packaging 15 | .Python 16 | build/ 17 | develop-eggs/ 18 | dist/ 19 | downloads/ 20 | eggs/ 21 | .eggs/ 22 | lib/ 23 | lib64/ 24 | parts/ 25 | sdist/ 26 | var/ 27 | wheels/ 28 | pip-wheel-metadata/ 29 | share/python-wheels/ 30 | *.egg-info/ 31 | .installed.cfg 32 | *.egg 33 | MANIFEST 34 | 35 | # PyInstaller 36 | # Usually these files are written by a python script from a template 37 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 38 | *.manifest 39 | *.spec 40 | 41 | # Installer logs 42 | pip-log.txt 43 | pip-delete-this-directory.txt 44 | 45 | # Unit test / coverage reports 46 | htmlcov/ 47 | .tox/ 48 | .nox/ 49 | .coverage 50 | .coverage.* 51 | .cache 52 | nosetests.xml 53 | coverage.xml 54 | *.cover 55 | .hypothesis/ 56 | .pytest_cache/ 57 | 58 | # Translations 59 | *.mo 60 | *.pot 61 | 62 | # Django stuff: 63 | local_settings.py 64 | db.sqlite3 65 | db.sqlite3-journal 66 | 67 | # Flask stuff: 68 | instance/ 69 | .webassets-cache 70 | 71 | # Scrapy stuff: 72 | .scrapy 73 | 74 | # Sphinx documentation 75 | docs/_build/ 76 | 77 | # PyBuilder 78 | target/ 79 | 80 | # Jupyter Notebook 81 | .ipynb_checkpoints 82 | 83 | # IPython 84 | profile_default/ 85 | ipython_config.py 86 | 87 | # pyenv 88 | .python-version 89 | 90 | # pipenv 91 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 92 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 93 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 94 | # install all needed dependencies. 
95 | #Pipfile.lock 96 | 97 | # celery beat schedule file 98 | celerybeat-schedule 99 | 100 | # SageMath parsed files 101 | *.sage.py 102 | 103 | # Environments 104 | .env 105 | .venv 106 | env/ 107 | venv/ 108 | ENV/ 109 | env.bak/ 110 | venv.bak/ 111 | 112 | # Spyder project settings 113 | .spyderproject 114 | .spyproject 115 | 116 | # Rope project settings 117 | .ropeproject 118 | 119 | # mkdocs documentation 120 | /site 121 | 122 | # mypy 123 | .mypy_cache/ 124 | .dmypy.json 125 | dmypy.json 126 | 127 | # Pyre type checker 128 | .pyre/ 129 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | ![bmon](images/bmon.png) 2 | 3 | A Bitcoin network monitor 4 | 5 | ![screenshot](images/screenshot.png) 6 | 7 | Provides log aggregation, a Grafana dashboard, automated alerting, and a framework for 8 | doing realtime analysis (via logs and RPC) on a collection of bitcoind nodes. 9 | 10 | 11 | ## Local dev 12 | 13 | 1. Ensure you have Python 3.10+, Docker, and docker-compose on your host. 14 | - `pip install docker-compose` 15 | 1. Install the local infrastructure tools: 16 | - `pip install -e ./infra` 17 | 18 | ### Easy way 19 | 20 | 1. Bring everything up with `bmon-config && ./dev reup` 21 | 22 | ### Manual way 23 | 24 | 1. Build local config tree: `bmon-config` 25 | 1. Run the database migrations: `./dev managepy migrate` 26 | 1. Bring docker-compose up: `docker-compose up [-d]` 27 | 28 | Then browse to `http://localhost:3000` to access Grafana; use the default admin 29 | credentials `admin`/`admin`. You should see a nice little sample dashboard displaying 30 | bitcoind logs etc. 31 | 32 | 33 | ## Running tests 34 | 35 | 1. `./dev test` 36 | 1. Try generating a block locally: 37 | - `docker-compose up -d` 38 | - In one terminal: `./dev watchlogs` 39 | - In another: `./dev generateblock` 40 | 41 | 42 | ## Important tools for investigation 43 | 44 | ### Full grep of all node logs 45 | 46 | ```sh 47 | bmon-infra rg 48 | ``` 49 | 50 | ### Query redis contents 51 | 52 | ```sh 53 | ssh some-bmon-host 54 | cd bmon/ 55 | ./dev shell 56 | ``` 57 | ```python 58 | from bmon.server_tasks import redisdb 59 | from bmon.mempool import full_scan 60 | full_scan(redisdb, '**') 61 | ``` 62 | 63 | ## Adding alerts 64 | 65 | Modify `./etc/prom-alerts.yml` and redeploy to the server with 66 | `bmon-infra -f bmon deploy`. 67 | 68 | ## Onboarding a new bitcoind host 69 | 70 | 1. Add an entry to `./infra/hosts_prod.yml` corresponding to the desired bitcoind 71 | settings. You might want to specify `ssh_hostname:` and `become_method:`. 72 | 1. Run `bmon-infra bootstrap` with the required arguments. If for some reason 73 | the script doesn't or can't run to completion, just do the stuff that's in there 74 | manually - it shouldn't be hard to figure out. This will output a wireguard pubkey 75 | that you should use in subsequent steps. 76 | 1. Modify `wg-bmon` wireguard configuration on the serverside (the bmon administrator 77 | has to do this) using the bitcoind wg pubkey. 78 | 1. Update the bmon secrets store with `sudo_password` for host. 79 | 1. Test deployment to the new host 80 | ```sh 81 | bmon-infra -f new-hostname deploy 82 | ``` 83 | 1. If that succeeds, update the server's monitoring configs etc. 84 | ```sh 85 | bmon-infra -t server deploy 86 | ``` 87 | 88 | And the new host should be fully online. 
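For reference, a `hosts_prod.yml` entry (step 1 above) might look something like the sketch below. Only the `ssh_hostname:` and `become_method:` keys are mentioned above; every other key name and value here is an illustrative guess at the kind of per-host bitcoind settings bmon tracks, and the authoritative schema is whatever `./infra/bmon_infra/infra.py` actually parses.

```yaml
# Hypothetical example only -- key names other than ssh_hostname and become_method
# are guesses; consult ./infra/bmon_infra/infra.py for the real schema.
bmon-b3:
  ssh_hostname: b3.example.net   # how to reach the box over SSH
  become_method: sudo            # privilege escalation used during provisioning
  bitcoin_version: v24.0.1       # guessed key: which bitcoind release to run
  bitcoin_dbcache: 450           # guessed key: -dbcache setting
  bitcoin_prune: 550             # guessed key: -prune setting
  region: eu-west                # guessed key: rough geographic label
```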
89 | 90 | ## Design 91 | 92 | Bmon consists of two machine types: one server and many nodes. The nodes run bitcoind, 93 | and provide information to the server, which collects and synthesizes all the data 94 | necessary. The server also provides views on the data, including log exploration, 95 | metric presentation, and other high-level insights (TBD). 96 | 97 | The bmon server runs 98 | 99 | - loki, for log aggregation 100 | - alertmanager, for alerts 101 | - grafana, for presenting logs and metrics 102 | - prometheus, for aggregating metrics 103 | - [tbd] bmon_collector, which aggregates insights 104 | 105 | Each bmon node (the analogue of a bitcoind node) runs 106 | 107 | - bitcoind, which runs bitcoin 108 | - promtail, which pushes logs into loki (on the server) 109 | - node_exporter, which offers system metrics for scraping by prometheus 110 | - bmon_exporter, which pushes interesting high-level data into bmon_collector (on the server) 111 | 112 | ```mermaid 113 | flowchart TD 114 | subgraph node 115 | node_exporter 116 | bmon_exporter 117 | end 118 | subgraph server 119 | loki 120 | grafana 121 | alertmanager 122 | prometheus 123 | loki --> grafana 124 | prometheus --> grafana 125 | bmon_exporter --> bmon_collector 126 | end 127 | subgraph node 128 | promtail 129 | promtail --> loki 130 | bitcoind --> /bmon/logs/bitcoin.log 131 | /bmon/logs/bitcoin.log --> promtail 132 | bitcoind --> bmon_exporter 133 | node_exporter --> prometheus 134 | prometheus --> alertmanager 135 | end 136 | ``` 137 | 138 | For simplicity, all hosts participate in a single wireguard network. 139 | 140 | ## How are hosts configured? 141 | 142 | All known participants in bmon are listed in `./infra/hosts_prod.yml` (or `hosts_dev.yml` for local development). This file is parsed 143 | by `./infra/bmon_infra/infra.py` (which gets installed as the `bmon-infra` command), which 144 | then configures each host over SSH (using [fscm](https://github.com/jamesob/fscm), 145 | which itself uses mitogen, a Python library that facilitates remote execution of 146 | Python code over an SSH connection). 147 | 148 | During provisioning, a copy of the `bmon` repo is cloned on each host at `~/bmon`, 149 | and then `bmon-config` (`./infra/bmon_infra/config.py`) is run to generate a `.env` 150 | file with all configuration and secrets based on the host's entry in the hosts file. 151 | 152 | The `.env` file is read in by docker-compose and used to set various parameters of the 153 | container runtimes. The docker-compose lifecycle is managed by systemd on each host; a 154 | user-level systemd unit is installed by the `bmon-infra` command. 155 | 156 | 157 | ## How is wireguard used? 158 | 159 | Since monitored hosts will live on different networks, wireguard is used to create a 160 | flat networking topology so that all hosts can be easily reached by the central bmon 161 | server, which aggregates measurements across each host. 162 | 163 | To add a host, file an issue here and I'll give you a wireguard config to use. 164 | 165 | Wireguard is also used to simulate geographical dispersion of the monitored nodes. A 166 | VPN provider gives us Wireguard configurations for diverse networks, which we then use 167 | on certain monitored bitcoind hosts. 168 | 169 | 170 | ### Node versions 171 | 172 | - One for each major release 173 | - One for current RC 174 | - Maintain 3 rotating versions of master, staggered backwards by 175 | - 1 week 176 | - 4 weeks 177 | - 16 weeks 178 | 179 | ### Uses 180 | 181 | - [ ] For a given block, determine when it was seen by each node. Present variance. 182 | Alert on anomalous variance.
183 | 184 | - [ ] For a given transaction, determine when it was seen by each node. Present 185 | variance. Alert on anomalous variance. 186 | 187 | - [ ] "Selfish mining" detector: alert on multiple blocks in rapid succession that 188 | cause a reorg. 189 | 190 | ### Notify on 191 | 192 | - [ ] mempool empty 193 | - [ ] inflation (rolling sum of UTXO amounts + (block_created_amt - block_destroyed_amt) > supply_at_height) 194 | - [ ] tip older than 90 minutes 195 | - [ ] transactions rejected from mempool 196 | - [ ] bad blocks 197 | - [ ] reorgs 198 | 199 | ### Measurements 200 | 201 | - [ ] block reception time per node 202 | - [ ] txn reception time per node 203 | - [ ] reorg count (number of unused tips?) 204 | - [ ] usual system metrics: memory usage, disk usage, CPU load, etc. 205 | 206 | ### Comparison across nodes 207 | 208 | - [ ] mempool contents 209 | - [ ] getblocktemplate contents (do they differ at all?) 210 | - [ ] block processing time (per logs) 211 | - [ ] block reception time diff 212 | - [ ] txn reception time diff 213 | 214 | ### Features 215 | 216 | - [ ] logs sent to a centralized log explorer (Loki-Grafana) 217 | 218 | 219 | ### Misc. 220 | 221 | #### Resizing existing vagrant disk 222 | 223 | sudo cfdisk /dev/sda 224 | sudo resize2fs -p -F /dev/sda1 225 | -------------------------------------------------------------------------------- /TODO.md: -------------------------------------------------------------------------------- 1 | - [ ] alert on no ATMPs processed in last hour 2 | - [ ] alert on low peer count 3 | - [ ] alert on high mean ping time 4 | - [ ] "catch all" sentry logging for lines that look interesting: "warning", "invalid", 5 | etc. 6 | 7 | For BG: 8 | 9 | - [ ] save parsed coinbase on block reception 10 | - [ ] better block propagation measures 11 | - chart across heights 12 | - average for each node 13 | - [ ] parse block coinbase: which pool orphaned which? 14 | - orphans because of topology? i.e. are sub blocks in reorg beating difficult? 15 | 16 | - [ ] compare tip with e.g. 
mempool.space and alert if not current 17 | - [ ] set up sentry 18 | - [ ] metric: at any given time, what feerate necessary to get into n blocks 19 | - [ ] separate infrastructure in different unit file 20 | -------------------------------------------------------------------------------- /Vagrantfile: -------------------------------------------------------------------------------- 1 | Vagrant.configure("2") do |config| 2 | 3 | ssh_pub_key = File.readlines("#{Dir.home}/.ssh/id_rsa_yubikey.pub").first.strip 4 | 5 | config.vm.provision "shell" do |s| 6 | s.inline = <<-SHELL 7 | echo #{ssh_pub_key} >> /home/vagrant/.ssh/authorized_keys 8 | mkdir -p /root/.ssh 9 | echo #{ssh_pub_key} >> /root/.ssh/authorized_keys 10 | apt-get update && apt-get install --yes curl sudo 11 | SHELL 12 | end 13 | 14 | config.vm.provider "virtualbox" do |vb| 15 | vb.memory = "1024" 16 | end 17 | 18 | config.vm.define "bmon-server" do |box| 19 | box.vm.network "private_network", ip: "192.168.56.2" 20 | box.vm.hostname = "bmon-server" 21 | box.vm.box = "debian/testing64" 22 | end 23 | 24 | config.vm.define "bmon-b1" do |box| 25 | box.vm.network "private_network", ip: "192.168.56.3" 26 | box.vm.hostname = "bmon-b1" 27 | box.vm.box = "debian/testing64" 28 | end 29 | 30 | config.vm.define "bmon-b2" do |box| 31 | box.vm.network "private_network", ip: "192.168.56.4" 32 | box.vm.hostname = "bmon-b2" 33 | box.vm.box = "debian/testing64" 34 | end 35 | 36 | end 37 | -------------------------------------------------------------------------------- /bmon/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chaincodelabs/bmon/39a0f8204f48cd9f84a9d181abaf69ab43747bd0/bmon/__init__.py -------------------------------------------------------------------------------- /bmon/asgi.py: -------------------------------------------------------------------------------- 1 | """ 2 | ASGI config for bmon project. 3 | 4 | It exposes the ASGI callable as a module-level variable named ``application``. 5 | 6 | For more information on this file, see 7 | https://docs.djangoproject.com/en/4.1/howto/deployment/asgi/ 8 | """ 9 | 10 | import os 11 | 12 | from django.core.asgi import get_asgi_application 13 | 14 | os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'bmon.settings') 15 | 16 | application = get_asgi_application() 17 | -------------------------------------------------------------------------------- /bmon/bitcoin/__init__.py: -------------------------------------------------------------------------------- 1 | 2 | from . 
import api # noqa 3 | from .api import * # noqa 4 | -------------------------------------------------------------------------------- /bmon/bitcoin/api.py: -------------------------------------------------------------------------------- 1 | import time 2 | import typing as t 3 | import logging 4 | import sys 5 | from concurrent.futures import ThreadPoolExecutor 6 | from functools import cache 7 | from pathlib import Path 8 | 9 | from .rpc import BitcoinRpc 10 | import bmon_infra as infra 11 | 12 | from django.conf import settings 13 | 14 | 15 | log = logging.getLogger(__name__) 16 | 17 | 18 | @cache 19 | def read_raw_bitcoind_version() -> str: 20 | assert settings.BITCOIND_VERSION_PATH 21 | return Path(settings.BITCOIND_VERSION_PATH).read_text().strip() 22 | 23 | 24 | @cache 25 | def bitcoind_version(ver: None | str = None) -> tuple[tuple[int, ...], None | str]: 26 | """Returns the version tuple and the git sha, if any.""" 27 | ver = ver or read_raw_bitcoind_version() 28 | ver = ver.strip('v') 29 | gitsha = None 30 | 31 | if '-' in ver: 32 | ver, gitsha = ver.split('-', 1) 33 | 34 | vertuple = tuple(int(i.split('rc')[0]) for i in ver.split('.')) 35 | if len(vertuple) == 2: 36 | vertuple += (0,) 37 | assert len(vertuple) == 3 38 | assert isinstance(vertuple, tuple) 39 | 40 | return vertuple, gitsha 41 | 42 | 43 | def is_pre_taproot(ver: str | tuple[int, ...] | None = None) -> bool: 44 | """Is this bitcoind node pre-taproot?""" 45 | if isinstance(ver, str): 46 | ver_tuple = bitcoind_version(ver)[0] 47 | elif isinstance(ver, tuple): 48 | ver_tuple = ver 49 | elif ver is None: 50 | ver_tuple = bitcoind_version()[0] 51 | else: 52 | raise ValueError("unexpected ver argument") 53 | 54 | return ver_tuple < (0, 21, 1) 55 | 56 | 57 | @cache 58 | def get_rpc(host: None | str = None, boot_tries: int = 5, boot_delay_secs: int = 5) -> BitcoinRpc: 59 | """ 60 | Return an RPC object to bitcoind. 61 | 62 | Will block until successfully connected. 63 | """ 64 | host = host or settings.BITCOIN_RPC_HOST # type: ignore 65 | url = ( 66 | f"http://{settings.BITCOIN_RPC_USER}:" 67 | f"{settings.BITCOIN_RPC_PASSWORD}@{host}" 68 | f":{settings.BITCOIN_RPC_PORT}" 69 | ) 70 | rpc = BitcoinRpc(url) 71 | 72 | if boot_tries == 0: 73 | return rpc 74 | 75 | while boot_tries > 0: 76 | boot_tries -= 1 77 | try: 78 | rpc.getblockchaininfo()['chain'] 79 | return rpc 80 | except Exception as e: 81 | log.info("hit exception waiting for bitcoin rpc to boot: %s", 82 | e.__class__.__name__) 83 | log.debug("bitcoin RPC exception", exc_info=e) 84 | time.sleep(boot_delay_secs) 85 | boot_delay_secs *= 2 86 | 87 | raise RuntimeError(f"couldn't boot RPC {url}") 88 | 89 | 90 | @cache 91 | def get_rpc_for_hosts(hosts: t.Tuple[infra.Host]) -> t.Dict[str, BitcoinRpc]: 92 | # TODO: assumes that all hosts use same ports, credentials 93 | return {host.name: get_rpc(host.bmon_ip) for host in hosts} 94 | 95 | 96 | RPC_ERROR_RESULT = object() 97 | 98 | 99 | def gather_rpc(rpc_call_arg: str | t.Callable[[BitcoinRpc], t.Any]) -> t.Dict[str, t.Any]: 100 | """ 101 | Gather RPC results from all bitcoin hosts. 102 | 103 | Args: 104 | rpc_call_arg: either a string that represents the RPC call or a 105 | function that takes the RPC object as its only argument. 106 | """ 107 | rpcmap = get_rpc_for_hosts(infra.get_bitcoind_hosts()) 108 | promises = {} 109 | results: dict[str, t.Any] = {} 110 | 111 | with ThreadPoolExecutor(max_workers=10) as e: 112 | for hostname, rpc in rpcmap.items(): 113 | 114 | if isinstance(rpc_call_arg, str): 115 | promises[hostname] = e.submit(rpc.call, rpc_call_arg) 116 | else: 117 | promises[hostname] = e.submit(rpc_call_arg, rpc) 118 | 119 | for hostname, promise in promises.items(): 120 | try: 121 | results[hostname] = promise.result() 122 | except Exception as e: 123 | log.exception( 124 | "host %r encountered an error running %s: %s", 125 | hostname, 126 | rpc_call_arg, 127 | e, 128 | ) 129 | results[hostname] = RPC_ERROR_RESULT 130 | 131 | return results 132 | 133 | 134 | def wait_for_synced(): 135 | """ 136 | Wait until bitcoind's tip is reasonably current. 137 | 138 | This is helpful for bootstrapping new monitored bitcoind instances without 139 | generating a bunch of spurious data. 140 | """ 141 | tries = 12 142 | backoff_secs = 2 143 | is_synced = False 144 | got = {} 145 | i = 0 146 | rpc = get_rpc() 147 | 148 | while not is_synced: 149 | try: 150 | got = rpc.getblockchaininfo() 151 | except Exception as e: 152 | print(f"exception getting verification progress: {e}") 153 | tries -= 1 154 | time.sleep(backoff_secs) 155 | if backoff_secs < 120: 156 | backoff_secs *= 2 157 | else: 158 | is_synced = float(got["verificationprogress"]) > 0.9999 159 | time.sleep(1) 160 | tries = 12 161 | 162 | if i % 40 == 0: 163 | print(f"At height {got['blocks']} ({got['verificationprogress']})", flush=True) 164 | 165 | i += 1 166 | 167 | if not is_synced: 168 | print("Failed to sync!") 169 | sys.exit(1) 170 | 171 | print(f"Synced to height: {got['blocks']}") 172 | -------------------------------------------------------------------------------- /bmon/bitcoin/rpc.py: -------------------------------------------------------------------------------- 1 | # Copyright (C) 2007 Jan-Klaas Kollhof 2 | # Copyright (C) 2011-2018 The python-bitcoinlib developers 3 | # Copyright (C) 2020 James O'Beirne 4 | # 5 | # This file is part of python-bitcoinlib. 6 | # 7 | # It is subject to the license terms in the LICENSE file found in the top-level 8 | # directory of the python-bitcoinlib distribution. 9 | # 10 | # No part of python-bitcoinlib, including this file, may be copied, modified, 11 | # propagated, or distributed except according to the terms contained in the 12 | # LICENSE file. 13 | import time 14 | import socket 15 | import http.client as httplib 16 | import json 17 | import base64 18 | import re 19 | import urllib.parse as urlparse 20 | import logging 21 | from decimal import Decimal 22 | from typing import IO 23 | 24 | 25 | DEFAULT_USER_AGENT = "AuthServiceProxy/0.1" 26 | DEFAULT_HTTP_TIMEOUT = 30 27 | 28 | log = logging.getLogger("bitcoin-rpc") 29 | 30 | 31 | class JSONRPCError(Exception): 32 | """JSON-RPC protocol error base class 33 | Subclasses of this class also exist for specific types of errors; the set 34 | of all subclasses is by no means complete. 35 | """ 36 | 37 | def __init__(self, rpc_error): 38 | super(JSONRPCError, self).__init__( 39 | "msg: %r code: %r" % (rpc_error["message"], rpc_error["code"]) 40 | ) 41 | self.error = rpc_error 42 | 43 | @property 44 | def code(self) -> int: 45 | return int(self.error["code"]) 46 | 47 | 48 | class BitcoinRpc(object): 49 | """Base JSON-RPC proxy class.
Contains only private methods; do not use 50 | directly.""" 51 | 52 | def __init__( 53 | self, 54 | service_url, 55 | service_port=None, 56 | net_name=None, 57 | timeout=DEFAULT_HTTP_TIMEOUT, 58 | debug_stream: IO | None = None, 59 | wallet_name=None, 60 | ): 61 | 62 | self.debug_stream = debug_stream 63 | authpair = None 64 | net_name = net_name or "mainnet" 65 | self.timeout = timeout 66 | self.net_name = net_name 67 | 68 | if service_port is None: 69 | service_port = { 70 | "mainnet": 8332, 71 | "testnet3": 18332, 72 | "regtest": 18443, 73 | }.get(net_name, 18332) 74 | 75 | url = urlparse.urlparse(service_url) 76 | authpair = "%s:%s" % (url.username or "", url.password or "") 77 | 78 | # Do our best to autodetect testnet. 79 | if url.port: 80 | self.net_name = net_name = { 81 | 18332: "testnet3", 82 | 18443: "regtest", 83 | }.get(url.port, 'mainnet') 84 | 85 | if authpair == ":": 86 | raise ValueError("need auth") 87 | 88 | if wallet_name: 89 | service_url = service_url.rstrip("/") 90 | service_url += f"/wallet/{wallet_name}" 91 | 92 | log.debug(f"Connecting to bitcoind: {service_url}") 93 | self.url = service_url 94 | 95 | # Credential redacted 96 | self.public_url = re.sub(r":[^/]+@", ":***@", self.url, 1) 97 | self._parsed_url = urlparse.urlparse(service_url) 98 | self.host = self._parsed_url.hostname 99 | 100 | log.debug(f"Initializing RPC client at {self.public_url}") 101 | # XXX keep for debugging, but don't ship: 102 | # logger.info(f"[REMOVE THIS] USING AUTHPAIR {authpair}") 103 | 104 | if self._parsed_url.scheme not in ("http",): 105 | raise ValueError("Unsupported URL scheme %r" % self._parsed_url.scheme) 106 | 107 | self.__id_count = 0 108 | 109 | self.__auth_header = None 110 | if authpair: 111 | self.__auth_header = b"Basic " + base64.b64encode(authpair.encode("utf8")) 112 | 113 | @property 114 | def port(self) -> int: 115 | if self._parsed_url.port is None: 116 | return httplib.HTTP_PORT 117 | else: 118 | return self._parsed_url.port 119 | 120 | def _getconn(self, timeout=None): 121 | return httplib.HTTPConnection( 122 | self._parsed_url.hostname, 123 | port=self.port, 124 | timeout=timeout, 125 | ) 126 | 127 | def call(self, rpc_str: str, **kwargs) -> dict: 128 | """Call a method with a string.""" 129 | [meth, *args] = rpc_str.split() 130 | return self._call(meth, *args, **kwargs) 131 | 132 | def _call(self, rpc_call_name, *args, **kwargs): 133 | self.__id_count += 1 134 | kwargs.setdefault("timeout", self.timeout) 135 | 136 | postdata = json.dumps( 137 | { 138 | "version": "1.1", 139 | "method": rpc_call_name, 140 | "params": args, 141 | "id": self.__id_count, 142 | } 143 | ) 144 | 145 | log.debug(f"[{self.public_url}] calling %s%s", rpc_call_name, args) 146 | 147 | headers = { 148 | "Host": self._parsed_url.hostname, 149 | "User-Agent": DEFAULT_USER_AGENT, 150 | "Content-type": "application/json", 151 | } 152 | 153 | if self.__auth_header is not None: 154 | headers["Authorization"] = self.__auth_header 155 | 156 | path = self._parsed_url.path 157 | tries = 5 158 | backoff = 0.3 159 | conn = None 160 | while tries: 161 | try: 162 | conn = self._getconn(timeout=kwargs["timeout"]) 163 | conn.request("POST", path, postdata, headers) 164 | except (BlockingIOError, httplib.CannotSendRequest, socket.gaierror): 165 | log.exception( 166 | f"hit request error: {path}, {postdata}, {self._parsed_url}" 167 | ) 168 | tries -= 1 169 | if not tries: 170 | raise 171 | time.sleep(backoff) 172 | backoff *= 2 173 | else: 174 | break 175 | 176 | assert conn 177 | response = 
self._get_response(conn) 178 | err = response.get("error") 179 | if err is not None: 180 | if isinstance(err, dict): 181 | raise JSONRPCError( 182 | { 183 | "code": err.get("code", -345), 184 | "message": err.get("message", "error message not specified"), 185 | } 186 | ) 187 | raise JSONRPCError({"code": -344, "message": str(err)}) 188 | elif "result" not in response: 189 | raise JSONRPCError({"code": -343, "message": "missing JSON-RPC result"}) 190 | else: 191 | return response["result"] 192 | 193 | def _get_response(self, conn): 194 | http_response = conn.getresponse() 195 | if http_response is None: 196 | raise JSONRPCError( 197 | {"code": -342, "message": "missing HTTP response from server"} 198 | ) 199 | 200 | rdata = http_response.read().decode("utf8") 201 | try: 202 | loaded = json.loads(rdata, parse_float=Decimal) 203 | log.debug(f"[{self.public_url}] -> {loaded}") 204 | return loaded 205 | except Exception: 206 | raise JSONRPCError( 207 | { 208 | "code": -342, 209 | "message": ( 210 | "non-JSON HTTP response with '%i %s' from server: '%.20s%s'" 211 | % ( 212 | http_response.status, 213 | http_response.reason, 214 | rdata, 215 | "..." if len(rdata) > 20 else "", 216 | ) 217 | ), 218 | } 219 | ) 220 | 221 | def __getattr__(self, name): 222 | if name.startswith("__") and name.endswith("__"): 223 | # Prevent RPC calls for non-existing python internal attribute 224 | # access. If someone tries to get an internal attribute 225 | # of RawProxy instance, and the instance does not have this 226 | # attribute, we do not want the bogus RPC call to happen. 227 | raise AttributeError 228 | 229 | # Create a callable to do the actual call 230 | def _call_wrapper(*args, **kwargs): 231 | return self._call(name, *args, **kwargs) 232 | 233 | # Make debuggers show <function bitcoin.rpc.name> rather than <function 234 | # bitcoin.rpc.<lambda>> 235 | _call_wrapper.__name__ = name 236 | return _call_wrapper 237 | -------------------------------------------------------------------------------- /bmon/bitcoind_monitor.py: -------------------------------------------------------------------------------- 1 | import os 2 | import signal 3 | import sys 4 | from pathlib import Path 5 | from wsgiref.simple_server import make_server 6 | import logging 7 | 8 | import django 9 | os.environ.setdefault("DJANGO_SETTINGS_MODULE", "bmon.settings") 10 | django.setup() 11 | from django.conf import settings 12 | from clii import App 13 | from prometheus_client import make_wsgi_app, Gauge 14 | 15 | from .
import bitcoind_tasks, models 16 | from bmon_infra import infra 17 | 18 | 19 | log = logging.getLogger(__name__) 20 | 21 | cli = App() 22 | 23 | 24 | LAST_BITCOIND_LOG_SEEN_AT = Gauge( 25 | "bmon_last_bitcoind_log_seen_at", 26 | "Time that the last bitcoind log line was processed", 27 | ) 28 | 29 | BITCOIND_EVENT_TASKS_QUEUE_DEPTH = Gauge( 30 | "bmon_bitcoind_event_queue_depth", 31 | "The depth of the queue processing bitcoind events.", 32 | ) 33 | 34 | BITCOIND_MEMPOOL_TASKS_QUEUE_DEPTH = Gauge( 35 | "bmon_bitcoind_mempool_queue_depth", 36 | "The depth of the queue processing bitcoind mempool activity.", 37 | ) 38 | 39 | LAST_CONNECT_BLOCK_AT = Gauge( 40 | "bmon_last_connect_block_at", 41 | "Time of the block in the last ConnectBlockEvent", 42 | ) 43 | 44 | MEMPOOL_ACTIVITY_CACHE_SIZE = Gauge( 45 | "bmon_mempool_activity_cache_size", 46 | "Size of the mempool activity cache", 47 | unit='mibibytes', 48 | ) 49 | 50 | BITCOIND_LOG_SIZE = Gauge( 51 | "bmon_bitcoind_debug_log_size", 52 | "Size of the current debug.log", 53 | unit='mibibytes', 54 | ) 55 | 56 | 57 | assert settings.BITCOIND_LOG_PATH 58 | bitcoind_log = Path(settings.BITCOIND_LOG_PATH) 59 | 60 | 61 | def refresh_metrics(): 62 | log_dt = ( 63 | models.LogProgress.objects.filter(hostname=settings.HOSTNAME) 64 | .order_by("-id") 65 | .values_list("timestamp", flat=True) 66 | .first() 67 | ) 68 | 69 | if log_dt: 70 | LAST_BITCOIND_LOG_SEEN_AT.set(log_dt.timestamp()) 71 | 72 | BITCOIND_EVENT_TASKS_QUEUE_DEPTH.set(len(bitcoind_tasks.events_q)) 73 | BITCOIND_MEMPOOL_TASKS_QUEUE_DEPTH.set(len(bitcoind_tasks.mempool_q)) 74 | 75 | cb = ( 76 | models.ConnectBlockEvent.objects.filter(host__name=settings.HOSTNAME) 77 | .order_by("-id") 78 | .first() 79 | ) 80 | 81 | if cb: 82 | LAST_CONNECT_BLOCK_AT.set(cb.timestamp.timestamp()) 83 | 84 | if bitcoind_tasks.CURRENT_MEMPOOL_FILE.exists(): 85 | MEMPOOL_ACTIVITY_CACHE_SIZE.set( 86 | os.path.getsize(bitcoind_tasks.CURRENT_MEMPOOL_FILE) / (1024 ** 2)) 87 | 88 | if bitcoind_log.exists(): 89 | BITCOIND_LOG_SIZE.set( 90 | os.path.getsize(bitcoind_log) / (1024 ** 2)) 91 | 92 | 93 | def sigterm_handler(*_): 94 | print("exiting") 95 | sys.exit(0) 96 | 97 | 98 | @cli.main 99 | def main(addr="0.0.0.0", port=infra.BMON_BITCOIND_EXPORTER_PORT): 100 | app = make_wsgi_app() 101 | 102 | signal.signal(signal.SIGTERM, sigterm_handler) 103 | 104 | def refresh(*args, **kwargs): 105 | try: 106 | refresh_metrics() 107 | except Exception: 108 | log.exception("failed to refresh bitcoind worker metrics") 109 | 110 | return app(*args, **kwargs) 111 | 112 | httpd = make_server(addr, port, refresh) 113 | print(f"serving bitcoind monitor {addr}:{port}") 114 | httpd.serve_forever() 115 | -------------------------------------------------------------------------------- /bmon/conftest.py: -------------------------------------------------------------------------------- 1 | import os 2 | import json 3 | import multiprocessing 4 | from pathlib import Path 5 | import typing as t 6 | 7 | import redis 8 | import pytest 9 | from django.conf import settings 10 | 11 | from bmon import bitcoin, models 12 | 13 | 14 | def read_data_file(dirname) -> t.List[str]: 15 | dir_path = Path(os.path.dirname(os.path.realpath(__file__))) 16 | return (dir_path / "testdata" / dirname).read_text().splitlines() 17 | 18 | 19 | def read_json_data(filename): 20 | dir_path = Path(os.path.dirname(os.path.realpath(__file__))) 21 | return json.loads((dir_path / "testdata" / filename).read_text()) 22 | 23 | 24 | @pytest.fixture(scope="session", autouse=True) 25 | 
def raw_bitcoind_version(): 26 | bitcoin.api.read_raw_bitcoind_version = lambda: "v23.99.0-447f50e4aed9" 27 | 28 | 29 | @pytest.fixture(autouse=True) 30 | def clear_redis(): 31 | for url in (settings.REDIS_SERVER_URL, settings.REDIS_LOCAL_URL): 32 | redis.Redis.from_url(url).flushall() 33 | 34 | 35 | def make_host(name: str, bitcoin_version: str = "v23.0"): 36 | assert settings.BITCOIN_DBCACHE 37 | assert settings.BITCOIN_PRUNE 38 | 39 | return models.Host.objects.get_or_create( 40 | name=name, 41 | cpu_info="test", 42 | memory_bytes=1024, 43 | nproc=multiprocessing.cpu_count(), 44 | bitcoin_version=bitcoin_version, 45 | bitcoin_gitref="", 46 | bitcoin_gitsha="", 47 | bitcoin_dbcache=int(settings.BITCOIN_DBCACHE), 48 | bitcoin_prune=int(settings.BITCOIN_PRUNE), 49 | bitcoin_extra={ 50 | "flags": "-regtest", 51 | }, 52 | defaults={ 53 | "region": "", 54 | }, 55 | )[0] 56 | 57 | 58 | @pytest.fixture() 59 | def fake_hosts(): 60 | """ 61 | These hosts should match up with the file in ./infra/hosts_dev.yml 62 | """ 63 | host1 = make_host('bitcoind', 'v0.18.0') 64 | host2 = make_host('bitcoind-02') 65 | return host1, host2 66 | -------------------------------------------------------------------------------- /bmon/hosts.py: -------------------------------------------------------------------------------- 1 | from django.conf import settings 2 | 3 | import bmon_infra as infra 4 | from . import models, mempool, bitcoin 5 | 6 | 7 | def get_bitcoind_hosts_to_policy_cohort() -> dict[models.Host, mempool.PolicyCohort]: 8 | hosts = infra.get_bitcoind_hosts() 9 | # TODO this is an O(n) query 10 | host_objs = list( 11 | filter( 12 | None, 13 | [ 14 | models.Host.objects.filter(name=h.name).order_by("-id").first() 15 | for h in hosts 16 | ], 17 | ) 18 | ) 19 | if not settings.TESTING: 20 | assert len(host_objs) == len(hosts) 21 | return { 22 | h: mempool.PolicyCohort.segwit 23 | if bitcoin.is_pre_taproot(h.bitcoin_version) 24 | else mempool.PolicyCohort.taproot 25 | for h in host_objs 26 | } 27 | -------------------------------------------------------------------------------- /bmon/migrations/0001_initial.py: -------------------------------------------------------------------------------- 1 | # Generated by Django 4.1.1 on 2022-10-03 16:21 2 | 3 | from django.db import migrations, models 4 | 5 | 6 | class Migration(migrations.Migration): 7 | 8 | initial = True 9 | 10 | dependencies = [ 11 | ] 12 | 13 | operations = [ 14 | migrations.CreateModel( 15 | name='ConnectBlockDetails', 16 | fields=[ 17 | ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), 18 | ('host', models.CharField(max_length=200)), 19 | ('timestamp', models.DateTimeField()), 20 | ('blockhash', models.CharField(max_length=80)), 21 | ('height', models.IntegerField()), 22 | ('load_block_from_disk_time_ms', models.FloatField()), 23 | ('sanity_checks_time_ms', models.FloatField()), 24 | ('fork_checks_time_ms', models.FloatField()), 25 | ('txin_count', models.IntegerField()), 26 | ('tx_count', models.IntegerField()), 27 | ('connect_txs_time_ms', models.FloatField()), 28 | ('verify_time_ms', models.FloatField()), 29 | ('index_writing_time_ms', models.FloatField()), 30 | ('connect_total_time_ms', models.FloatField()), 31 | ('flush_coins_time_ms', models.FloatField()), 32 | ('flush_chainstate_time_ms', models.FloatField()), 33 | ('connect_postprocess_time_ms', models.FloatField()), 34 | ('connectblock_total_time_ms', models.FloatField()), 35 | ], 36 | ), 37 | migrations.CreateModel( 38 | 
name='ConnectBlockEvent', 39 | fields=[ 40 | ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), 41 | ('host', models.CharField(max_length=200)), 42 | ('timestamp', models.DateTimeField()), 43 | ('blockhash', models.CharField(max_length=80)), 44 | ('height', models.IntegerField()), 45 | ('log2_work', models.FloatField()), 46 | ('total_tx_count', models.IntegerField()), 47 | ('version', models.CharField(max_length=200, null=True)), 48 | ('date', models.DateTimeField()), 49 | ('cachesize_mib', models.FloatField(null=True)), 50 | ('cachesize_txo', models.IntegerField()), 51 | ('warning', models.CharField(max_length=1024, null=True)), 52 | ], 53 | ), 54 | ] 55 | -------------------------------------------------------------------------------- /bmon/migrations/0002_logprogress_alter_connectblockevent_version_and_more.py: -------------------------------------------------------------------------------- 1 | # Generated by Django 4.1.2 on 2022-10-04 18:47 2 | 3 | from django.db import migrations, models 4 | 5 | 6 | class Migration(migrations.Migration): 7 | 8 | dependencies = [ 9 | ('bmon', '0001_initial'), 10 | ] 11 | 12 | operations = [ 13 | migrations.CreateModel( 14 | name='LogProgress', 15 | fields=[ 16 | ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), 17 | ('host', models.CharField(max_length=200, unique=True)), 18 | ('timestamp', models.DateTimeField()), 19 | ('loghash', models.CharField(max_length=200)), 20 | ], 21 | ), 22 | migrations.AlterField( 23 | model_name='connectblockevent', 24 | name='version', 25 | field=models.CharField(blank=True, max_length=200, null=True), 26 | ), 27 | migrations.AlterField( 28 | model_name='connectblockevent', 29 | name='warning', 30 | field=models.CharField(blank=True, max_length=1024, null=True), 31 | ), 32 | ] 33 | -------------------------------------------------------------------------------- /bmon/migrations/0003_mempoolaccept_processlineerror.py: -------------------------------------------------------------------------------- 1 | # Generated by Django 4.1.2 on 2022-10-17 20:14 2 | 3 | from django.db import migrations, models 4 | 5 | 6 | class Migration(migrations.Migration): 7 | 8 | dependencies = [ 9 | ('bmon', '0002_logprogress_alter_connectblockevent_version_and_more'), 10 | ] 11 | 12 | operations = [ 13 | migrations.CreateModel( 14 | name='MempoolAccept', 15 | fields=[ 16 | ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), 17 | ('host', models.CharField(max_length=200)), 18 | ('timestamp', models.DateTimeField()), 19 | ('txhash', models.CharField(max_length=80)), 20 | ('peer_num', models.IntegerField()), 21 | ('pool_size_txns', models.IntegerField()), 22 | ('pool_size_kb', models.IntegerField()), 23 | ], 24 | ), 25 | migrations.CreateModel( 26 | name='ProcessLineError', 27 | fields=[ 28 | ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), 29 | ('host', models.CharField(max_length=200)), 30 | ('timestamp', models.DateTimeField(auto_now_add=True)), 31 | ('listener', models.CharField(max_length=240)), 32 | ('line', models.CharField(max_length=2048)), 33 | ], 34 | ), 35 | ] 36 | -------------------------------------------------------------------------------- /bmon/migrations/0004_connectblockdetails_created_at_and_more.py: -------------------------------------------------------------------------------- 1 | # Generated by Django 4.1.2 on 2022-10-20 
17:43 2 | 3 | from django.db import migrations, models 4 | import django.utils.timezone 5 | 6 | 7 | class Migration(migrations.Migration): 8 | 9 | dependencies = [ 10 | ('bmon', '0003_mempoolaccept_processlineerror'), 11 | ] 12 | 13 | operations = [ 14 | migrations.AddField( 15 | model_name='connectblockdetails', 16 | name='created_at', 17 | field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now), 18 | preserve_default=False, 19 | ), 20 | migrations.AddField( 21 | model_name='connectblockevent', 22 | name='created_at', 23 | field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now), 24 | preserve_default=False, 25 | ), 26 | ] 27 | -------------------------------------------------------------------------------- /bmon/migrations/0005_blockconnectedevent_blockdisconnectedevent_peer_and_more.py: -------------------------------------------------------------------------------- 1 | # Generated by Django 4.1.2 on 2022-10-22 16:01 2 | 3 | from django.db import migrations, models 4 | import django.db.models.deletion 5 | 6 | 7 | class Migration(migrations.Migration): 8 | 9 | dependencies = [ 10 | ('bmon', '0004_connectblockdetails_created_at_and_more'), 11 | ] 12 | 13 | operations = [ 14 | migrations.CreateModel( 15 | name='BlockConnectedEvent', 16 | fields=[ 17 | ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), 18 | ('created_at', models.DateTimeField(auto_now_add=True)), 19 | ('host', models.CharField(max_length=200)), 20 | ('timestamp', models.DateTimeField()), 21 | ('blockhash', models.CharField(max_length=80)), 22 | ('height', models.IntegerField()), 23 | ], 24 | ), 25 | migrations.CreateModel( 26 | name='BlockDisconnectedEvent', 27 | fields=[ 28 | ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), 29 | ('created_at', models.DateTimeField(auto_now_add=True)), 30 | ('host', models.CharField(max_length=200)), 31 | ('timestamp', models.DateTimeField()), 32 | ('blockhash', models.CharField(max_length=80)), 33 | ('height', models.IntegerField()), 34 | ], 35 | ), 36 | migrations.CreateModel( 37 | name='Peer', 38 | fields=[ 39 | ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), 40 | ('created_at', models.DateTimeField(auto_now_add=True)), 41 | ('host', models.CharField(max_length=200)), 42 | ('addr', models.CharField(max_length=256)), 43 | ('connection_type', models.CharField(max_length=256)), 44 | ('num', models.IntegerField()), 45 | ('inbound', models.BooleanField()), 46 | ('network', models.CharField(max_length=256)), 47 | ('services', models.CharField(max_length=256)), 48 | ('servicesnames', models.JSONField()), 49 | ('subver', models.CharField(max_length=256)), 50 | ('version', models.IntegerField()), 51 | ], 52 | ), 53 | migrations.CreateModel( 54 | name='ReorgEvent', 55 | fields=[ 56 | ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), 57 | ('created_at', models.DateTimeField(auto_now_add=True)), 58 | ('finished_timestamp', models.DateTimeField()), 59 | ('host', models.CharField(max_length=200)), 60 | ('min_height', models.IntegerField()), 61 | ('max_height', models.IntegerField()), 62 | ('old_blockhashes', models.JSONField()), 63 | ('new_blockhashes', models.JSONField()), 64 | ], 65 | ), 66 | migrations.CreateModel( 67 | name='RequestBlockEvent', 68 | fields=[ 69 | ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, 
verbose_name='ID')), 70 | ('created_at', models.DateTimeField(auto_now_add=True)), 71 | ('host', models.CharField(max_length=200)), 72 | ('timestamp', models.DateTimeField()), 73 | ('blockhash', models.CharField(max_length=80)), 74 | ('height', models.IntegerField(blank=True, null=True)), 75 | ('peer_num', models.IntegerField()), 76 | ('method', models.CharField(max_length=256)), 77 | ('peer', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='bmon.peer')), 78 | ], 79 | ), 80 | ] 81 | -------------------------------------------------------------------------------- /bmon/migrations/0006_peer_bip152_hb_from_peer_bip152_hb_to_peer_relaytxes_and_more.py: -------------------------------------------------------------------------------- 1 | # Generated by Django 4.1.2 on 2022-10-23 15:03 2 | 3 | from django.db import migrations, models 4 | 5 | 6 | class Migration(migrations.Migration): 7 | 8 | dependencies = [ 9 | ('bmon', '0005_blockconnectedevent_blockdisconnectedevent_peer_and_more'), 10 | ] 11 | 12 | operations = [ 13 | migrations.AddField( 14 | model_name='peer', 15 | name='bip152_hb_from', 16 | field=models.BooleanField(default=False), 17 | preserve_default=False, 18 | ), 19 | migrations.AddField( 20 | model_name='peer', 21 | name='bip152_hb_to', 22 | field=models.BooleanField(default=False), 23 | preserve_default=False, 24 | ), 25 | migrations.AddField( 26 | model_name='peer', 27 | name='relaytxes', 28 | field=models.BooleanField(default=False), 29 | preserve_default=False, 30 | ), 31 | migrations.AddConstraint( 32 | model_name='peer', 33 | constraint=models.UniqueConstraint(fields=('host', 'num', 'addr', 'connection_type', 'inbound', 'network', 'services', 'subver', 'version', 'relaytxes', 'bip152_hb_from', 'bip152_hb_to'), name='unique_peer'), 34 | ), 35 | ] 36 | -------------------------------------------------------------------------------- /bmon/migrations/0007_alter_peer_bip152_hb_from_alter_peer_bip152_hb_to.py: -------------------------------------------------------------------------------- 1 | # Generated by Django 4.1.2 on 2022-10-24 19:32 2 | 3 | from django.db import migrations, models 4 | 5 | 6 | class Migration(migrations.Migration): 7 | 8 | dependencies = [ 9 | ('bmon', '0006_peer_bip152_hb_from_peer_bip152_hb_to_peer_relaytxes_and_more'), 10 | ] 11 | 12 | operations = [ 13 | migrations.AlterField( 14 | model_name='peer', 15 | name='bip152_hb_from', 16 | field=models.BooleanField(blank=True, null=True), 17 | ), 18 | migrations.AlterField( 19 | model_name='peer', 20 | name='bip152_hb_to', 21 | field=models.BooleanField(blank=True, null=True), 22 | ), 23 | ] 24 | -------------------------------------------------------------------------------- /bmon/migrations/0008_alter_peer_servicesnames.py: -------------------------------------------------------------------------------- 1 | # Generated by Django 4.1.2 on 2022-10-24 19:57 2 | 3 | from django.db import migrations, models 4 | 5 | 6 | class Migration(migrations.Migration): 7 | 8 | dependencies = [ 9 | ('bmon', '0007_alter_peer_bip152_hb_from_alter_peer_bip152_hb_to'), 10 | ] 11 | 12 | operations = [ 13 | migrations.AlterField( 14 | model_name='peer', 15 | name='servicesnames', 16 | field=models.JSONField(blank=True, null=True), 17 | ), 18 | ] 19 | -------------------------------------------------------------------------------- /bmon/migrations/0009_mempoolreject.py: -------------------------------------------------------------------------------- 1 | # Generated by Django 4.1.2 on 2022-10-25 01:53 2 | 
3 | from django.db import migrations, models 4 | import django.db.models.deletion 5 | 6 | 7 | class Migration(migrations.Migration): 8 | 9 | dependencies = [ 10 | ('bmon', '0008_alter_peer_servicesnames'), 11 | ] 12 | 13 | operations = [ 14 | migrations.CreateModel( 15 | name='MempoolReject', 16 | fields=[ 17 | ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), 18 | ('created_at', models.DateTimeField(auto_now_add=True)), 19 | ('host', models.CharField(max_length=200)), 20 | ('timestamp', models.DateTimeField()), 21 | ('txhash', models.CharField(max_length=80)), 22 | ('peer_num', models.IntegerField()), 23 | ('reason', models.CharField(max_length=1024)), 24 | ('reason_data', models.JSONField(blank=True, default=dict)), 25 | ('peer', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='bmon.peer')), 26 | ], 27 | options={ 28 | 'abstract': False, 29 | }, 30 | ), 31 | ] 32 | -------------------------------------------------------------------------------- /bmon/migrations/0010_mempoolreject_reason_code_alter_mempoolreject_peer_and_more.py: -------------------------------------------------------------------------------- 1 | # Generated by Django 4.1.2 on 2022-10-26 12:34 2 | 3 | from django.db import migrations, models 4 | import django.db.models.deletion 5 | 6 | from bmon.models import MempoolReject as CurrMempoolReject 7 | 8 | 9 | def cleanup_rejects(apps, schema_editor): 10 | MempoolReject = apps.get_model("bmon", "MempoolReject") 11 | db_alias = schema_editor.connection.alias 12 | 13 | # Remove old pre-taproot junk 14 | deleted = MempoolReject.objects.using(db_alias).filter( 15 | models.Q(host__in=['b-02.slug', 'b-03.slug']), 16 | models.Q(reason__startswith='scriptpubkey') | 17 | models.Q(reason__startswith="non-mandatory-script-verify-flag")).delete() 18 | print(f"DELETED {deleted}") 19 | 20 | 21 | def add_reasoncode(apps, schema_editor): 22 | MempoolReject = apps.get_model("bmon", "MempoolReject") 23 | db_alias = schema_editor.connection.alias 24 | 25 | objs = 0 26 | 27 | for rej in MempoolReject.objects.using(db_alias).all(): 28 | rej.reason_code = CurrMempoolReject.get_reason_reject_code(rej.reason) 29 | rej.save() 30 | objs += 1 31 | 32 | print(f"Updated {objs} objects") 33 | 34 | 35 | class Migration(migrations.Migration): 36 | 37 | dependencies = [ 38 | ('bmon', '0009_mempoolreject'), 39 | ] 40 | 41 | operations = [ 42 | migrations.RunPython(cleanup_rejects), 43 | migrations.AddField( 44 | model_name='mempoolreject', 45 | name='reason_code', 46 | field=models.CharField(default='', help_text='A code indicating the rejection reason', max_length=256), 47 | ), 48 | migrations.AlterField( 49 | model_name='mempoolreject', 50 | name='peer', 51 | field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='bmon.peer'), 52 | ), 53 | migrations.AlterField( 54 | model_name='mempoolreject', 55 | name='reason', 56 | field=models.CharField(help_text='The full reason string', max_length=1024), 57 | ), 58 | migrations.AlterField( 59 | model_name='mempoolreject', 60 | name='reason_data', 61 | field=models.JSONField(blank=True, default=dict, help_text='Extra data associated with the reason'), 62 | ), 63 | migrations.RunPython(add_reasoncode), 64 | ] 65 | -------------------------------------------------------------------------------- /bmon/migrations/0011_host_mempoolreject_unique_reject_host_unique_host.py: -------------------------------------------------------------------------------- 1 | # Generated by Django 
4.1.2 on 2022-10-26 17:52 2 | 3 | from django.db import migrations, models 4 | 5 | 6 | class Migration(migrations.Migration): 7 | 8 | dependencies = [ 9 | ('bmon', '0010_mempoolreject_reason_code_alter_mempoolreject_peer_and_more'), 10 | ] 11 | 12 | operations = [ 13 | migrations.CreateModel( 14 | name='Host', 15 | fields=[ 16 | ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), 17 | ('created_at', models.DateTimeField(auto_now_add=True)), 18 | ('name', models.CharField(max_length=256, unique=True)), 19 | ('cpu_info', models.CharField(max_length=1024)), 20 | ('memory_bytes', models.FloatField()), 21 | ('nproc', models.IntegerField(help_text='The number of processors')), 22 | ('region', models.CharField(blank=True, max_length=256, null=True)), 23 | ('bitcoin_version', models.CharField(help_text='As reported by bitcoind -version', max_length=256)), 24 | ('bitcoin_gitref', models.CharField(blank=True, max_length=256, null=True)), 25 | ('bitcoin_gitsha', models.CharField(blank=True, max_length=256, null=True)), 26 | ('bitcoin_dbcache', models.IntegerField()), 27 | ('bitcoin_prune', models.IntegerField()), 28 | ('bitcoin_extra', models.JSONField(help_text='Extra data about this bitcoind instance')), 29 | ], 30 | ), 31 | migrations.AddConstraint( 32 | model_name='mempoolreject', 33 | constraint=models.UniqueConstraint(fields=('host', 'timestamp', 'txhash', 'peer_num'), name='unique_reject'), 34 | ), 35 | migrations.AddConstraint( 36 | model_name='host', 37 | constraint=models.UniqueConstraint(fields=('name', 'cpu_info', 'memory_bytes', 'nproc', 'bitcoin_version', 'bitcoin_gitref', 'bitcoin_gitsha', 'bitcoin_dbcache', 'bitcoin_prune', 'bitcoin_extra'), name='unique_host'), 38 | ), 39 | ] 40 | -------------------------------------------------------------------------------- /bmon/migrations/0012_rename_host_logprogress_hostname.py: -------------------------------------------------------------------------------- 1 | # Generated by Django 4.1.2 on 2022-10-26 18:02 2 | 3 | from django.db import migrations 4 | 5 | 6 | class Migration(migrations.Migration): 7 | 8 | dependencies = [ 9 | ('bmon', '0011_host_mempoolreject_unique_reject_host_unique_host'), 10 | ] 11 | 12 | operations = [ 13 | migrations.RenameField( 14 | model_name='logprogress', 15 | old_name='host', 16 | new_name='hostname', 17 | ), 18 | ] 19 | -------------------------------------------------------------------------------- /bmon/migrations/0013_remove_peer_unique_peer_blockconnectedevent_hostobj_and_more.py: -------------------------------------------------------------------------------- 1 | # Generated by Django 4.1.2 on 2022-10-26 18:07 2 | 3 | from django.db import migrations, models 4 | import django.db.models.deletion 5 | 6 | 7 | class Migration(migrations.Migration): 8 | 9 | dependencies = [ 10 | ('bmon', '0012_rename_host_logprogress_hostname'), 11 | ] 12 | 13 | operations = [ 14 | migrations.RemoveConstraint( 15 | model_name='peer', 16 | name='unique_peer', 17 | ), 18 | migrations.AddField( 19 | model_name='blockconnectedevent', 20 | name='hostobj', 21 | field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='bmon.host'), 22 | ), 23 | migrations.AddField( 24 | model_name='blockdisconnectedevent', 25 | name='hostobj', 26 | field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='bmon.host'), 27 | ), 28 | migrations.AddField( 29 | model_name='connectblockdetails', 30 | name='hostobj', 31 | field=models.ForeignKey(null=True, 
on_delete=django.db.models.deletion.CASCADE, to='bmon.host'), 32 | ), 33 | migrations.AddField( 34 | model_name='connectblockevent', 35 | name='hostobj', 36 | field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='bmon.host'), 37 | ), 38 | migrations.AddField( 39 | model_name='mempoolreject', 40 | name='hostobj', 41 | field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='bmon.host'), 42 | ), 43 | migrations.AddField( 44 | model_name='peer', 45 | name='hostobj', 46 | field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='bmon.host'), 47 | ), 48 | migrations.AddField( 49 | model_name='reorgevent', 50 | name='hostobj', 51 | field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='bmon.host'), 52 | ), 53 | migrations.AddField( 54 | model_name='requestblockevent', 55 | name='hostobj', 56 | field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='bmon.host'), 57 | ), 58 | migrations.AddConstraint( 59 | model_name='peer', 60 | constraint=models.UniqueConstraint(fields=('host', 'hostobj', 'num', 'addr', 'connection_type', 'inbound', 'network', 'services', 'subver', 'version', 'relaytxes', 'bip152_hb_from', 'bip152_hb_to'), name='unique_peer'), 61 | ), 62 | ] 63 | -------------------------------------------------------------------------------- /bmon/migrations/0014_rename_host_processlineerror_hostname.py: -------------------------------------------------------------------------------- 1 | # Generated by Django 4.1.2 on 2022-10-26 18:11 2 | 3 | from django.db import migrations 4 | 5 | 6 | class Migration(migrations.Migration): 7 | 8 | dependencies = [ 9 | ('bmon', '0013_remove_peer_unique_peer_blockconnectedevent_hostobj_and_more'), 10 | ] 11 | 12 | operations = [ 13 | migrations.RenameField( 14 | model_name='processlineerror', 15 | old_name='host', 16 | new_name='hostname', 17 | ), 18 | ] 19 | -------------------------------------------------------------------------------- /bmon/migrations/0015_remove_mempoolreject_unique_reject_and_more.py: -------------------------------------------------------------------------------- 1 | # Generated by Django 4.1.2 on 2022-10-26 18:23 2 | 3 | from django.db import migrations, models 4 | 5 | 6 | class Migration(migrations.Migration): 7 | 8 | dependencies = [ 9 | ('bmon', '0014_rename_host_processlineerror_hostname'), 10 | ] 11 | 12 | operations = [ 13 | migrations.RemoveConstraint( 14 | model_name='mempoolreject', 15 | name='unique_reject', 16 | ), 17 | migrations.AddConstraint( 18 | model_name='mempoolreject', 19 | constraint=models.UniqueConstraint(fields=('host', 'hostobj', 'timestamp', 'txhash', 'peer_num'), name='unique_reject'), 20 | ), 21 | ] 22 | -------------------------------------------------------------------------------- /bmon/migrations/0016_remove_mempoolreject_unique_reject_and_more.py: -------------------------------------------------------------------------------- 1 | # Generated by Django 4.1.2 on 2022-10-26 19:45 2 | 3 | from django.db import migrations, models 4 | import django.db.models.deletion 5 | 6 | 7 | class Migration(migrations.Migration): 8 | 9 | dependencies = [ 10 | ('bmon', '0015_remove_mempoolreject_unique_reject_and_more'), 11 | ] 12 | 13 | operations = [ 14 | migrations.RemoveConstraint( 15 | model_name='mempoolreject', 16 | name='unique_reject', 17 | ), 18 | migrations.RemoveConstraint( 19 | model_name='peer', 20 | name='unique_peer', 21 | ), 22 | migrations.RemoveField( 23 | 
model_name='blockconnectedevent', 24 | name='host', 25 | ), 26 | migrations.RemoveField( 27 | model_name='blockdisconnectedevent', 28 | name='host', 29 | ), 30 | migrations.RemoveField( 31 | model_name='connectblockdetails', 32 | name='host', 33 | ), 34 | migrations.RemoveField( 35 | model_name='connectblockevent', 36 | name='host', 37 | ), 38 | migrations.RemoveField( 39 | model_name='mempoolreject', 40 | name='host', 41 | ), 42 | migrations.RemoveField( 43 | model_name='peer', 44 | name='host', 45 | ), 46 | migrations.RemoveField( 47 | model_name='reorgevent', 48 | name='host', 49 | ), 50 | migrations.RemoveField( 51 | model_name='requestblockevent', 52 | name='host', 53 | ), 54 | migrations.AlterField( 55 | model_name='blockconnectedevent', 56 | name='hostobj', 57 | field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='bmon.host'), 58 | ), 59 | migrations.AlterField( 60 | model_name='blockdisconnectedevent', 61 | name='hostobj', 62 | field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='bmon.host'), 63 | ), 64 | migrations.AlterField( 65 | model_name='connectblockdetails', 66 | name='hostobj', 67 | field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='bmon.host'), 68 | ), 69 | migrations.AlterField( 70 | model_name='connectblockevent', 71 | name='hostobj', 72 | field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='bmon.host'), 73 | ), 74 | migrations.AlterField( 75 | model_name='mempoolreject', 76 | name='hostobj', 77 | field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='bmon.host'), 78 | ), 79 | migrations.AlterField( 80 | model_name='peer', 81 | name='hostobj', 82 | field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='bmon.host'), 83 | ), 84 | migrations.AlterField( 85 | model_name='reorgevent', 86 | name='hostobj', 87 | field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='bmon.host'), 88 | ), 89 | migrations.AlterField( 90 | model_name='requestblockevent', 91 | name='hostobj', 92 | field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='bmon.host'), 93 | ), 94 | migrations.AddConstraint( 95 | model_name='mempoolreject', 96 | constraint=models.UniqueConstraint(fields=('hostobj', 'timestamp', 'txhash', 'peer_num'), name='unique_reject'), 97 | ), 98 | migrations.AddConstraint( 99 | model_name='peer', 100 | constraint=models.UniqueConstraint(fields=('hostobj', 'num', 'addr', 'connection_type', 'inbound', 'network', 'services', 'subver', 'version', 'relaytxes', 'bip152_hb_from', 'bip152_hb_to'), name='unique_peer'), 101 | ), 102 | ] 103 | -------------------------------------------------------------------------------- /bmon/migrations/0017_remove_mempoolreject_unique_reject_and_more.py: -------------------------------------------------------------------------------- 1 | # Generated by Django 4.1.2 on 2022-10-26 19:57 2 | 3 | from django.db import migrations, models 4 | 5 | 6 | class Migration(migrations.Migration): 7 | 8 | dependencies = [ 9 | ('bmon', '0016_remove_mempoolreject_unique_reject_and_more'), 10 | ] 11 | 12 | operations = [ 13 | migrations.RemoveConstraint( 14 | model_name='mempoolreject', 15 | name='unique_reject', 16 | ), 17 | migrations.RemoveConstraint( 18 | model_name='peer', 19 | name='unique_peer', 20 | ), 21 | migrations.RenameField( 22 | model_name='blockconnectedevent', 23 | old_name='hostobj', 24 | new_name='host', 25 | ), 26 | migrations.RenameField( 27 | model_name='blockdisconnectedevent', 28 | 
old_name='hostobj', 29 | new_name='host', 30 | ), 31 | migrations.RenameField( 32 | model_name='connectblockdetails', 33 | old_name='hostobj', 34 | new_name='host', 35 | ), 36 | migrations.RenameField( 37 | model_name='connectblockevent', 38 | old_name='hostobj', 39 | new_name='host', 40 | ), 41 | migrations.RenameField( 42 | model_name='mempoolreject', 43 | old_name='hostobj', 44 | new_name='host', 45 | ), 46 | migrations.RenameField( 47 | model_name='peer', 48 | old_name='hostobj', 49 | new_name='host', 50 | ), 51 | migrations.RenameField( 52 | model_name='reorgevent', 53 | old_name='hostobj', 54 | new_name='host', 55 | ), 56 | migrations.RenameField( 57 | model_name='requestblockevent', 58 | old_name='hostobj', 59 | new_name='host', 60 | ), 61 | migrations.AddConstraint( 62 | model_name='mempoolreject', 63 | constraint=models.UniqueConstraint(fields=('host', 'timestamp', 'txhash', 'peer_num'), name='unique_reject'), 64 | ), 65 | migrations.AddConstraint( 66 | model_name='peer', 67 | constraint=models.UniqueConstraint(fields=('host', 'num', 'addr', 'connection_type', 'inbound', 'network', 'services', 'subver', 'version', 'relaytxes', 'bip152_hb_from', 'bip152_hb_to'), name='unique_peer'), 68 | ), 69 | ] 70 | -------------------------------------------------------------------------------- /bmon/migrations/0018_alter_host_name.py: -------------------------------------------------------------------------------- 1 | # Generated by Django 4.1.2 on 2022-10-28 19:20 2 | 3 | from django.db import migrations, models 4 | 5 | 6 | class Migration(migrations.Migration): 7 | 8 | dependencies = [ 9 | ('bmon', '0017_remove_mempoolreject_unique_reject_and_more'), 10 | ] 11 | 12 | operations = [ 13 | migrations.AlterField( 14 | model_name='host', 15 | name='name', 16 | field=models.CharField(max_length=256), 17 | ), 18 | ] 19 | -------------------------------------------------------------------------------- /bmon/migrations/0019_peerstats.py: -------------------------------------------------------------------------------- 1 | # Generated by Django 4.1.2 on 2022-10-29 21:56 2 | 3 | from django.db import migrations, models 4 | import django.db.models.deletion 5 | 6 | 7 | class Migration(migrations.Migration): 8 | 9 | dependencies = [ 10 | ('bmon', '0018_alter_host_name'), 11 | ] 12 | 13 | operations = [ 14 | migrations.CreateModel( 15 | name='PeerStats', 16 | fields=[ 17 | ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), 18 | ('created_at', models.DateTimeField(auto_now_add=True)), 19 | ('num_peers', models.IntegerField()), 20 | ('ping_mean', models.FloatField()), 21 | ('ping_min', models.FloatField()), 22 | ('ping_max', models.FloatField()), 23 | ('bytesrecv', models.FloatField()), 24 | ('bytessent', models.FloatField()), 25 | ('bytesrecv_per_msg', models.JSONField()), 26 | ('bytessent_per_msg', models.JSONField()), 27 | ('host', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='bmon.host')), 28 | ], 29 | options={ 30 | 'abstract': False, 31 | }, 32 | ), 33 | ] 34 | -------------------------------------------------------------------------------- /bmon/migrations/0020_remove_host_unique_host_host_bitcoin_listen_and_more.py: -------------------------------------------------------------------------------- 1 | # Generated by Django 4.1.3 on 2022-11-07 20:23 2 | 3 | from django.db import migrations, models 4 | 5 | 6 | class Migration(migrations.Migration): 7 | 8 | dependencies = [ 9 | ('bmon', '0019_peerstats'), 10 | ] 11 | 12 | operations = 
[ 13 | migrations.RemoveConstraint( 14 | model_name='host', 15 | name='unique_host', 16 | ), 17 | migrations.AddField( 18 | model_name='host', 19 | name='bitcoin_listen', 20 | field=models.IntegerField(default=0), 21 | ), 22 | migrations.AddConstraint( 23 | model_name='host', 24 | constraint=models.UniqueConstraint(fields=('name', 'cpu_info', 'memory_bytes', 'nproc', 'bitcoin_version', 'bitcoin_gitref', 'bitcoin_gitsha', 'bitcoin_dbcache', 'bitcoin_prune', 'bitcoin_extra', 'bitcoin_listen'), name='unique_host'), 25 | ), 26 | ] 27 | -------------------------------------------------------------------------------- /bmon/migrations/0021_alter_host_bitcoin_listen.py: -------------------------------------------------------------------------------- 1 | # Generated by Django 4.1.3 on 2022-11-07 20:51 2 | 3 | from django.db import migrations, models 4 | 5 | 6 | class Migration(migrations.Migration): 7 | 8 | dependencies = [ 9 | ('bmon', '0020_remove_host_unique_host_host_bitcoin_listen_and_more'), 10 | ] 11 | 12 | operations = [ 13 | migrations.AlterField( 14 | model_name='host', 15 | name='bitcoin_listen', 16 | field=models.BooleanField(default=False), 17 | ), 18 | ] 19 | -------------------------------------------------------------------------------- /bmon/migrations/0022_host_disabled.py: -------------------------------------------------------------------------------- 1 | # Generated by Django 4.1.7 on 2023-04-12 15:44 2 | 3 | from django.db import migrations, models 4 | 5 | 6 | class Migration(migrations.Migration): 7 | 8 | dependencies = [ 9 | ('bmon', '0021_alter_host_bitcoin_listen'), 10 | ] 11 | 12 | operations = [ 13 | migrations.AddField( 14 | model_name='host', 15 | name='disabled', 16 | field=models.BooleanField(default=False), 17 | ), 18 | ] 19 | -------------------------------------------------------------------------------- /bmon/migrations/0023_host_bmon_host_name_88c4dd_idx_and_more.py: -------------------------------------------------------------------------------- 1 | # Generated by Django 4.1.7 on 2023-04-12 19:35 2 | 3 | from django.db import migrations, models 4 | 5 | 6 | class Migration(migrations.Migration): 7 | 8 | dependencies = [ 9 | ('bmon', '0022_host_disabled'), 10 | ] 11 | 12 | operations = [ 13 | migrations.AddIndex( 14 | model_name='host', 15 | index=models.Index(fields=['name'], name='bmon_host_name_88c4dd_idx'), 16 | ), 17 | migrations.AddIndex( 18 | model_name='peerstats', 19 | index=models.Index(fields=['created_at'], name='bmon_peerst_created_f06d50_idx'), 20 | ), 21 | ] 22 | -------------------------------------------------------------------------------- /bmon/migrations/0024_alter_mempoolreject_timestamp.py: -------------------------------------------------------------------------------- 1 | # Generated by Django 4.1.7 on 2023-04-12 20:02 2 | 3 | from django.db import migrations, models 4 | 5 | 6 | class Migration(migrations.Migration): 7 | 8 | dependencies = [ 9 | ('bmon', '0023_host_bmon_host_name_88c4dd_idx_and_more'), 10 | ] 11 | 12 | operations = [ 13 | migrations.AlterField( 14 | model_name='mempoolreject', 15 | name='timestamp', 16 | field=models.DateTimeField(db_index=True), 17 | ), 18 | ] 19 | -------------------------------------------------------------------------------- /bmon/migrations/0025_blockdownloadtimeout.py: -------------------------------------------------------------------------------- 1 | # Generated by Django 4.2 on 2023-05-15 20:06 2 | 3 | from django.db import migrations, models 4 | import django.db.models.deletion 5 | 6 | 7 | class 
Migration(migrations.Migration): 8 | 9 | dependencies = [ 10 | ('bmon', '0024_alter_mempoolreject_timestamp'), 11 | ] 12 | 13 | operations = [ 14 | migrations.CreateModel( 15 | name='BlockDownloadTimeout', 16 | fields=[ 17 | ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), 18 | ('created_at', models.DateTimeField(auto_now_add=True)), 19 | ('timestamp', models.DateTimeField(db_index=True)), 20 | ('blockhash', models.CharField(max_length=80)), 21 | ('peer_num', models.IntegerField()), 22 | ('host', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='bmon.host')), 23 | ('peer', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='bmon.peer')), 24 | ], 25 | options={ 26 | 'abstract': False, 27 | }, 28 | ), 29 | ] 30 | -------------------------------------------------------------------------------- /bmon/migrations/0026_headertotipevent.py: -------------------------------------------------------------------------------- 1 | # Generated by Django 4.2 on 2023-05-16 19:42 2 | 3 | from django.db import migrations, models 4 | import django.db.models.deletion 5 | 6 | 7 | class Migration(migrations.Migration): 8 | 9 | dependencies = [ 10 | ('bmon', '0025_blockdownloadtimeout'), 11 | ] 12 | 13 | operations = [ 14 | migrations.CreateModel( 15 | name='HeaderToTipEvent', 16 | fields=[ 17 | ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), 18 | ('created_at', models.DateTimeField(auto_now_add=True)), 19 | ('blockhash', models.CharField(max_length=80)), 20 | ('height', models.IntegerField()), 21 | ('saw_header_at', models.DateTimeField(help_text='When we first saw the Saw new header message')), 22 | ('reconstruct_block_at', models.DateTimeField(help_text='When the (compact)block was reconstructed')), 23 | ('tip_at', models.DateTimeField(help_text='When the block became tip')), 24 | ('header_to_tip_secs', models.FloatField(help_text='Time between header seen and new tip appended')), 25 | ('header_to_block_secs', models.FloatField(help_text='Time between header seen and full block obtained')), 26 | ('block_to_tip_secs', models.FloatField(help_text='Time between full block obtained and tip updated')), 27 | ('blocktime_minus_header_secs', models.FloatField(help_text='Difference between blocktime and header seen')), 28 | ('reconstruction_data', models.JSONField(blank=True, default=dict, help_text='Extra data associated with the block reconstruction')), 29 | ('host', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='bmon.host')), 30 | ], 31 | options={ 32 | 'abstract': False, 33 | }, 34 | ), 35 | ] 36 | -------------------------------------------------------------------------------- /bmon/migrations/0027_mempoolreject_wtxid.py: -------------------------------------------------------------------------------- 1 | # Generated by Django 4.2 on 2023-10-11 13:37 2 | 3 | from django.db import migrations, models 4 | 5 | 6 | class Migration(migrations.Migration): 7 | 8 | dependencies = [ 9 | ('bmon', '0026_headertotipevent'), 10 | ] 11 | 12 | operations = [ 13 | migrations.AddField( 14 | model_name='mempoolreject', 15 | name='wtxid', 16 | field=models.CharField(blank=True, max_length=80, null=True), 17 | ), 18 | ] 19 | -------------------------------------------------------------------------------- /bmon/migrations/__init__.py: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/chaincodelabs/bmon/39a0f8204f48cd9f84a9d181abaf69ab43747bd0/bmon/migrations/__init__.py -------------------------------------------------------------------------------- /bmon/redis_util.py: -------------------------------------------------------------------------------- 1 | import logging 2 | 3 | import redis 4 | 5 | log = logging.getLogger(__name__) 6 | 7 | 8 | def try_set_key( 9 | redisdb: redis.Redis, keyname: str, content: str | float | int, **kwargs 10 | ) -> bool: 11 | """ 12 | Ensure we set a key in Redis, retrying if necessary. 13 | """ 14 | tries = 3 15 | while tries > 0: 16 | if redisdb.set(keyname, content, **kwargs): 17 | return True 18 | 19 | log.error("failed to set key %s; retrying", keyname) 20 | tries -= 1 21 | 22 | log.error("failed to set key %s", keyname) 23 | return False 24 | -------------------------------------------------------------------------------- /bmon/server_monitor.py: -------------------------------------------------------------------------------- 1 | import os 2 | import signal 3 | import sys 4 | from wsgiref.simple_server import make_server 5 | import logging 6 | 7 | import django 8 | 9 | os.environ.setdefault("DJANGO_SETTINGS_MODULE", "bmon.settings") 10 | django.setup() 11 | from clii import App 12 | from prometheus_client import make_wsgi_app, Gauge 13 | 14 | from . import server_tasks, models, util 15 | from .mempool import PolicyCohort, MempoolAcceptAggregator 16 | from .hosts import get_bitcoind_hosts_to_policy_cohort 17 | from bmon_infra import infra 18 | 19 | 20 | log = logging.getLogger(__name__) 21 | 22 | cli = App() 23 | 24 | 25 | SERVER_EVENT_QUEUE_DEPTH = Gauge( 26 | "bmon_server_event_queue_depth", 27 | "The depth of the queue processing all events.", 28 | ) 29 | 30 | SERVER_MEMPOOL_EVENT_QUEUE_DEPTH = Gauge( 31 | "bmon_server_mempool_event_queue_depth", 32 | "The depth of the queue processing for server mempool events.", 33 | ) 34 | 35 | MEMPOOL_TOTAL_TXIDS_ACCEPTED = Gauge( 36 | "bmon_mempool_total_txids_accepted", 37 | "The number of unique transactions we've accepted to all mempools", 38 | ) 39 | 40 | HOST_LABELS = ["host", "bitcoin_version", "region", "cohort"] 41 | 42 | MEMPOOL_TOTAL_TXIDS_ACCEPTED_PER_HOST = Gauge( 43 | "bmon_mempool_total_txids_accepted_per_host", 44 | "The number of unique transactions we've accepted to a host's mempool", 45 | HOST_LABELS, 46 | ) 47 | 48 | MEMPOOL_TOTAL_TXIDS_IN_HOUR = Gauge( 49 | "bmon_mempool_total_txids_accepted_last_hour", 50 | "The number of unique transactions we've accepted to all mempools in the last hour", 51 | ) 52 | 53 | 54 | MEMPOOL_TOTAL_TXIDS_IN_HOUR_PER_HOST = Gauge( 55 | "bmon_mempool_total_txids_accepted_last_hour_per_host", 56 | "The number of unique transactions we've accepted to all mempools in the last hour " 57 | " per host", 58 | HOST_LABELS, 59 | ) 60 | 61 | MEMPOOL_TOTAL_TXIDS_ACCEPTED_BY_ALL_IN_HOUR = Gauge( 62 | "bmon_mempool_total_txids_accepted_by_all_last_hour", 63 | "The number of txids accepted by all hosts in the last hour", 64 | ) 65 | 66 | MEMPOOL_TOTAL_TXIDS_ACCEPTED_BY_COHORT_IN_HOUR = Gauge( 67 | "bmon_mempool_total_txids_accepted_by_cohort_last_hour", 68 | "The number of txids accepted by all hosts in a policy cohort in the last hour", 69 | ["cohort"], 70 | ) 71 | 72 | MEMPOOL_MAX_PROPAGATION_SPREAD_IN_HOUR = Gauge( 73 | "bmon_mempool_max_propagation_spread_in_hour", 74 | "The greatest difference between the first host and the last host seeing a " 75 | "particular transaction (in the last hour)", 76 | ) 77 | 78 | 
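# Illustrative aside (hypothetical example, not part of the original module): every
# Gauge declared with HOST_LABELS has to be set through .labels() with a value for
# each label name, e.g. (made-up values):
#
#   MEMPOOL_TOTAL_TXIDS_ACCEPTED_PER_HOST.labels(
#       host="bitcoind-01",
#       bitcoin_version="v25.0",
#       region="us-east",
#       cohort="taproot",
#   ).set(123)
#
# refresh_metrics() below builds one such label dict per host (labels_for_host) and
# reuses it when setting the per-host gauges.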
MEMPOOL_MIN_PROPAGATION_SPREAD_IN_HOUR = Gauge( 79 | "bmon_mempool_min_propagation_spread_in_hour", 80 | "The least difference between the first host and the last host seeing a " 81 | "particular transaction (in the last hour)", 82 | ) 83 | 84 | REDIS_KEYS = Gauge( 85 | "bmon_server_redis_key_count", 86 | "The number of keys in the server redis instance", 87 | ) 88 | 89 | TASK_COUNT = Gauge( 90 | "bmon_pending_task_count", 91 | "The number of each kind of async task pending for execution", 92 | ["name"], 93 | ) 94 | 95 | 96 | def refresh_metrics( 97 | mempool_agg: MempoolAcceptAggregator | None = None, 98 | hosts_to_cohort: dict[models.Host, PolicyCohort] | None = None, 99 | ): 100 | SERVER_EVENT_QUEUE_DEPTH.set(len(server_tasks.server_q)) 101 | SERVER_MEMPOOL_EVENT_QUEUE_DEPTH.set(len(server_tasks.mempool_q)) 102 | 103 | host_to_cohort = hosts_to_cohort or get_bitcoind_hosts_to_policy_cohort() 104 | labels_for_host: dict[str, dict[str, str]] = { 105 | h.name: { 106 | "host": h.name, 107 | "bitcoin_version": h.bitcoin_version, 108 | "region": h.region or '', 109 | "cohort": cohort.name, 110 | } 111 | for h, cohort in host_to_cohort.items() 112 | } 113 | 114 | mempool_agg = mempool_agg or server_tasks.get_mempool_aggregator() 115 | MEMPOOL_TOTAL_TXIDS_ACCEPTED.set(mempool_agg.get_total_txids_processed()) 116 | 117 | for host, total in mempool_agg.get_total_txids_processed_per_host().items(): 118 | MEMPOOL_TOTAL_TXIDS_ACCEPTED_PER_HOST.labels(**labels_for_host[host]).set(total) 119 | 120 | REDIS_KEYS.set(server_tasks.redisdb.dbsize()) 121 | 122 | total_txids_in_hour = 0 123 | total_txids_in_hour_per_host = {h: 0 for h in mempool_agg.host_to_cohort.keys()} 124 | total_txids_in_hour_by_all = 0 125 | total_txids_in_hour_by_cohort: dict[PolicyCohort, int] = { 126 | cohort: 0 for cohort in PolicyCohort 127 | } 128 | max_spread = 0.0 129 | min_spread = 1e6 130 | 131 | for event in mempool_agg.get_propagation_events(): 132 | total_txids_in_hour += 1 133 | 134 | for host in event.host_to_timestamp: 135 | total_txids_in_hour_per_host[host] += 1 136 | 137 | if event.all_complete: 138 | total_txids_in_hour_by_all += 1 139 | 140 | for cohort in event.cohorts_complete: 141 | total_txids_in_hour_by_cohort[cohort] += 1 142 | 143 | if event.spread > max_spread: 144 | max_spread = event.spread 145 | 146 | if event.spread < min_spread: 147 | min_spread = event.spread 148 | 149 | MEMPOOL_TOTAL_TXIDS_IN_HOUR.set(total_txids_in_hour) 150 | 151 | for host, total in total_txids_in_hour_per_host.items(): 152 | MEMPOOL_TOTAL_TXIDS_IN_HOUR_PER_HOST.labels(**labels_for_host[host]).set(total) 153 | 154 | MEMPOOL_TOTAL_TXIDS_ACCEPTED_BY_ALL_IN_HOUR.set(total_txids_in_hour_by_all) 155 | 156 | for cohort, total in total_txids_in_hour_by_cohort.items(): 157 | MEMPOOL_TOTAL_TXIDS_ACCEPTED_BY_COHORT_IN_HOUR.labels(cohort.name).set(total) 158 | 159 | MEMPOOL_MAX_PROPAGATION_SPREAD_IN_HOUR.set(max_spread) 160 | MEMPOOL_MIN_PROPAGATION_SPREAD_IN_HOUR.set(min_spread) 161 | 162 | # TOO SLOW 163 | # counts = util.get_task_counts() 164 | counts = util.get_task_counts_fast() 165 | 166 | for name, count in counts.items(): 167 | TASK_COUNT.labels(name=name).set(count) 168 | 169 | 170 | def sigterm_handler(*_): 171 | print("exiting") 172 | sys.exit(0) 173 | 174 | 175 | @cli.main 176 | def main(addr="0.0.0.0", port=infra.SERVER_EXPORTER_PORT): 177 | app = make_wsgi_app() 178 | 179 | signal.signal(signal.SIGTERM, sigterm_handler) 180 | 181 | def refresh(*args, **kwargs): 182 | try: 183 | refresh_metrics() 184 | except Exception: 185 | 
log.exception("failed to refresh bmon server metrics") 186 | 187 | return app(*args, **kwargs) 188 | 189 | httpd = make_server(addr, port, refresh) 190 | print(f"serving bmon server monitor {addr}:{port}") 191 | httpd.serve_forever() 192 | -------------------------------------------------------------------------------- /bmon/server_tasks.py: -------------------------------------------------------------------------------- 1 | """ 2 | Tasks that execute on the central bmon server. 3 | 4 | There are only one of these workers, so here is where we queue up periodic analysis 5 | tasks that should only be run in one place, with a view of the whole herd of bitcoind 6 | nodes. 7 | """ 8 | import os 9 | import time 10 | import logging 11 | import datetime 12 | from collections import defaultdict 13 | 14 | import django 15 | import redis 16 | from django.conf import settings 17 | from huey import RedisHuey, crontab 18 | 19 | os.environ.setdefault("DJANGO_SETTINGS_MODULE", "bmon.settings") 20 | django.setup() 21 | 22 | from bmon import models, mempool, bitcoin 23 | from .hosts import get_bitcoind_hosts_to_policy_cohort 24 | 25 | 26 | log = logging.getLogger(__name__) 27 | 28 | server_q = RedisHuey( 29 | "bmon-server", 30 | url=settings.REDIS_SERVER_URL, 31 | immediate=settings.TESTING, 32 | duration_warn=5, # warn if task takes longer than 5 seconds 33 | ) 34 | 35 | # Special-case mempool events because they're so high volume; if something goes 36 | # wrong, we don't want to disrupt other event types. 37 | mempool_q = RedisHuey( 38 | "bmon-server-mempool", 39 | url=settings.REDIS_SERVER_URL, 40 | immediate=settings.TESTING, 41 | results=False, 42 | duration_warn=5, # warn if task takes longer than 5 seconds 43 | ) 44 | 45 | redisdb = redis.Redis.from_url(settings.REDIS_SERVER_URL, decode_responses=True) 46 | 47 | 48 | def get_mempool_aggregator() -> mempool.MempoolAcceptAggregator: 49 | """ 50 | Cache this for 90 seconds; we want to refresh periodically in case host versions 51 | change, potentially putting them in a different policy cohort. 
52 | """ 53 | SECONDS_TO_CACHE = 90 54 | CACHE_KEY = '__cache' 55 | 56 | if (got := getattr(get_mempool_aggregator, CACHE_KEY, None)): 57 | [ts, cached] = got 58 | if (time.time() - ts) <= SECONDS_TO_CACHE: 59 | return cached 60 | 61 | hosts_to_policy = { 62 | h.name: v for h, v in get_bitcoind_hosts_to_policy_cohort().items()} 63 | 64 | agg = mempool.MempoolAcceptAggregator(redisdb, hosts_to_policy) 65 | 66 | setattr(get_mempool_aggregator, CACHE_KEY, (time.time(), agg)) 67 | return get_mempool_aggregator() 68 | 69 | 70 | @server_q.periodic_task(crontab(minute="*/10")) 71 | def check_for_overlapping_peers(): 72 | def getpeerinfo(rpc): 73 | return rpc.getpeerinfo() 74 | 75 | results = bitcoin.gather_rpc(getpeerinfo) 76 | peer_to_hosts = defaultdict(list) 77 | hosts_contacted = [] 78 | 79 | for hostname, peers in results.items(): 80 | if peers == bitcoin.RPC_ERROR_RESULT: 81 | log.warning("Unable to retrieve peers from host %r", hostname) 82 | continue 83 | 84 | hosts_contacted.append(hostname) 85 | for peer in peers: 86 | peer_to_hosts[peer["addr"]].append(hostname) 87 | 88 | print( 89 | "%d peers found across %d hosts (%s)" 90 | % (len(peer_to_hosts), len(hosts_contacted), ", ".join(hosts_contacted)) 91 | ) 92 | for peer, hosts in peer_to_hosts.items(): 93 | if len(hosts) > 1: 94 | print("peer overlap detected for %r: %s" % (peer, hosts)) 95 | 96 | 97 | @server_q.task() 98 | def persist_bitcoind_event(event: dict, _: str): 99 | modelname = event.pop("_model") 100 | Model = getattr(models, modelname) 101 | 102 | if Model == models.MempoolReject: 103 | # XXX this is an ugly hack: Django doesn't suffix with "_id" in `model_to_dict`; 104 | # come up with a better way of dealing with this. 105 | event["peer_id"] = event.pop("peer") 106 | 107 | if "host" in event: 108 | event["host_id"] = event.pop("host") 109 | 110 | instance = Model.objects.create(**event) 111 | print(f"Saved {instance}") 112 | 113 | 114 | @mempool_q.task() 115 | def process_mempool_accept(txid: str, seen_at: datetime.datetime, host: str): 116 | agg = get_mempool_aggregator() 117 | 118 | if agg.mark_seen(host, txid, seen_at) == mempool.PropagationStatus.CompleteAll: 119 | agg.finalize_propagation(txid, assert_complete=True) 120 | 121 | 122 | @mempool_q.periodic_task(crontab(minute="*/1")) 123 | @mempool_q.lock_task('process_aged_propagations') 124 | def process_aged_propagations(): 125 | agg = get_mempool_aggregator() 126 | agg.process_all_aged() 127 | -------------------------------------------------------------------------------- /bmon/settings.py: -------------------------------------------------------------------------------- 1 | """ 2 | Django settings for bmon project. 3 | 4 | Generated by 'django-admin startproject' using Django 4.1.1. 5 | 6 | For more information on this file, see 7 | https://docs.djangoproject.com/en/4.1/topics/settings/ 8 | 9 | For the full list of settings and their values, see 10 | https://docs.djangoproject.com/en/4.1/ref/settings/ 11 | """ 12 | 13 | import os 14 | from pathlib import Path 15 | 16 | import sentry_sdk 17 | from sentry_sdk.integrations.django import DjangoIntegration 18 | 19 | 20 | # Build paths inside the project like this: BASE_DIR / 'subdir'. 21 | BASE_DIR = Path(__file__).resolve().parent.parent 22 | 23 | HOSTNAME = os.environ.get('BMON_HOSTNAME', 'FIXME') 24 | TESTING = False 25 | 26 | # Only bitcoind-monitoring nodes have a local Redis cache. 
27 | REDIS_LOCAL_URL = os.environ.get('BMON_REDIS_LOCAL_URL') 28 | REDIS_LOCAL_HOST = os.environ.get('BMON_REDIS_LOCAL_HOST') 29 | 30 | # All installations must know about the central Redis instance. 31 | REDIS_SERVER_URL = os.environ.get('BMON_REDIS_SERVER_URL', 'FIXME') 32 | REDIS_SERVER_HOST = os.environ.get('BMON_REDIS_HOST', 'FIXME') 33 | 34 | BITCOIN_RPC_HOST = os.environ.get('BITCOIN_RPC_HOST') 35 | BITCOIN_RPC_USER = os.environ.get('BITCOIN_RPC_USER') 36 | BITCOIN_RPC_PASSWORD = os.environ.get('BITCOIN_RPC_PASSWORD') 37 | BITCOIN_RPC_PORT = os.environ.get('BITCOIN_RPC_PORT') 38 | 39 | BITCOIN_GITREF = os.environ.get('BITCOIN_GITREF') 40 | BITCOIN_GITSHA = os.environ.get('BITCOIN_GITSHA') 41 | BITCOIN_DBCACHE = os.environ.get('BITCOIN_DBCACHE') 42 | BITCOIN_PRUNE = os.environ.get('BITCOIN_PRUNE') 43 | BITCOIN_FLAGS = os.environ.get('BITCOIN_FLAGS') 44 | 45 | BITCOIND_VERSION_PATH = os.environ.get('BITCOIND_VERSION_PATH') 46 | 47 | BITCOIND_LOG_PATH = os.environ.get('BMON_BITCOIND_LOG_PATH') 48 | 49 | # GCP credentials for uploading mempool activity. 50 | CHAINCODE_GCP_CRED_PATH = os.environ.get('CHAINCODE_GCP_CRED_PATH') 51 | CHAINCODE_GCP_BUCKET = 'mempool-event-logs' 52 | 53 | # For testing 54 | LOCALHOST_AUTH_TOKEN = '4396049cdfe946f88ec63da115cbcfcf' 55 | 56 | # The location we write 57 | MEMPOOL_ACTIVITY_CACHE_PATH = Path(os.environ.get( 58 | 'MEMPOOL_ACTIVITY_CACHE_PATH', '/mempool-activity-cache')) 59 | 60 | # Quick-start development settings - unsuitable for production 61 | # See https://docs.djangoproject.com/en/4.1/howto/deployment/checklist/ 62 | 63 | # SECURITY WARNING: keep the secret key used in production secret! 64 | SECRET_KEY = 'django-insecure-wtsk76*1ci%=)05y^-t-9^0y^y1ku3iunp2(h&6*qo@vxzv#n3' 65 | 66 | # SECURITY WARNING: don't run with debug turned on in production! 
67 | DEBUG = os.environ.get('BMON_DEBUG') == "1" 68 | 69 | ALLOWED_HOSTS = ['*'] 70 | 71 | NO_LOG = { 72 | 'level': 'WARNING', 73 | 'handlers': [], 74 | 'propagate': False, 75 | } 76 | 77 | LOGGING = { 78 | 'version': 1, 79 | 'disable_existing_loggers': False, 80 | 'formatters': { 81 | 'console': { 82 | # exact format is not important, this is the minimum information 83 | 'format': '%(asctime)s %(name)s [%(levelname)s] %(message)s', 84 | }, 85 | }, 86 | 'handlers': { 87 | 'console': { 88 | 'class': 'logging.StreamHandler', 89 | 'formatter': 'console', 90 | }, 91 | }, 92 | 'loggers': { 93 | '': { 94 | 'level': 'DEBUG' if DEBUG else 'INFO', 95 | 'handlers': [ 96 | 'console', 97 | ], 98 | }, 99 | 'bitcoin-rpc': { 100 | 'level': 'DEBUG' if DEBUG else 'INFO', 101 | 'handlers': [ 102 | 'console', 103 | ], 104 | }, 105 | # 'huey': NO_LOG, 106 | 'parso': NO_LOG, 107 | 'clii': NO_LOG, 108 | }, 109 | } 110 | 111 | 112 | INSTALLED_APPS = [ 113 | 'bmon', 114 | 'django.contrib.admin', 115 | 'django.contrib.auth', 116 | 'django.contrib.contenttypes', 117 | 'django.contrib.sessions', 118 | 'django.contrib.messages', 119 | "whitenoise.runserver_nostatic", 120 | 'django.contrib.staticfiles', 121 | ] 122 | 123 | MIDDLEWARE = [ 124 | 'django.middleware.security.SecurityMiddleware', 125 | 'whitenoise.middleware.WhiteNoiseMiddleware', 126 | 'django.contrib.sessions.middleware.SessionMiddleware', 127 | 'django.middleware.common.CommonMiddleware', 128 | 'django.middleware.csrf.CsrfViewMiddleware', 129 | 'django.contrib.auth.middleware.AuthenticationMiddleware', 130 | 'django.contrib.messages.middleware.MessageMiddleware', 131 | 'django.middleware.clickjacking.XFrameOptionsMiddleware', 132 | ] 133 | 134 | ROOT_URLCONF = 'bmon.urls' 135 | 136 | SHARED_WITH_FRONTEND = BASE_DIR / 'frontend-build' 137 | 138 | TEMPLATES = [ 139 | { 140 | 'BACKEND': 'django.template.backends.django.DjangoTemplates', 141 | 'DIRS': [SHARED_WITH_FRONTEND], 142 | 'APP_DIRS': True, 143 | 'OPTIONS': { 144 | 'context_processors': [ 145 | 'django.template.context_processors.debug', 146 | 'django.template.context_processors.request', 147 | 'django.contrib.auth.context_processors.auth', 148 | 'django.contrib.messages.context_processors.messages', 149 | ], 150 | }, 151 | }, 152 | ] 153 | 154 | WSGI_APPLICATION = 'bmon.wsgi.application' 155 | 156 | 157 | # Database 158 | # https://docs.djangoproject.com/en/4.1/ref/settings/#databases 159 | 160 | DATABASES = { 161 | 'default': { 162 | 'ENGINE': 'django.db.backends.postgresql', 163 | 'NAME': os.environ.get('DB_NAME', 'bmon'), 164 | 'USER': os.environ.get('DB_USERNAME', 'bmon'), 165 | 'PASSWORD': os.environ.get('DB_PASSWORD', 'FIXME'), 166 | 'HOST': os.environ.get('DB_HOST', 'FIXME'), 167 | 'PORT': os.environ.get('DB_PORT', '5432'), 168 | }, 169 | } 170 | 171 | 172 | # Password validation 173 | # https://docs.djangoproject.com/en/4.1/ref/settings/#auth-password-validators 174 | 175 | AUTH_PASSWORD_VALIDATORS = [ 176 | { 177 | 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', 178 | }, 179 | { 180 | 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', 181 | }, 182 | { 183 | 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', 184 | }, 185 | { 186 | 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', 187 | }, 188 | ] 189 | 190 | 191 | # Internationalization 192 | # https://docs.djangoproject.com/en/4.1/topics/i18n/ 193 | 194 | LANGUAGE_CODE = 'en-us' 195 | 196 | TIME_ZONE = 'UTC' 197 | 198 | USE_I18N = 
True 199 | 200 | USE_TZ = True 201 | 202 | 203 | # Static files (CSS, JavaScript, Images) 204 | # https://docs.djangoproject.com/en/4.1/howto/static-files/ 205 | 206 | STATIC_URL = 'static/' 207 | STATIC_ROOT = BASE_DIR / "staticfiles-build" 208 | STATICFILES_DIRS = [ 209 | os.path.join(os.path.dirname(__file__), 'static'), 210 | SHARED_WITH_FRONTEND, 211 | ] 212 | 213 | # Default primary key field type 214 | # https://docs.djangoproject.com/en/4.1/ref/settings/#default-auto-field 215 | 216 | DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField' 217 | 218 | SENTRY_DSN = os.environ.get('SENTRY_DSN') 219 | 220 | if SENTRY_DSN: 221 | sentry_sdk.init( 222 | dsn=SENTRY_DSN, 223 | integrations=[DjangoIntegration()], 224 | 225 | # Set traces_sample_rate to 1.0 to capture 100% 226 | # of transactions for performance monitoring. 227 | # We recommend adjusting this value in production, 228 | traces_sample_rate=0.8, 229 | profiles_sample_rate=0.8, 230 | 231 | # If you wish to associate users to errors (assuming you are using 232 | # django.contrib.auth) you may enable sending PII data. 233 | send_default_pii=True, 234 | 235 | # By default the SDK will try to use the SENTRY_RELEASE 236 | # environment variable, or infer a git commit 237 | # SHA as release, however you may want to set 238 | # something more human-readable. 239 | # release="myapp@1.0.0", 240 | server_name=HOSTNAME, 241 | ) 242 | -------------------------------------------------------------------------------- /bmon/settings_test.py: -------------------------------------------------------------------------------- 1 | from bmon.settings import * # noqa 2 | from bmon import settings 3 | 4 | DEBUG = True 5 | TESTING = True 6 | 7 | DATABASES = { 8 | 'default': { 9 | 'ENGINE': 'django.db.backends.sqlite3', 10 | 'NAME': ':memory:', 11 | }, 12 | } 13 | 14 | REDIS_LOCAL_URL = "redis://redis:6379/11" 15 | REDIS_SERVER_URL = "redis://redis:6379/10" 16 | REDIS_HOST = "redis" 17 | REDIS_LOCAL_HOST = "redis" 18 | 19 | HOSTNAME = "test" 20 | 21 | BITCOIN_DBCACHE = settings.BITCOIN_DBCACHE or '1000' 22 | BITCOIN_PRUNE = settings.BITCOIN_PRUNE or '550' 23 | -------------------------------------------------------------------------------- /bmon/static/ninepin.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chaincodelabs/bmon/39a0f8204f48cd9f84a9d181abaf69ab43747bd0/bmon/static/ninepin.ttf -------------------------------------------------------------------------------- /bmon/static/style.css: -------------------------------------------------------------------------------- 1 | @font-face { 2 | font-family: 'ninepin'; 3 | src: url('/static/ninepin.ttf') format('truetype'); 4 | } 5 | 6 | body { 7 | background-color: #f8f8f8; 8 | padding: 3vw; 9 | } 10 | 11 | #content { 12 | font-family: monospace; 13 | padding-left: 1em; 14 | } 15 | 16 | #bmon-title { 17 | font-size: 3em; 18 | font-family: 'ninepin', monospace; 19 | padding: 0.3em; 20 | border-bottom: 1px solid #222; 21 | border-left: 1px solid #222; 22 | width: max-content; 23 | margin-bottom: 1em; 24 | background-color: #f8f8f8; 25 | } 26 | 27 | .block-connects, .hosts { 28 | display: flex; 29 | flex: 1; 30 | align-items: stretch; 31 | justify-content: left; 32 | overflow-x: scroll; 33 | scrollbar-width: none; 34 | padding-bottom: 17px; 35 | margin-bottom: 3em; 36 | } 37 | 38 | .block-connects::-webkit-scrollbar, .hosts::-webkit-scrollbar { 39 | display: none; 40 | } 41 | 42 | .block-connect, .host { 43 | padding: 0.8em; 44 | border: 1px 
solid #666; 45 | background-color: #fcf3ec; 46 | filter: drop-shadow(3px 2px 2px black); 47 | margin-right: 1.3em; 48 | } 49 | 50 | .host { 51 | background-color: #f5fbfb; 52 | } 53 | 54 | .block-connect { 55 | padding-right: 1.5rem; 56 | } 57 | 58 | .stats { 59 | width: max-content; 60 | } 61 | 62 | .stats .value table { 63 | font-size: 0.9rem; 64 | } 65 | 66 | .stats .title { 67 | font-weight: bold; 68 | margin-bottom: 0.2rem; 69 | } 70 | 71 | .stat { 72 | margin-bottom: 1em; 73 | } 74 | 75 | .card-title { 76 | font-weight: bold; 77 | font-size: 1.4em; 78 | margin-bottom: 0.8em; 79 | } 80 | 81 | .diffs { 82 | letter-spacing: -1px; 83 | } 84 | 85 | .diffs tr td:first-child { 86 | padding-right: 1em; 87 | } 88 | 89 | h1 { 90 | 91 | } 92 | -------------------------------------------------------------------------------- /bmon/templates/tips.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 10 | 11 | 12 |

Tips

<table>
  <tr>
    <th>Height</th>
    <th>Hostname</th>
    <th>Version</th>
    <th>Header-to-tip (s)</th>
    <th>Header-to-block (s)</th>
    <th>Block-to-tip (s)</th>
    <th>Num CB txns requested</th>
  </tr>
  {% for event in events %}
  <tr>
    <td>{{ event.saw_header_at }}</td>
    <td>{{ event.height }}</td>
    <td>{{ event.host.name }}</td>
    <td>{% firstof event.host.bitcoin_gitref event.host.bitcoin_version %}</td>
    <td>{{ event.header_to_tip_secs }}</td>
    <td>{{ event.header_to_block_secs }}</td>
    <td>{{ event.block_to_tip_secs }}</td>
    <td>{{ event.reconstruction_data.num_requested }}</td>
  </tr>
  {% endfor %}
</table>
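<!-- Illustrative sketch only (hypothetical; the real view lives in bmon/views.py and may
     differ): the `events` iterable rendered above could be produced by a view such as

       from django.shortcuts import render
       from bmon import models

       def tips(request):
           events = (
               models.HeaderToTipEvent.objects
               .select_related("host")
               .order_by("-saw_header_at")[:100]
           )
           return render(request, "tips.html", {"events": events})
-->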
40 | 41 | 42 | -------------------------------------------------------------------------------- /bmon/test_bitcoind.py: -------------------------------------------------------------------------------- 1 | 2 | from bmon.bitcoin import api 3 | 4 | 5 | def test_get_version(): 6 | api.bitcoind_version('25.0rc2') == ((25, 0), None) 7 | api.bitcoind_version('0.14.0') == ((0, 14, 0), None) 8 | -------------------------------------------------------------------------------- /bmon/test_bitcoind_tasks.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | from . import bitcoind_tasks, conftest 4 | 5 | 6 | @pytest.mark.django_db 7 | def test_peerstats(): 8 | peerdata = conftest.read_json_data("getpeerinfo.json") 9 | host = bitcoind_tasks.create_host_record() 10 | got = bitcoind_tasks.compute_peer_stats_blocking(peerdata) 11 | 12 | assert got.bytesrecv == 3774372281 13 | assert got.bytesrecv_per_msg == { 14 | "addrv2": 182499, 15 | "block": 3747147953, 16 | "blocktxn": 4151572, 17 | "cmpctblock": 514020, 18 | "feefilter": 320, 19 | "getblocktxn": 61, 20 | "getdata": 13008, 21 | "getheaders": 10530, 22 | "headers": 290930, 23 | "inv": 3589111, 24 | "notfound": 6973, 25 | "ping": 24128, 26 | "pong": 23808, 27 | "sendaddrv2": 240, 28 | "sendcmpct": 693, 29 | "sendheaders": 240, 30 | "tx": 18414454, 31 | "verack": 240, 32 | "version": 1261, 33 | "wtxidrelay": 240, 34 | } 35 | 36 | assert got.bytessent == 8264800 37 | assert got.bytessent_per_msg == { 38 | "addrv2": 50616, 39 | "blocktxn": 719, 40 | "cmpctblock": 229710, 41 | "feefilter": 512, 42 | "getaddr": 192, 43 | "getblocktxn": 3338, 44 | "getdata": 1527809, 45 | "getheaders": 17901, 46 | "headers": 57612, 47 | "inv": 6195104, 48 | "ping": 23808, 49 | "pong": 24128, 50 | "sendaddrv2": 240, 51 | "sendcmpct": 3663, 52 | "sendheaders": 240, 53 | "tx": 127458, 54 | "verack": 240, 55 | "version": 1270, 56 | "wtxidrelay": 240, 57 | } 58 | 59 | assert got.host == host 60 | assert got.created_at 61 | assert got.num_peers == 10 62 | assert got.ping_max == 0.216082 63 | assert got.ping_mean == 0.0859731 64 | assert got.ping_min == 0.008235 65 | -------------------------------------------------------------------------------- /bmon/test_hosts.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | from bmon import hosts, mempool 4 | 5 | 6 | @pytest.mark.django_db 7 | def test_policy_cohorts(fake_hosts): 8 | host_to_cohort = hosts.get_bitcoind_hosts_to_policy_cohort() 9 | 10 | assert {h.name: v for h, v in host_to_cohort.items()} == { 11 | 'bitcoind': mempool.PolicyCohort.segwit, 12 | 'bitcoind-02': mempool.PolicyCohort.taproot, 13 | } 14 | -------------------------------------------------------------------------------- /bmon/test_integration.py: -------------------------------------------------------------------------------- 1 | 2 | import pytest 3 | 4 | 5 | from . 
import conftest, bitcoind_tasks, server_tasks, mempool 6 | from .hosts import get_bitcoind_hosts_to_policy_cohort 7 | 8 | 9 | @pytest.mark.django_db 10 | def test_process_mempool_accepts(fake_hosts): 11 | logdata = conftest.read_data_file("mempool-accepts-log.txt") 12 | 13 | for host in fake_hosts: 14 | for line in logdata: 15 | bitcoind_tasks.process_line(line, host) 16 | 17 | hosts_to_policy = { 18 | h.name: v for h, v in get_bitcoind_hosts_to_policy_cohort().items()} 19 | 20 | agg = mempool.MempoolAcceptAggregator(server_tasks.redisdb, hosts_to_policy) 21 | 22 | events = list(agg.get_propagation_events()) 23 | assert len(events) == 50 24 | assert agg.get_total_txids_processed() == 50 25 | assert agg.get_total_txids_processed_per_host() == { 26 | 'bitcoind': 50, 27 | 'bitcoind-02': 50, 28 | } 29 | -------------------------------------------------------------------------------- /bmon/test_mempool.py: -------------------------------------------------------------------------------- 1 | from datetime import timedelta 2 | from django.utils import timezone 3 | 4 | import pytest 5 | 6 | from bmon import mempool, server_tasks, server_monitor, conftest, models 7 | 8 | 9 | @pytest.mark.django_db 10 | def test_mempool_accept_processing(): 11 | redis = server_tasks.redisdb 12 | hosts = { 13 | "a": mempool.PolicyCohort.segwit, 14 | "b": mempool.PolicyCohort.segwit, 15 | "c": mempool.PolicyCohort.taproot, 16 | "d": mempool.PolicyCohort.taproot, 17 | "e": mempool.PolicyCohort.taproot, 18 | } 19 | 20 | for host in hosts: 21 | conftest.make_host(host) 22 | 23 | agg = mempool.MempoolAcceptAggregator(redis, hosts) 24 | 25 | assert agg.get_total_txids_processed() == 0 26 | assert agg.get_total_txids_processed_per_host() == {} 27 | 28 | now = timezone.now() 29 | now_ts = now.timestamp() 30 | 31 | most_hosts = ("a", "b", "c", "d") 32 | for host in most_hosts: 33 | retval = agg.mark_seen(host, "txid1", now) 34 | 35 | if host == "b": 36 | assert retval is mempool.PropagationStatus.CompleteCohort 37 | else: 38 | assert retval is None 39 | 40 | for host in most_hosts: 41 | assert redis.get("mpa:txid1:%s" % host) 42 | assert redis.get("mpa:total_txids:%s" % host) == "1" 43 | 44 | assert not redis.get("mpa:txid1:e") 45 | assert not redis.get("mpa:total_txids:e") 46 | 47 | assert agg.get_total_txids_processed() == 1 48 | assert agg.get_total_txids_processed_per_host() == {h: 1 for h in most_hosts} 49 | 50 | assert agg.mark_seen("e", "txid2", now) is None 51 | 52 | assert agg.get_total_txids_processed() == 2 53 | assert agg.get_total_txids_processed_per_host() == {h: 1 for h in hosts.keys()} 54 | 55 | assert ( 56 | agg.mark_seen("e", "txid1", now + timedelta(seconds=1)) 57 | == mempool.PropagationStatus.CompleteAll 58 | ) 59 | 60 | new_counts = {h: 1 for h in hosts.keys()} 61 | new_counts["e"] = 2 62 | assert agg.get_total_txids_processed_per_host() == new_counts 63 | 64 | print("All specific txid keys should have a TTL") 65 | num_checked = 0 66 | 67 | for key in redis.keys("mpa:txid*"): 68 | assert redis.ttl(key) 69 | num_checked += 1 70 | 71 | assert num_checked >= len(hosts) 72 | 73 | # Nothing's ready yet. 
74 | assert len(agg.process_all_aged()) == 0 75 | processed = agg.process_all_aged(latest_time_allowed=now_ts) 76 | assert len(processed) == 0 77 | 78 | processed = agg.process_all_aged(latest_time_allowed=(now_ts + 1)) 79 | assert len(processed) == 2 80 | 81 | [txprop2, txprop1] = processed 82 | 83 | assert txprop1.host_to_timestamp == dict( 84 | **{h: now_ts for h in most_hosts}, **{"e": now_ts + 1} 85 | ) 86 | assert set(txprop1.cohorts_complete) == { 87 | mempool.PolicyCohort.segwit, 88 | mempool.PolicyCohort.taproot, 89 | } 90 | assert txprop1.all_complete 91 | assert txprop1.spread == 1 92 | assert txprop1.time_window > 0 93 | assert txprop1.earliest_saw == now_ts 94 | assert txprop1.latest_saw == now_ts + 1 95 | 96 | assert txprop2.host_to_timestamp == {"e": now.timestamp()} 97 | assert txprop2.cohorts_complete == [] 98 | assert not txprop2.all_complete 99 | assert txprop2.spread == 0 100 | assert txprop2.time_window > 0 101 | 102 | for host in most_hosts: 103 | assert redis.get("mpa:total_txids:%s" % host) == "1" 104 | 105 | assert redis.get("mpa:total_txids:e") == "2" 106 | 107 | all_processed = agg.get_propagation_event_keys() 108 | assert all_processed == ["mpa:prop_event:txid2", "mpa:prop_event:txid1"] 109 | 110 | [prop1, prop2] = [ 111 | mempool.TxPropagation.from_redis(d) for d in sorted(redis.mget(all_processed)) 112 | ] 113 | 114 | assert prop1 == txprop1 115 | assert prop2 == txprop2 116 | assert set(agg.get_propagation_events()) == {prop1, prop2} 117 | 118 | for key in all_processed: 119 | assert int(redis.ttl(key)) <= (60 * 60) + (60 * 5) 120 | 121 | assert set(redis.keys()) == { 122 | "mpa:total_txids:a", 123 | "mpa:total_txids:b", 124 | "mpa:total_txids:c", 125 | "mpa:total_txids:d", 126 | "mpa:total_txids:e", 127 | "mpa:total_txids", 128 | "mpa:prop_event:txid2", 129 | "mpa:prop_event:txid1", 130 | "mpa:prop_event_set", 131 | } 132 | 133 | print("Smoke-test metric generation") 134 | server_monitor.refresh_metrics( 135 | agg, 136 | hosts_to_cohort={ 137 | models.Host.objects.filter(name=h).first(): cohort 138 | for h, cohort in hosts.items() 139 | }, 140 | ) 141 | 142 | 143 | @pytest.mark.django_db 144 | def test_get_aggreator(fake_hosts): 145 | assert ( 146 | server_tasks.get_mempool_aggregator() == server_tasks.get_mempool_aggregator() 147 | ) 148 | -------------------------------------------------------------------------------- /bmon/test_models.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | from .bitcoind_tasks import create_host_record 4 | 5 | 6 | @pytest.mark.django_db 7 | def test_create_host(): 8 | h = create_host_record() 9 | assert h.bitcoin_dbcache == 450 10 | assert h.bitcoin_extra == {'flags': '-regtest'} 11 | assert h.bitcoin_prune == 0 12 | assert not h.bitcoin_listen 13 | -------------------------------------------------------------------------------- /bmon/testdata/block-timeouts.log: -------------------------------------------------------------------------------- 1 | 2022-09-30T17:26:52Z [] Timeout downloading block 0000000000000000000573b6110e9e77864ee0832f2ade5f9317b439c3ce5294 from peer=4, disconnecting 2 | 2022-10-05T22:29:02.937882Z [] Timeout downloading block 000000000000000000086779ecf494b0595a9b779f501c7e25fb2be0b69907a2 from peer=24, disconnecting 3 | 2022-10-06T19:52:47.698534Z Timeout downloading block 000000000000000000016d82a353995e3e713c53319f71361cd02b53cc0fb49e from peer=3, disconnecting 4 | 2022-10-06T20:12:10.246876Z Timeout downloading block 
0000000000000000000571fbffbdcbae46b16f79e75a77cd1a470981f48d951f from peer=6, disconnecting 5 | 2022-10-06T20:58:28.107620Z Timeout downloading block 00000000000000000003fcc1329bec27b69463dde49fb367421d8f139a2e66f5 from peer=5, disconnecting 6 | -------------------------------------------------------------------------------- /bmon/testdata/logs_badblock_cb_overspent_018.txt: -------------------------------------------------------------------------------- 1 | 2019-07-10T14:35:27Z [msghand] received: cmpctblock (344 bytes) peer=2579 2 | 2019-07-10T14:35:27Z [msghand] Initialized PartiallyDownloadedBlock for block 0000000000000000000b47042b90c6a893e6e5cdef70c92beefb88f4c5fa5a69 using a cmpctblock of size 344 3 | 2019-07-10T14:35:27Z [msghand] received: blocktxn (33 bytes) peer=2579 4 | 2019-07-10T14:35:27Z [msghand] Successfully reconstructed block 0000000000000000000b47042b90c6a893e6e5cdef70c92beefb88f4c5fa5a69 with 1 txn prefilled, 0 txn from mempool (incl at least 0 from extra pool) and 0 txn requested 5 | 2019-07-10T14:35:27Z [msghand] - Load block from disk: 0.00ms [5025.19s] 6 | 2019-07-10T14:35:27Z [msghand] - Sanity checks: 0.01ms [1150.96s (1.97ms/blk)] 7 | 2019-07-10T14:35:27Z [msghand] - Fork checks: 0.04ms [71.00s (0.12ms/blk)] 8 | 2019-07-10T14:35:27Z [msghand] - Connect 1 transactions: 0.04ms (0.045ms/tx, 0.000ms/txin) [65004.23s (111.16ms/blk)] 9 | 2019-07-10T14:35:27Z [msghand] ERROR: ConnectBlock(): coinbase pays too much (actual=1326546691 vs limit=1250000000) 10 | 2019-07-10T14:35:27Z [msghand] InvalidChainFound: invalid block=0000000000000000000b47042b90c6a893e6e5cdef70c92beefb88f4c5fa5a69 height=584802 log2_work=90.831336 date=2019-07-10T14:35:25Z 11 | 2019-07-10T14:35:27Z [msghand] InvalidChainFound: current best=0000000000000000001b253b1fac766189e15d7f7078191002e5427ac7b8f9f1 height=584801 log2_work=90.831311 date=2019-07-10T14:35:06Z 12 | 2019-07-10T14:35:27Z [msghand] ERROR: ConnectTip: ConnectBlock 0000000000000000000b47042b90c6a893e6e5cdef70c92beefb88f4c5fa5a69 failed, bad-cb-amount (code 16) 13 | 2019-07-10T14:35:27Z [msghand] InvalidChainFound: invalid block=0000000000000000000b47042b90c6a893e6e5cdef70c92beefb88f4c5fa5a69 height=584802 log2_work=90.831336 date=2019-07-10T14:35:25Z 14 | 2019-07-10T14:35:27Z [msghand] InvalidChainFound: current best=0000000000000000001b253b1fac766189e15d7f7078191002e5427ac7b8f9f1 height=584801 log2_work=90.831311 date=2019-07-10T14:35:06Z 15 | 2019-07-10T14:35:27Z [msghand] received: cmpctblock (344 bytes) peer=2765 16 | 2019-07-10T14:35:27Z [msghand] ERROR: AcceptBlockHeader: block 0000000000000000000b47042b90c6a893e6e5cdef70c92beefb88f4c5fa5a69 is marked invalid 17 | 2019-07-10T14:35:27Z [msghand] peer=2765: invalid header via cmpctblock 18 | 2019-07-10T14:35:27Z [msghand] received: cmpctblock (344 bytes) peer=2940 19 | 2019-07-10T14:35:27Z [msghand] ERROR: AcceptBlockHeader: block 0000000000000000000b47042b90c6a893e6e5cdef70c92beefb88f4c5fa5a69 is marked invalid 20 | 2019-07-10T14:35:27Z [msghand] peer=2940: invalid header via cmpctblock 21 | 2019-07-10T14:35:27Z [msghand] sending inv (109 bytes) peer=1784 22 | 2019-07-10T14:35:27Z [msghand] received: inv (649 bytes) peer=968 23 | -------------------------------------------------------------------------------- /bmon/testdata/logs_connectblock_010.txt: -------------------------------------------------------------------------------- 1 | 2019-08-12 04:01:49 received: block (706064 bytes) peer=1 2 | 2019-08-12 04:01:49 received block 
00000000000000000010e1543aa317eb5e34148afda9b9da10edbdd9cb8a1c8d peer=1 3 | 2019-08-12 04:01:49 - Load block from disk: 0.00ms [0.11s] 4 | 2019-08-12 04:01:50 - Connect 1996 transactions: 1039.51ms (0.521ms/tx, 0.256ms/txin) [1918.99s] 5 | 2019-08-12 04:01:50 - Verify 4055 txins: 1052.74ms (0.260ms/txin) [2256.76s] 6 | 2019-08-12 04:01:50 - Index writing: 4.71ms [29.09s] 7 | 2019-08-12 04:01:50 - Callbacks: 0.03ms [0.84s] 8 | 2019-08-12 04:01:50 - Connect total: 1235.09ms [2935.83s] 9 | 2019-08-12 04:01:50 - Flush: 10.92ms [33.34s] 10 | 2019-08-12 04:01:50 - Writing chainstate: 0.43ms [1.63s] 11 | 2019-08-12 04:01:50 Seen TX confirm: fee : 0.00044247 BTC/kB fee/0 priority, took 0 blocks 12 | 2019-08-12 04:01:50 Seen TX confirm: fee : 0.00044444 BTC/kB fee/0 priority, took 0 blocks 13 | 2019-08-12 04:01:50 Seen TX confirm: fee : 0.00044444 BTC/kB fee/0 priority, took 0 blocks 14 | 2019-08-12 04:01:50 Seen TX confirm: fee : 0.00050000 BTC/kB fee/1531.02 priority, took 0 blocks 15 | 2019-08-12 04:01:50 Seen TX confirm: fee : 0.00064891 BTC/kB fee/356757 priority, took 0 blocks 16 | 2019-08-12 04:01:50 Seen TX confirm: fee : 0.00144447 BTC/kB fee/14.56 priority, took 0 blocks 17 | 2019-08-12 04:01:50 Seen TX confirm: fee : 0.00033146 BTC/kB fee/1.98362e+07 priority, took 0 blocks 18 | 2019-08-12 04:01:50 Seen TX confirm: fee : 0.00221238 BTC/kB fee/0 priority, took 0 blocks 19 | 2019-08-12 04:01:50 Seen TX confirm: fee : 0.00109866 BTC/kB fee/0 priority, took 0 blocks 20 | 2019-08-12 04:01:50 Seen TX confirm: fee : 0.00011035 BTC/kB fee/1.10256e+06 priority, took 0 blocks 21 | 2019-08-12 04:01:50 Seen TX confirm: fee : 0.00044247 BTC/kB fee/0 priority, took 1 blocks 22 | 2019-08-12 04:01:50 Seen TX confirm: fee : 0.00044444 BTC/kB fee/0 priority, took 1 blocks 23 | 2019-08-12 04:01:50 Seen TX confirm: fee : 0.00074257 BTC/kB fee/680356 priority, took 1 blocks 24 | 2019-08-12 04:01:50 Seen TX confirm: fee : 0.00044247 BTC/kB fee/0 priority, took 1 blocks 25 | 2019-08-12 04:01:50 Seen TX confirm: unassigned : 0.00031415 BTC/kB fee/7.51389e+07 priority, took 1 blocks 26 | 2019-08-12 04:01:50 Seen TX confirm: unassigned : 0.00001826 BTC/kB fee/0 priority, took 1 blocks 27 | 2019-08-12 04:01:50 Seen TX confirm: fee : 0.00044444 BTC/kB fee/0 priority, took 1 blocks 28 | 2019-08-12 04:01:50 Seen TX confirm: fee : 0.00064876 BTC/kB fee/2.21603e+07 priority, took 1 blocks 29 | 2019-08-12 04:01:50 Seen TX confirm: priority : 0.00002420 BTC/kB fee/3.00167e+08 priority, took 1 blocks 30 | 2019-08-12 04:01:50 Seen TX confirm: fee : 0.00031807 BTC/kB fee/2.53432e+06 priority, took 1 blocks 31 | 2019-08-12 04:01:50 estimates: for confirming within 0 blocks based on 100/100 samples, fee=0.00074962 BTC/kB, prio=1.05116e+11 32 | 2019-08-12 04:01:50 estimates: for confirming within 1 blocks based on 100/100 samples, fee=0.00048128 BTC/kB, prio=1.71533e+10 33 | 2019-08-12 04:01:50 estimates: for confirming within 2 blocks based on 100/100 samples, fee=0.00044444 BTC/kB, prio=6.79135e+09 34 | 2019-08-12 04:01:50 estimates: for confirming within 3 blocks based on 100/100 samples, fee=0.00043946 BTC/kB, prio=3.5258e+09 35 | 2019-08-12 04:01:50 estimates: for confirming within 4 blocks based on 100/100 samples, fee=0.00038812 BTC/kB, prio=2.16646e+09 36 | 2019-08-12 04:01:50 estimates: for confirming within 5 blocks based on 100/100 samples, fee=0.00033106 BTC/kB, prio=1.41581e+09 37 | 2019-08-12 04:01:50 estimates: for confirming within 6 blocks based on 100/100 samples, fee=0.00032256 BTC/kB, prio=1.02353e+09 38 | 
2019-08-12 04:01:50 estimates: for confirming within 7 blocks based on 100/100 samples, fee=0.00027164 BTC/kB, prio=7.62452e+08 39 | 2019-08-12 04:01:50 estimates: for confirming within 8 blocks based on 100/100 samples, fee=0.00024510 BTC/kB, prio=6.0877e+08 40 | 2019-08-12 04:01:50 estimates: for confirming within 9 blocks based on 100/100 samples, fee=0.00021445 BTC/kB, prio=5.02758e+08 41 | 2019-08-12 04:01:50 estimates: for confirming within 10 blocks based on 100/100 samples, fee=0.00019524 BTC/kB, prio=4.06034e+08 42 | 2019-08-12 04:01:50 estimates: for confirming within 11 blocks based on 100/100 samples, fee=0.00018242 BTC/kB, prio=3.38395e+08 43 | 2019-08-12 04:01:50 estimates: for confirming within 12 blocks based on 100/100 samples, fee=0.00015448 BTC/kB, prio=2.81602e+08 44 | 2019-08-12 04:01:50 estimates: for confirming within 13 blocks based on 100/100 samples, fee=0.00014000 BTC/kB, prio=2.35824e+08 45 | 2019-08-12 04:01:50 estimates: for confirming within 14 blocks based on 100/100 samples, fee=0.00012325 BTC/kB, prio=2.06678e+08 46 | 2019-08-12 04:01:50 estimates: for confirming within 15 blocks based on 100/100 samples, fee=0.00010591 BTC/kB, prio=1.77602e+08 47 | 2019-08-12 04:01:50 estimates: for confirming within 16 blocks based on 100/100 samples, fee=0.00010052 BTC/kB, prio=1.56175e+08 48 | 2019-08-12 04:01:50 estimates: for confirming within 17 blocks based on 100/100 samples, fee=0.00009416 BTC/kB, prio=1.3558e+08 49 | 2019-08-12 04:01:50 estimates: for confirming within 18 blocks based on 100/100 samples, fee=0.00009040 BTC/kB, prio=1.18255e+08 50 | 2019-08-12 04:01:50 estimates: for confirming within 19 blocks based on 100/100 samples, fee=0.00008451 BTC/kB, prio=1.05245e+08 51 | 2019-08-12 04:01:50 estimates: for confirming within 20 blocks based on 100/100 samples, fee=0.00007869 BTC/kB, prio=9.38046e+07 52 | 2019-08-12 04:01:50 estimates: for confirming within 21 blocks based on 100/100 samples, fee=0.00006589 BTC/kB, prio=8.38128e+07 53 | 2019-08-12 04:01:50 estimates: for confirming within 22 blocks based on 100/100 samples, fee=0.00006026 BTC/kB, prio=7.46516e+07 54 | 2019-08-12 04:01:50 estimates: for confirming within 23 blocks based on 100/100 samples, fee=0.00005436 BTC/kB, prio=6.63551e+07 55 | 2019-08-12 04:01:50 estimates: for confirming within 24 blocks based on 100/100 samples, fee=0.00005044 BTC/kB, prio=6.05811e+07 56 | 2019-08-12 04:01:50 UpdateTip: new best=00000000000000000010e1543aa317eb5e34148afda9b9da10edbdd9cb8a1c8d height=589733 log2_work=90.954156 tx=444177421 date=2019-08-12 04:01:32 progress=1.000000 cache=23091 57 | 2019-08-12 04:01:50 - Connect postprocess: 10.45ms [52.47s] 58 | 2019-08-12 04:01:50 - Connect block: 1256.89ms [3023.38s] 59 | 2019-08-12 04:01:50 Committing 22929 changed transactions (out of 23091) to coin database... 
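The fixture logs in bmon/testdata feed the log-parsing tests (test_logparse.py). As a rough, hypothetical illustration of the kind of extraction involved (this is not the project's actual parser in bmon/logparse.py), a regex over the UpdateTip line above can recover the new tip hash and height:

    # Hypothetical sketch; bmon's real parsing lives in bmon/logparse.py.
    import re

    UPDATE_TIP_RE = re.compile(
        r"UpdateTip: new best=(?P<blockhash>[0-9a-f]{64}) height=(?P<height>\d+)"
    )

    line = (
        "2019-08-12 04:01:50 UpdateTip: new best="
        "00000000000000000010e1543aa317eb5e34148afda9b9da10edbdd9cb8a1c8d"
        " height=589733 log2_work=90.954156 tx=444177421"
    )
    match = UPDATE_TIP_RE.search(line)
    assert match is not None
    assert int(match.group("height")) == 589733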
60 | -------------------------------------------------------------------------------- /bmon/testdata/logs_connectblock_basic.txt: -------------------------------------------------------------------------------- 1 | 2019-07-29T18:34:17Z - Load block from disk: 0.00ms [23.45s] 2 | 2019-07-29T18:34:17Z - Sanity checks: 0.01ms [17.24s (18.07ms/blk)] 3 | 2019-07-29T18:34:17Z - Fork checks: 0.04ms [0.09s (0.10ms/blk)] 4 | 2019-07-29T18:34:17Z - Connect 1982 transactions: 41.16ms (0.021ms/tx, 0.008ms/txin) [154.90s (162.37ms/blk)] 5 | 2019-07-29T18:34:17Z - Verify 4917 txins: 41.23ms (0.008ms/txin) [177.91s (186.49ms/blk)] 6 | 2019-07-29T18:34:17Z - Index writing: 13.62ms [13.08s (13.71ms/blk)] 7 | 2019-07-29T18:34:17Z - Callbacks: 0.04ms [0.05s (0.05ms/blk)] 8 | 2019-07-29T18:34:17Z - Connect total: 55.33ms [208.93s (219.00ms/blk)] 9 | 2019-07-29T18:34:17Z - Flush: 10.58ms [104.30s (109.33ms/blk)] 10 | 2019-07-29T18:34:17Z - Writing chainstate: 0.09ms [0.10s (0.10ms/blk)] 11 | 2019-08-09T16:28:42Z UpdateTip: new best=00000000000000000001d80d14ee4400b6d9c851debe27e6777f3876edd4ad1e height=589349 version=0x20800000 log2_work=90.944215 tx=443429260 date='2019-08-09T16:27:43Z' progress=1.000000 cache=8.7MiB(64093txo) warning='44 of last 100 blocks have unexpected version' 12 | 2019-07-29T18:34:17Z - Connect postprocess: 70.64ms [8.14s (8.53ms/blk)] 13 | 2019-07-29T18:34:40Z - Connect block: 136.63ms [344.92s (361.55ms/blk)] 14 | 15 | # Second message (without block warning) 16 | 17 | 2019-07-29T18:34:17Z - Load block from disk: 0.00ms [23.45s] 18 | 2019-07-29T18:34:17Z - Sanity checks: 0.01ms [17.24s (18.07ms/blk)] 19 | 2019-07-29T18:34:17Z - Fork checks: 0.04ms [0.09s (0.10ms/blk)] 20 | 2019-07-29T18:34:17Z - Connect 1982 transactions: 41.16ms (0.021ms/tx, 0.008ms/txin) [154.90s (162.37ms/blk)] 21 | 2019-07-29T18:34:17Z - Verify 4917 txins: 41.23ms (0.008ms/txin) [177.91s (186.49ms/blk)] 22 | 2019-07-29T18:34:17Z - Index writing: 13.62ms [13.08s (13.71ms/blk)] 23 | 2019-07-29T18:34:17Z - Callbacks: 0.04ms [0.05s (0.05ms/blk)] 24 | 2019-07-29T18:34:17Z - Connect total: 55.33ms [208.93s (219.00ms/blk)] 25 | 2019-07-29T18:34:17Z - Flush: 10.58ms [104.30s (109.33ms/blk)] 26 | 2019-07-29T18:34:17Z - Writing chainstate: 0.09ms [0.10s (0.10ms/blk)] 27 | 2019-08-09T16:28:42Z UpdateTip: new best=00000000000000000001d80d14ee4400b6d9c851debe27e6777f3876edd4ad1e height=589349 version=0x20800000 log2_work=90.944215 tx=443429260 date='2019-08-09T16:27:43Z' progress=1.000000 cache=8.7MiB(64093txo) 28 | 2019-07-29T18:34:17Z - Connect postprocess: 70.64ms [8.14s (8.53ms/blk)] 29 | 2019-07-29T18:34:40Z - Connect block: 136.63ms [344.92s (361.55ms/blk)] 30 | 31 | # incomplete connect block follows: 32 | 33 | 2019-07-29T18:34:17Z - Load block from disk: 0.00ms [23.45s] 34 | 2019-07-29T18:34:17Z - Sanity checks: 0.01ms [17.24s (18.07ms/blk)] 35 | 2019-07-29T18:34:17Z - Fork checks: 0.04ms [0.09s (0.10ms/blk)] 36 | 2019-07-29T18:34:17Z - Connect 1982 transactions: 41.16ms (0.021ms/tx, 0.008ms/txin) [154.90s (162.37ms/blk)] 37 | 2019-07-29T18:34:17Z - Verify 4917 txins: 41.23ms (0.008ms/txin) [177.91s (186.49ms/blk)] 38 | -------------------------------------------------------------------------------- /bmon/testdata/logs_gotblock_013.txt: -------------------------------------------------------------------------------- 1 | 2019-08-12 04:01:45.275508 received: cmpctblock (12405 bytes) peer=218 2 | 2019-08-12 04:01:45.278101 Initialized PartiallyDownloadedBlock for block 
00000000000000000010e1543aa317eb5e34148afda9b9da10edbdd9cb8a1c8d using a cmpctblock of size 12405 3 | 2019-08-12 04:01:45.278367 received: blocktxn (33 bytes) peer=218 4 | 2019-08-12 04:01:45.297276 Successfully reconstructed block 00000000000000000010e1543aa317eb5e34148afda9b9da10edbdd9cb8a1c8d with 1 txn prefilled, 1995 txn from mempool and 0 txn requested 5 | 2019-08-12 04:01:45.334588 - Load block from disk: 0.00ms [5.36s] 6 | 2019-08-12 04:01:45.334698 - Sanity checks: 0.01ms [0.64s] 7 | 2019-08-12 04:01:45.334962 - Fork checks: 0.26ms [0.66s] 8 | 2019-08-12 04:01:45.424408 - Connect 1996 transactions: 89.42ms (0.045ms/tx, 0.022ms/txin) [761.45s] 9 | 2019-08-12 04:01:45.451806 - Verify 4055 txins: 116.82ms (0.029ms/txin) [836.39s] 10 | 2019-08-12 04:01:45.459047 - Index writing: 7.26ms [28.87s] 11 | 2019-08-12 04:01:45.459114 - Callbacks: 0.08ms [0.49s] 12 | 2019-08-12 04:01:45.459416 - Connect total: 124.85ms [869.60s] 13 | 2019-08-12 04:01:45.474767 - Flush: 15.33ms [30.29s] 14 | 2019-08-12 04:01:45.475437 - Writing chainstate: 0.69ms [7.81s] 15 | 2019-08-12 04:01:45.508583 Blockpolicy recalculating dynamic cutoffs: 16 | 2019-08-12 04:01:45.508672 2: For conf success > 0.95 need Priority >: -1 from buckets 2.1e+24 - 2.1e+24 Cur Bucket stats 2.70% 0.0/(1.3+0 mempool) 17 | 2019-08-12 04:01:45.508720 2: For conf success > 0.95 need FeeRate >: 68912 from buckets 72890.5 - 72890.5 Cur Bucket stats 85.21% 16739.1/(19644.9+0 mempool 18 | ) 19 | 2019-08-12 04:01:45.508759 10: For conf success < 0.50 need Priority <: -1 from buckets 5.76e+07 - 5.76e+07 Cur Bucket stats 47.29% 0.6/(1.3+0 mempool) 20 | 2019-08-12 04:01:45.508805 10: For conf success < 0.50 need FeeRate <: -1 from buckets 1000 - 1000 Cur Bucket stats 83.66% 3161.8/(3779.3+0 mempool) 21 | 2019-08-12 04:01:45.509021 Blockpolicy after updating estimates for 1995 confirmed entries, new mempool map size 36 22 | 2019-08-12 04:01:45.510024 UpdateTip: new best=00000000000000000010e1543aa317eb5e34148afda9b9da10edbdd9cb8a1c8d height=589733 version=0x20c00000 log2_work=90.954156 tx=4441 77421 date='2019-08-12 04:01:32' progress=1.000000 cache=48.1MiB(24602tx) warning='42 of last 100 blocks have unexpected version' 23 | 2019-08-12 04:01:45.514064 - Connect postprocess: 38.62ms [142.65s] 24 | 2019-08-12 04:01:45.514109 - Connect block: 179.49ms [1055.72s] 25 | 2019-08-12 04:01:45.518217 received: cmpctblock (12405 bytes) peer=378 26 | 2019-08-12 04:01:45.518533 received: cmpctblock (12405 bytes) peer=481 27 | 2019-08-12 04:01:45.518686 SendMessages: sending header 00000000000000000010e1543aa317eb5e34148afda9b9da10edbdd9cb8a1c8d to peer=697 28 | 2019-08-12 04:01:45.518720 sending: headers (82 bytes) peer=697 29 | 2019-08-12 04:01:45.518827 SendMessages: sending header 00000000000000000010e1543aa317eb5e34148afda9b9da10edbdd9cb8a1c8d to peer=698 30 | 2019-08-12 04:01:45.518859 sending: headers (82 bytes) peer=698 31 | 2019-08-12 04:01:45.519073 received: cmpctblock (12405 bytes) peer=4 32 | 2019-08-12 04:01:45.519300 SendMessages: sending header 00000000000000000010e1543aa317eb5e34148afda9b9da10edbdd9cb8a1c8d to peer=5 33 | 2019-08-12 04:01:45.519346 sending: headers (82 bytes) peer=5 34 | 2019-08-12 04:01:45.519532 received: cmpctblock (12405 bytes) peer=194 35 | 2019-08-12 04:01:45.519684 received: inv (217 bytes) peer=378 36 | -------------------------------------------------------------------------------- /bmon/testdata/logs_gotblock_018.txt: -------------------------------------------------------------------------------- 1 | 
2019-08-12T04:01:44Z [msghand] received: inv (289 bytes) peer=4476 2 | 2019-08-12T04:01:44Z [msghand] got inv: tx f7fb920201577760035303dad9c96c22c07e69f2a285b13b1772af38755c1562 have peer=4476 3 | 2019-08-12T04:01:44Z [msghand] got inv: tx 522d5ae8ce124b02d7e8853606347f1566227fd4beb768b6c1d582c46dea78ec have peer=4476 4 | 2019-08-12T04:01:45Z [msghand] received: cmpctblock (12405 bytes) peer=4349 5 | 2019-08-12T04:01:45Z [msghand] Initialized PartiallyDownloadedBlock for block 00000000000000000010e1543aa317eb5e34148afda9b9da10edbdd9cb8a1c8d using a cmpctblock of size 12405 6 | 2019-08-12T04:01:45Z [msghand] received: blocktxn (33 bytes) peer=4349 7 | 2019-08-12T04:01:45Z [msghand] Successfully reconstructed block 00000000000000000010e1543aa317eb5e34148afda9b9da10edbdd9cb8a1c8d with 1 txn prefilled, 1995 txn from mempool (incl at least 0 from extra pool) and 0 txn requested 8 | 2019-08-12T04:01:45Z [msghand] - Load block from disk: 0.00ms [5025.72s] 9 | 2019-08-12T04:01:45Z [msghand] - Sanity checks: 0.01ms [1151.15s (1.95ms/blk)] 10 | 2019-08-12T04:01:45Z [msghand] - Fork checks: 0.05ms [71.55s (0.12ms/blk)] 11 | 2019-08-12T04:01:45Z [msghand] - Connect 1996 transactions: 34.40ms (0.017ms/tx, 0.008ms/txin) [69965.68s (118.64ms/blk)] 12 | 2019-08-12T04:01:45Z [msghand] - Verify 4055 txins: 34.62ms (0.009ms/txin) [71067.69s (120.51ms/blk)] 13 | 2019-08-12T04:01:45Z [msghand] - Index writing: 10.83ms [1710.73s (2.90ms/blk)] 14 | 2019-08-12T04:01:45Z [msghand] - Callbacks: 0.09ms [19.32s (0.03ms/blk)] 15 | 2019-08-12T04:01:45Z [msghand] - Connect total: 46.46ms [74163.86s (125.76ms/blk)] 16 | 2019-08-12T04:01:45Z [msghand] - Flush: 9.12ms [2394.55s (4.06ms/blk)] 17 | 2019-08-12T04:01:45Z [msghand] - Writing chainstate: 0.10ms [43.56s (0.07ms/blk)] 18 | 2019-08-12T04:01:45Z [msghand] Blockpolicy estimates updated by 1204 of 1995 block txs, since last block 860 of 1265 tracked, mempool map size 28, max target 1008 from current 19 | 2019-08-12T04:01:45Z [msghand] UpdateTip: new best=00000000000000000010e1543aa317eb5e34148afda9b9da10edbdd9cb8a1c8d height=589733 version=0x20c00000 log2_work=90.954156 tx=444177421 date='2019-08-12T04:01:32Z' progress=1.000000 cache=574.9MiB(79670txo) warning='42 of last 100 blocks have unexpected version' 20 | 2019-08-12T04:01:45Z [msghand] - Connect postprocess: 56.13ms [906.15s (1.54ms/blk)] 21 | 2019-08-12T04:01:45Z [msghand] - Connect block: 111.80ms [82533.83s (139.95ms/blk)] 22 | 2019-08-12T04:01:45Z [msghand] received: cmpctblock (12405 bytes) peer=4360 23 | 2019-08-12T04:01:45Z [msghand] received: headers (82 bytes) peer=4476 24 | -------------------------------------------------------------------------------- /bmon/testdata/mempool-accepts-log.txt: -------------------------------------------------------------------------------- 1 | 2022-11-03T16:59:14.909503Z [msghand] AcceptToMemoryPool: peer=12: accepted c438cfcc7381fee23be3258e883f141d791b351f53060125786419dcb7b2d4a8 (poolsz 3931 txn, 9408 kB) 2 | 2022-11-03T16:59:14.914051Z [msghand] AcceptToMemoryPool: peer=12: accepted a5e9a1e541acb8f728f94a7d437d8791a4fc00ab047714ab8cee78d0f68bea41 (poolsz 3932 txn, 9410 kB) 3 | 2022-11-03T16:59:14.916397Z [msghand] AcceptToMemoryPool: peer=12: accepted 13739f3ab199c052f9e3567f8c806f5ab58e4e5289b93fb5091ff2182f280351 (poolsz 3933 txn, 9411 kB) 4 | 2022-11-03T16:59:14.918273Z [msghand] AcceptToMemoryPool: peer=12: accepted 94b8475ff53d6e6fe2bccec59fab9765a32f7161a92c0344ea87c02d9012e1fb (poolsz 3934 txn, 9413 kB) 5 | 2022-11-03T16:59:14.919882Z [msghand] 
AcceptToMemoryPool: peer=12: accepted 0655c01d4da1917b53063066bff3a5137ebb721d9656999b1be417061fa834e9 (poolsz 3935 txn, 9414 kB) 6 | 2022-11-03T16:59:15.768394Z [msghand] AcceptToMemoryPool: peer=5: accepted 3278c3ea101386a0d1b04866bead7756be22fe802a4a631a48dcb8594a0ddc9e (poolsz 3936 txn, 9416 kB) 7 | 2022-11-03T16:59:15.789064Z [msghand] AcceptToMemoryPool: peer=7: accepted f928138059e6717ba73f12825093e9fc3ed25f10a4763d0b5175d479d780e506 (poolsz 3937 txn, 9417 kB) 8 | 2022-11-03T16:59:15.790152Z [msghand] AcceptToMemoryPool: peer=7: accepted 75a3b627965c42960cd8718d941f316640f88819bdbddb10c5418f839f013ffc (poolsz 3938 txn, 9418 kB) 9 | 2022-11-03T16:59:15.793392Z [msghand] AcceptToMemoryPool: peer=7: accepted 91bb08ccf47b973eb3092eafc15b61fc2d0605c614a0ff4b68273ad05ffd5977 (poolsz 3939 txn, 9420 kB) 10 | 2022-11-03T16:59:15.797664Z [msghand] AcceptToMemoryPool: peer=7: accepted 1750b96b1540c34cd6415e40d10f2507bbe71e103a6ebfc3f488411c18860bc3 (poolsz 3940 txn, 9421 kB) 11 | 2022-11-03T16:59:17.182110Z [msghand] AcceptToMemoryPool: peer=6: accepted e0a0fd072cf83913337d67c4d832a03364a56309ea4abac1d9299a746e1aca77 (poolsz 3941 txn, 9422 kB) 12 | 2022-11-03T16:59:17.183177Z [msghand] AcceptToMemoryPool: peer=6: accepted 41f50487093ecb728d7661c40714a83c3eb357b48f8cf541bf7df83e28347444 (poolsz 3942 txn, 9423 kB) 13 | 2022-11-03T16:59:17.184227Z [msghand] AcceptToMemoryPool: peer=6: accepted d66c9f30129c12aaac366c2992d67186dd786329d98e05e381c4fb2cf9c1c341 (poolsz 3943 txn, 9424 kB) 14 | 2022-11-03T16:59:17.185284Z [msghand] AcceptToMemoryPool: peer=6: accepted 386d2951746b707a0b7348df0902db0265e6c2eb690828aaa617f485051f9643 (poolsz 3944 txn, 9425 kB) 15 | 2022-11-03T16:59:17.186384Z [msghand] AcceptToMemoryPool: peer=6: accepted 3291a231f954ff550ff84c8e3e5540fd9a92ca8f6c7fbcfc446b8202b5947f0d (poolsz 3945 txn, 9427 kB) 16 | 2022-11-03T16:59:17.188832Z [msghand] AcceptToMemoryPool: peer=6: accepted 1fd640910afb19578b0ecdb389a168945be27e94d0cbd3839e53b0a8f13fd833 (poolsz 3946 txn, 9428 kB) 17 | 2022-11-03T16:59:17.189891Z [msghand] AcceptToMemoryPool: peer=6: accepted ee9b55c6450fcf20a2af1a36ed0b3dd061741617d0f97810177bed4b4c09d6ab (poolsz 3947 txn, 9429 kB) 18 | 2022-11-03T16:59:17.191168Z [msghand] AcceptToMemoryPool: peer=6: accepted 28f59c599f8993a7d2c97cbe7bda8498d27d6e906ad1e7c55e569b3c41c27c91 (poolsz 3948 txn, 9430 kB) 19 | 2022-11-03T16:59:17.192006Z [msghand] AcceptToMemoryPool: peer=6: accepted de8b9efd6ae1947bb1ab5244851da74e9a08adbb9a11fdb376b90db649353eee (poolsz 3949 txn, 9431 kB) 20 | 2022-11-03T16:59:17.192898Z [msghand] AcceptToMemoryPool: peer=6: accepted b4306e47125a2c0c9002e5a363c4617b8f0a462044d5006adb220e616d954c9b (poolsz 3950 txn, 9432 kB) 21 | 2022-11-03T16:59:17.194223Z [msghand] AcceptToMemoryPool: peer=6: accepted 4fcd35e62d6201979ae808876425f52c6c561d1a097c11f6c18dccfa4078410d (poolsz 3951 txn, 9433 kB) 22 | 2022-11-03T16:59:17.195500Z [msghand] AcceptToMemoryPool: peer=6: accepted 36450e5b809365d337e41801cd22bfee5213f77231da29b5dbb2073200ec5124 (poolsz 3952 txn, 9435 kB) 23 | 2022-11-03T16:59:17.196602Z [msghand] AcceptToMemoryPool: peer=6: accepted 9a31e4e117d0422828d548e5886e44bd9458389a75b3aee58151e409863199b5 (poolsz 3953 txn, 9436 kB) 24 | 2022-11-03T16:59:17.205068Z [msghand] AcceptToMemoryPool: peer=6: accepted 2be95495d1ec483db211fb508fba407ecae42ae8a967d35ee943ae86a83608ec (poolsz 3954 txn, 9441 kB) 25 | 2022-11-03T16:59:17.206782Z [msghand] AcceptToMemoryPool: peer=6: accepted c0c1c6a2c17171de5c8d5a94118c544f605713af01a1076c1450ac133704ac9a (poolsz 3955 
txn, 9442 kB) 26 | 2022-11-03T16:59:17.208477Z [msghand] AcceptToMemoryPool: peer=6: accepted ed784c8aaf5dff78085fa2e2d0fa48460ae734924e0ef236481e30691d5242d6 (poolsz 3955 txn, 9442 kB) 27 | 2022-11-03T16:59:17.212720Z [msghand] AcceptToMemoryPool: peer=6: accepted fce9c4c5496c98a898708cd27ea329a2a0f1c982c14557b9b4d0fb89692230af (poolsz 3956 txn, 9444 kB) 28 | 2022-11-03T16:59:17.216855Z [msghand] AcceptToMemoryPool: peer=6: accepted 9c1c198f54422e71d6e09c1521aed1de8cadac2b57310f617217e40a27641934 (poolsz 3957 txn, 9446 kB) 29 | 2022-11-03T16:59:17.220674Z [msghand] AcceptToMemoryPool: peer=6: accepted e4a1040351b33be34379e287220b03e2c8dc3288d27a865cfca9181ab580db78 (poolsz 3958 txn, 9447 kB) 30 | 2022-11-03T16:59:17.224652Z [msghand] AcceptToMemoryPool: peer=6: accepted 774d0ed70f53fbcac104b08e6b2994ebbb3bfec8666552c58430b9c2556f2a10 (poolsz 3959 txn, 9449 kB) 31 | 2022-11-03T16:59:17.228725Z [msghand] AcceptToMemoryPool: peer=6: accepted e364fe928c538a5bd50c8b1bde27ba357c4248f7e395650db63c532a0dabce8f (poolsz 3960 txn, 9450 kB) 32 | 2022-11-03T16:59:18.630085Z [msghand] AcceptToMemoryPool: peer=6: accepted 19f1261e64944c3ddecdc5d6606155ec42314fa5c9f3a7cb23f391dae31abe36 (poolsz 3961 txn, 9452 kB) 33 | 2022-11-03T16:59:18.632767Z [msghand] AcceptToMemoryPool: peer=6: accepted 9cbd84b3170d41293611db7b4eb2d793e5d4be6b906748274324b20084b50f2a (poolsz 3962 txn, 9453 kB) 34 | 2022-11-03T16:59:18.634481Z [msghand] AcceptToMemoryPool: peer=6: accepted d470d9fc10d4269555afad6212b88ad5f80e9dd30ffabc144a1ff45b58d736d9 (poolsz 3963 txn, 9455 kB) 35 | 2022-11-03T16:59:18.635717Z [msghand] AcceptToMemoryPool: peer=8: accepted 1061e84be4fbc833e02ce1fe7a0239409a6d730ea009ffdc884d6f1da74038a9 (poolsz 3963 txn, 9456 kB) 36 | 2022-11-03T16:59:18.636881Z [msghand] AcceptToMemoryPool: peer=6: accepted eb4c788008b335918e1e9cb347b0f6ed977481d2b2bba9f99e3c0d450d7044fd (poolsz 3964 txn, 9457 kB) 37 | 2022-11-03T16:59:18.638110Z [msghand] AcceptToMemoryPool: peer=8: accepted e63105299ed3d081a7e3e45f51ca698d4ac7d9dcf0add77da854a053d7d191c1 (poolsz 3965 txn, 9458 kB) 38 | 2022-11-03T16:59:18.642054Z [msghand] AcceptToMemoryPool: peer=6: accepted 285331d1a9a5b96a16f57fee57b82753b849123b624bdbb9a012dc8bfda765bb (poolsz 3966 txn, 9461 kB) 39 | 2022-11-03T16:59:18.643420Z [msghand] AcceptToMemoryPool: peer=8: accepted bfc0fdae816e16a29283d8e99d3e67830fff8629f522ab1f819a7594730a0e93 (poolsz 3967 txn, 9462 kB) 40 | 2022-11-03T16:59:18.648750Z [msghand] AcceptToMemoryPool: peer=6: accepted 806520d34a8dbbdd5ad58dd6488455794f0be28f8faebef5e2893db9d9e980dc (poolsz 3968 txn, 9464 kB) 41 | 2022-11-03T16:59:18.651329Z [msghand] AcceptToMemoryPool: peer=8: accepted 0bba42324bd35124a37b033af0de6057e06d6930b2d979aa09a305e112962e40 (poolsz 3969 txn, 9465 kB) 42 | 2022-11-03T16:59:18.656199Z [msghand] AcceptToMemoryPool: peer=6: accepted acdab1983f288491661069d66c6dc59d29c3be6c08754442b58e8aed693c7348 (poolsz 3970 txn, 9467 kB) 43 | 2022-11-03T16:59:18.660896Z [msghand] AcceptToMemoryPool: peer=6: accepted 1d748dd073071507b551072b2aa553d07e72a1323d0241df8039f04d64967918 (poolsz 3971 txn, 9468 kB) 44 | 2022-11-03T16:59:18.665548Z [msghand] AcceptToMemoryPool: peer=6: accepted c6ea1c81fbaa660e87b26bc447a07b748c1e597f711b26eb6cb909fd3c0313f2 (poolsz 3972 txn, 9470 kB) 45 | 2022-11-03T16:59:18.670574Z [msghand] AcceptToMemoryPool: peer=6: accepted 60e3e32f6071f98c6628f4ea733824106c8e80bd3b429b8df71ea5619f75b526 (poolsz 3973 txn, 9472 kB) 46 | 2022-11-03T16:59:18.675701Z [msghand] AcceptToMemoryPool: peer=6: accepted 
dfd5cf057ef33d466c8fed60129204e0f8cb73765bd183cc7d414b4664780f00 (poolsz 3974 txn, 9473 kB) 47 | 2022-11-03T16:59:19.494734Z [msghand] AcceptToMemoryPool: peer=8: accepted c8c7808cfe366a4d70010b8b0fc4ee508fbbebd20823c3d85165bb191ae09fca (poolsz 3975 txn, 9475 kB) 48 | 2022-11-03T16:59:19.510679Z [msghand] AcceptToMemoryPool: peer=2: accepted c9949bef73ad48b8ba362f53400d00198f6a463f1a879afa005387e0bb1e29c4 (poolsz 3976 txn, 9476 kB) 49 | 2022-11-03T16:59:19.511016Z [msghand] AcceptToMemoryPool: peer=2: accepted 85c2df886983d30a1e542d2344d609e332ee2aa818f542bb4a45266d15445be8 (poolsz 3977 txn, 9477 kB) 50 | 2022-11-03T16:59:20.228992Z [msghand] AcceptToMemoryPool: peer=8: accepted 40b315d2f40c0fdbb2a3dff2c8f0bb8c031afcc70af0fdeda49d07f400a95d3b (poolsz 3978 txn, 9478 kB) 51 | -------------------------------------------------------------------------------- /bmon/testdata/new-header.log: -------------------------------------------------------------------------------- 1 | 2023-05-16T05:49:52.202998Z [msghand] Saw new header hash=00000000000000000004b2aea48da395156297a536ca62c2c53d25e14225f72a height=789959 2 | 2023-05-16T05:49:52.203009Z [msghand] [net] Saw new cmpctblock header hash=00000000000000000004b2aea48da395156297a536ca62c2c53d25e14225f72a peer=46 3 | 2023-05-16T05:49:52.210637Z [msghand] [cmpctblock] Initialized PartiallyDownloadedBlock for block 00000000000000000004b2aea48da395156297a536ca62c2c53d25e14225f72a using a cmpctblock of size 27459 4 | 2023-05-16T05:49:52.210744Z [msghand] [net] received: blocktxn (33 bytes) peer=46 5 | 2023-05-16T05:49:52.213277Z [msghand] [cmpctblock] Successfully reconstructed block 00000000000000000004b2aea48da395156297a536ca62c2c53d25e14225f72a with 1 txn prefilled, 4529 txn from mempool (incl at least 4 from extra pool) and 0 txn requested 6 | 2023-05-16T05:49:52.214496Z [msghand] [validation] NewPoWValidBlock: block hash=00000000000000000004b2aea48da395156297a536ca62c2c53d25e14225f72a 7 | 2023-05-16T05:49:52.216604Z [msghand] [net] PeerManager::NewPoWValidBlock sending header-and-ids 00000000000000000004b2aea48da395156297a536ca62c2c53d25e14225f72a to peer=36 8 | 2023-05-16T05:49:52.216920Z [msghand] [net] sending cmpctblock (27459 bytes) peer=36 9 | 2023-05-16T05:49:52.217106Z [msghand] [net] PeerManager::NewPoWValidBlock sending header-and-ids 00000000000000000004b2aea48da395156297a536ca62c2c53d25e14225f72a to peer=43 10 | 2023-05-16T05:49:52.217116Z [msghand] [net] sending cmpctblock (27459 bytes) peer=43 11 | 2023-05-16T05:49:52.224032Z [msghand] [bench] - Using cached block 12 | 2023-05-16T05:49:52.224053Z [msghand] [bench] - Load block from disk: 0.02ms [0.00s (0.02ms/blk)] 13 | 2023-05-16T05:49:52.224069Z [msghand] [bench] - Sanity checks: 0.00ms [0.00s (0.00ms/blk)] 14 | 2023-05-16T05:49:52.224080Z [msghand] [bench] - Fork checks: 0.01ms [0.00s (0.01ms/blk)] 15 | 2023-05-16T05:49:52.235159Z [msghand] [bench] - Connect 4530 transactions: 11.07ms (0.002ms/tx, 0.002ms/txin) [0.14s (13.06ms/blk)] 16 | 2023-05-16T05:49:52.235176Z [msghand] [bench] - Verify 5434 txins: 11.09ms (0.002ms/txin) [0.15s (13.51ms/blk)] 17 | 2023-05-16T05:49:52.238731Z [msghand] [bench] - Write undo data: 3.55ms [0.04s (3.67ms/blk)] 18 | 2023-05-16T05:49:52.238742Z [msghand] [bench] - Index writing: 0.01ms [0.00s (0.01ms/blk)] 19 | 2023-05-16T05:49:52.239139Z [msghand] [validation] BlockChecked: block hash=00000000000000000004b2aea48da395156297a536ca62c2c53d25e14225f72a state=Valid 20 | 2023-05-16T05:49:52.239153Z [msghand] [bench] - Connect total: 15.11ms [0.19s 
(17.61ms/blk)] 21 | 2023-05-16T05:49:52.243699Z [msghand] [bench] - Flush: 4.55ms [0.06s (5.79ms/blk)] 22 | 2023-05-16T05:49:52.243712Z [msghand] [bench] - Writing chainstate: 0.01ms [0.00s (0.02ms/blk)] 23 | 2023-05-16T05:49:52.249227Z [msghand] [validation] Enqueuing TransactionRemovedFromMempool: txid=8c2c557deb498b554b4ed5316a2f45975314079fe4e230f2a148bf770ebd336d wtxid=2e8a26ac7a33e3712a02d5d20b5486411862d349c1aa351a01f0805f84595890 reason=conflict 24 | 2023-05-16T05:49:52.249309Z [scheduler] [validation] TransactionRemovedFromMempool: txid=8c2c557deb498b554b4ed5316a2f45975314079fe4e230f2a148bf770ebd336d wtxid=2e8a26ac7a33e3712a02d5d20b5486411862d349c1aa351a01f0805f84595890 reason=conflict 25 | 2023-05-16T05:49:52.264753Z [msghand] [validation] Enqueuing TransactionRemovedFromMempool: txid=0b5a140531f0e45ec3b725c02e9cb58f00a4b1b48a66aeebc7bf6c125f0d6acc wtxid=c92796f548ac07ef26be5e983a76dabb78be8e6d119ada787d58e1b53cf3fa59 reason=conflict 26 | 2023-05-16T05:49:52.264842Z [scheduler] [validation] TransactionRemovedFromMempool: txid=0b5a140531f0e45ec3b725c02e9cb58f00a4b1b48a66aeebc7bf6c125f0d6acc wtxid=c92796f548ac07ef26be5e983a76dabb78be8e6d119ada787d58e1b53cf3fa59 reason=conflict 27 | 2023-05-16T05:49:52.264918Z [msghand] [validation] Enqueuing TransactionRemovedFromMempool: txid=8a8a22707c8e249007ecb5e71d85c23a0c7de07a9dadc9902a96f4d566fb8ab1 wtxid=f6de5f230bfa0ae8cbc2f599eebf3cbebd2370a8fd5e14bd1470648942baec6c reason=conflict 28 | 2023-05-16T05:49:52.264988Z [scheduler] [validation] TransactionRemovedFromMempool: txid=8a8a22707c8e249007ecb5e71d85c23a0c7de07a9dadc9902a96f4d566fb8ab1 wtxid=f6de5f230bfa0ae8cbc2f599eebf3cbebd2370a8fd5e14bd1470648942baec6c reason=conflict 29 | 2023-05-16T05:49:52.269840Z [msghand] UpdateTip: new best=00000000000000000004b2aea48da395156297a536ca62c2c53d25e14225f72a height=789959 version=0x333d4000 log2_work=94.182447 tx=838636104 date='2023-05-16T05:49:03Z' progress=1.000000 cache=76.4MiB(626588txo) 30 | -------------------------------------------------------------------------------- /bmon/urls.py: -------------------------------------------------------------------------------- 1 | """bmon URL Configuration 2 | 3 | The `urlpatterns` list routes URLs to views. For more information please see: 4 | https://docs.djangoproject.com/en/4.1/topics/http/urls/ 5 | Examples: 6 | Function views 7 | 1. Add an import: from my_app import views 8 | 2. Add a URL to urlpatterns: path('', views.home, name='home') 9 | Class-based views 10 | 1. Add an import: from other_app.views import Home 11 | 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home') 12 | Including another URLconf 13 | 1. Import the include() function: from django.urls import include, path 14 | 2. 
Add a URL to urlpatterns: path('blog/', include('blog.urls')) 15 | """ 16 | from django.contrib import admin 17 | from django.conf import settings 18 | from django.urls import path 19 | from django.conf.urls.static import static 20 | 21 | from .views import main, headertotip 22 | from .views_api import api 23 | 24 | urlpatterns = [ 25 | path('admin/', admin.site.urls), 26 | path("api/", api.urls), 27 | path("tips", headertotip), 28 | path("", main), 29 | ] + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT) 30 | -------------------------------------------------------------------------------- /bmon/util.py: -------------------------------------------------------------------------------- 1 | import decimal 2 | import json 3 | import re 4 | import cProfile 5 | import time 6 | import pstats 7 | import huey 8 | import http.client 9 | import urllib 10 | import os 11 | import logging 12 | from collections import Counter 13 | 14 | from django.db import models 15 | from django.db.models.sql.query import Query 16 | 17 | from . import server_tasks 18 | 19 | log = logging.getLogger(__name__) 20 | 21 | 22 | class DecimalEncoder(json.JSONEncoder): 23 | def default(self, o): 24 | if isinstance(o, decimal.Decimal): 25 | return str(o) 26 | return super(DecimalEncoder, self).default(o) 27 | 28 | 29 | def json_dumps(*args, **kwargs): 30 | """Handles serialization of decimals.""" 31 | kwargs.setdefault("cls", DecimalEncoder) 32 | return json.dumps(*args, **kwargs) 33 | 34 | 35 | json_loads = json.loads 36 | 37 | 38 | def print_sql(q: models.QuerySet | Query): 39 | from pygments import highlight 40 | from pygments.formatters import TerminalFormatter 41 | from pygments.lexers import PostgresLexer 42 | from sqlparse import format 43 | 44 | """Prettyprint a Django queryset.""" 45 | if hasattr(q, "q"): 46 | q = q.query # type: ignore 47 | formatted = format(str(q), reindent=True) 48 | print(highlight(formatted, PostgresLexer(), TerminalFormatter())) 49 | 50 | 51 | def profile(cmd): 52 | cProfile.run(cmd, "stats") 53 | p = pstats.Stats("stats") 54 | p.sort_stats(pstats.SortKey.CUMULATIVE).print_stats(30) 55 | 56 | 57 | def exec_tasks(n, huey_instance): 58 | for _ in range(n): 59 | t = time.time() 60 | task = huey_instance.dequeue() 61 | print("executing task %s" % task) 62 | assert task 63 | task.execute() 64 | print(" took %s" % (time.time() - t)) 65 | 66 | 67 | def exec_mempool_tasks(n): 68 | return exec_tasks(n, server_tasks.mempool_q) 69 | 70 | 71 | def _count_tasks(q) -> Counter: 72 | p = re.compile(rb"bmon\.[a-zA-Z_\.]+") 73 | 74 | def search(msg): 75 | m = p.search(msg) 76 | assert m 77 | return m 78 | 79 | return Counter(search(msg).group().decode() for msg in q.storage.enqueued_items()) 80 | 81 | 82 | def get_task_counts(): 83 | """ 84 | XXX THIS CAN BE SLOW! 
85 | """ 86 | counts = {} 87 | counts.update(dict(_count_tasks(server_tasks.mempool_q))) 88 | counts.update(dict(_count_tasks(server_tasks.server_q))) 89 | return counts 90 | 91 | 92 | def get_task_counts_fast(): 93 | return { 94 | 'mempool_q': len(server_tasks.mempool_q), 95 | 'server_q': len(server_tasks.server_q), 96 | } 97 | 98 | 99 | def count_tasks(): 100 | print(_count_tasks(server_tasks.mempool_q)) 101 | print(_count_tasks(server_tasks.server_q)) 102 | 103 | 104 | def remove_mempool_events(q: huey.RedisHuey): 105 | clean_queue(q, "Mempool") 106 | clean_queue(q, "Pong") 107 | 108 | 109 | def pushover_notification(msg: str) -> bool: 110 | token = os.environ.get("PUSHOVER_TOKEN") 111 | 112 | if not token: 113 | log.error("no pushover token configured") 114 | return False 115 | 116 | try: 117 | conn = http.client.HTTPSConnection("api.pushover.net:443") 118 | conn.request( 119 | "POST", 120 | "/1/messages.json", 121 | urllib.parse.urlencode( 122 | { 123 | "token": token, 124 | "user": os.environ.get("PUSHOVER_USER"), 125 | "message": msg, 126 | } 127 | ), 128 | {"Content-type": "application/x-www-form-urlencoded"}, 129 | ) 130 | resp = conn.getresponse() 131 | if resp.status != 200: 132 | log.error("pushover request failed", extra={"response": resp, "msg": msg}) 133 | return False 134 | except Exception: 135 | log.exception("pushover request failed") 136 | return False 137 | return True 138 | 139 | 140 | def clean_queue(q: huey.RedisHuey, filter_str: str): 141 | num_exs = 0 142 | re = 0 143 | processed = 0 144 | 145 | while True: 146 | processed += 1 147 | 148 | if processed % 1000 == 0: 149 | print(processed) 150 | 151 | try: 152 | t = q.dequeue() 153 | except Exception: 154 | num_exs += 1 155 | continue 156 | 157 | if not t: 158 | break 159 | 160 | if filter_str in str(t): 161 | continue 162 | else: 163 | q.enqueue(t) 164 | re += 1 165 | 166 | print(f"exceptions: {num_exs}") 167 | print(f"requeued: {re}") 168 | -------------------------------------------------------------------------------- /bmon/util_cli.py: -------------------------------------------------------------------------------- 1 | import pprint 2 | import logging 3 | from functools import cache 4 | from collections import defaultdict 5 | 6 | from clii import App 7 | import fastavro 8 | from django.conf import settings 9 | from django import db 10 | from django.core.exceptions import ValidationError 11 | 12 | # If we're not on a bitcoind host, this import will fail - that's okay. 13 | try: 14 | from . import bitcoind_tasks 15 | except Exception: 16 | bitcoind_tasks = None # type: ignore 17 | 18 | from .bitcoin.api import gather_rpc, RPC_ERROR_RESULT, wait_for_synced 19 | from . import logparse, models, server_tasks 20 | 21 | 22 | log = logging.getLogger(__name__) 23 | cli = App() 24 | 25 | 26 | @cli.cmd 27 | def feedline(line: str) -> None: 28 | """Manually process a logline. 
Useful for testing in dev.""" 29 | assert bitcoind_tasks 30 | host = models.Host.objects.filter(name=settings.HOSTNAME).order_by('-id').first() 31 | assert host 32 | bitcoind_tasks.process_line(line, host) 33 | 34 | 35 | @cli.cmd 36 | def showmempool() -> None: 37 | """Show the current mempool avro data.""" 38 | assert bitcoind_tasks 39 | with open(settings.MEMPOOL_ACTIVITY_CACHE_PATH / "current", "rb") as f: 40 | for record in fastavro.reader(f): 41 | print(record) 42 | 43 | 44 | @cli.cmd 45 | def run_listener(listener_name: str) -> None: 46 | """Rerun a listener over all bitcoind log lines.""" 47 | assert bitcoind_tasks 48 | listeners = [getattr(logparse, listener_name)()] 49 | host = models.Host.objects.filter(name=settings.HOSTNAME).order_by('-id').first() 50 | assert host 51 | 52 | assert settings.BITCOIND_LOG_PATH 53 | with open(settings.BITCOIND_LOG_PATH, "r", errors="ignore") as f: 54 | for line in f: 55 | try: 56 | bitcoind_tasks.process_line( 57 | line, host, listeners=listeners, modify_log_pos=False 58 | ) 59 | except db.IntegrityError: 60 | pass 61 | except ValidationError as e: 62 | if 'already exists' not in str(e): 63 | raise 64 | 65 | 66 | @cli.cmd 67 | def shipmempool() -> None: 68 | """Ship off mempool activity to GCP.""" 69 | assert bitcoind_tasks 70 | bitcoind_tasks.mempool_q.immediate = True 71 | bitcoind_tasks.queue_mempool_to_ship() 72 | bitcoind_tasks.ship_mempool_activity() 73 | 74 | 75 | @cli.cmd 76 | def wipe_mempool_backlog() -> None: 77 | if not bitcoind_tasks: 78 | q = server_tasks.mempool_q 79 | else: 80 | q = bitcoind_tasks.mempool_q 81 | 82 | print(f"Wiping queue {q} ({q.storage.queue_size()} entries)") 83 | q.flush() 84 | print(f"{q.storage.queue_size()} entries left") 85 | 86 | 87 | @cli.cmd 88 | def rpc(*cmd) -> None: 89 | """Gather bitcoind RPC results from all hosts. 
Should be run on the bmon server.""" 90 | pprint.pprint(gather_rpc(" ".join(cmd))) 91 | 92 | 93 | @cli.cmd 94 | def wait_for_bitcoind_sync() -> None: 95 | wait_for_synced() 96 | 97 | 98 | @cli.cmd 99 | def compare_mempools() -> None: 100 | mempools = gather_rpc("getrawmempool") 101 | host_to_set = {} 102 | 103 | for host, res in mempools.items(): 104 | if res == RPC_ERROR_RESULT: 105 | log.warning("unable to retrieve mempool for %s; skipping", host) 106 | continue 107 | host_to_set[host] = set(res) 108 | 109 | all_hosts = set(host_to_set.keys()) 110 | num_hosts = len(host_to_set) 111 | over_half = (num_hosts // 2) + 1 112 | 113 | @cache 114 | def hosts_with_txid(txid: str) -> tuple[str, ...]: 115 | return tuple(h for h, pool in host_to_set.items() if txid in pool) 116 | 117 | all_tx = set() 118 | 119 | for pool in host_to_set.values(): 120 | all_tx.update(pool) 121 | 122 | results: dict[str, dict[str, list[str]]] = defaultdict(lambda: defaultdict(list)) 123 | 124 | for tx in all_tx: 125 | hosts = hosts_with_txid(tx) 126 | 127 | if len(hosts) == 1: 128 | results['unique'][hosts[0]].append(tx) 129 | elif len(hosts) >= over_half: 130 | for host in (all_hosts - set(hosts)): 131 | results['missing'][host].append(tx) 132 | elif len(hosts) < over_half: 133 | for host in hosts: 134 | results['have_uncommon'][host].append(tx) 135 | 136 | def default_to_regular(d): 137 | if isinstance(d, defaultdict): 138 | d = {k: default_to_regular(v) for k, v in d.items()} 139 | return d 140 | 141 | pprint.pprint(default_to_regular(results)) 142 | 143 | 144 | def main() -> None: 145 | cli.run() 146 | -------------------------------------------------------------------------------- /bmon/views.py: -------------------------------------------------------------------------------- 1 | from django.shortcuts import render 2 | 3 | from bmon.models import HeaderToTipEvent 4 | 5 | 6 | def main(request): 7 | return render(request, 'index.html', {}) 8 | 9 | 10 | def headertotip(request): 11 | events = HeaderToTipEvent.objects.filter(header_to_tip_secs__gte=5).order_by('-height')[:100] 12 | return render(request, 'tips.html', {"events": events}) 13 | -------------------------------------------------------------------------------- /bmon/views_api.py: -------------------------------------------------------------------------------- 1 | import datetime 2 | import statistics 3 | from dataclasses import dataclass 4 | 5 | from ninja import NinjaAPI 6 | from django.forms.models import model_to_dict 7 | from django.db.models import Max 8 | 9 | from bmon import models 10 | from .bitcoin.api import gather_rpc, RPC_ERROR_RESULT 11 | from bmon_infra import infra, config 12 | 13 | api = NinjaAPI() 14 | 15 | 16 | def _get_wireguard_ip(host): 17 | bmon_wg = host.wireguards["wg-bmon"] 18 | return bmon_wg.ip 19 | 20 | 21 | def _get_db_hosts() -> dict[str, models.Host]: 22 | bitcoind_hosts = {h.name for h in config.get_hosts()[1].values() if "bitcoind" in h.tags} 23 | latest_ids = ( 24 | models.Host.objects.values("name") 25 | .annotate(max_id=Max("id")) 26 | .values_list("max_id", flat=True) 27 | ) 28 | return { 29 | h.name: h for h in models.Host.objects.filter(id__in=latest_ids) 30 | if h.name in bitcoind_hosts 31 | } 32 | 33 | 34 | @api.get("/prom-config-bitcoind") 35 | def prom_config_bitcoind(_): 36 | """Dynamic configuration for bitcoind prometheus monitoring endpoints.""" 37 | bitcoind_hosts = [h for h in config.get_hosts()[1].values() if "bitcoind" in h.tags] 38 | db_hosts = _get_db_hosts() 39 | out = [] 40 | 41 | for host in 
bitcoind_hosts: 42 | wgip = _get_wireguard_ip(host) 43 | targets = [ 44 | f"{wgip}:{host.bitcoind_exporter_port}", 45 | f"{wgip}:{infra.BMON_BITCOIND_EXPORTER_PORT}", 46 | ] 47 | if host.prom_exporter_port: 48 | targets.append(f"{wgip}:{host.prom_exporter_port}") 49 | 50 | out.append( 51 | { 52 | "targets": targets, 53 | "labels": { 54 | "job": "bitcoind", 55 | "hostname": host.name, 56 | "bitcoin_version": db_hosts[host.name].bitcoin_version, 57 | "bitcoin_gitref": db_hosts[host.name].bitcoin_gitref, 58 | "bitcoin_gitsha": db_hosts[host.name].bitcoin_gitsha, 59 | "bitcoin_dbcache": str(host.bitcoin_dbcache), 60 | "bitcoin_prune": str(host.bitcoin_prune), 61 | "bitcoin_listen": '1' if host.bitcoin_listen else '0', 62 | }, 63 | } 64 | ) 65 | 66 | return out 67 | 68 | 69 | @api.get("/prom-config-server") 70 | def prom_config_server(_): 71 | """Dynamic configuration for bmon server prometheus monitoring endpoints.""" 72 | hosts = config.get_hosts()[1].values() 73 | [server] = [h for h in hosts if "server" in h.tags] 74 | wgip = _get_wireguard_ip(server) 75 | 76 | return [ 77 | { 78 | "targets": [ 79 | f"{wgip}:{server.prom_exporter_port}", 80 | f"{wgip}:{infra.SERVER_EXPORTER_PORT}", 81 | ], 82 | "labels": {"job": "server", "hostname": server.name}, 83 | } 84 | ] 85 | 86 | 87 | @api.get("/hosts") 88 | def hosts(_): 89 | out = [] 90 | hosts = config.get_bitcoind_hosts() 91 | db_hosts = _get_db_hosts() 92 | peer_info = gather_rpc(lambda r: r.getpeerinfo()) 93 | chain_info = gather_rpc(lambda r: r.getblockchaininfo()) 94 | 95 | for host in hosts: 96 | peers = peer_info[host.name] 97 | if peers == RPC_ERROR_RESULT: 98 | peers = [] 99 | 100 | chain = chain_info[host.name] 101 | if chain == RPC_ERROR_RESULT: 102 | continue 103 | 104 | out.append( 105 | { 106 | "name": host.name, 107 | "peers": {p["addr"]: p["subver"] for p in peers}, 108 | "chaininfo": chain, 109 | "bitcoin_version": db_hosts[host.name].bitcoin_version, 110 | } 111 | ) 112 | 113 | return out 114 | 115 | 116 | @dataclass 117 | class BlockConnView: 118 | height: int 119 | events: list[models.ConnectBlockEvent] 120 | 121 | def __post_init__(self): 122 | if not self.events: 123 | return 124 | 125 | def fromts(ts): 126 | return datetime.datetime.fromtimestamp(ts) 127 | 128 | times = {e.host.name: e.timestamp.timestamp() for e in self.events} 129 | self.date = self.events[0].date 130 | self.avg_got_time: datetime.datetime = fromts(statistics.mean(times.values())) 131 | self.stddev_got_time: float = statistics.pstdev(times.values()) 132 | self.min: float = min(times.values()) 133 | self.min_dt = fromts(self.min) 134 | self.diffs: dict[str, float] = {host: t - self.min for host, t in times.items()} 135 | self.events = [] 136 | 137 | 138 | @api.get("/blocks") 139 | def blocks(_): 140 | out = [] 141 | heights = list( 142 | models.ConnectBlockEvent.objects.values_list("height", flat=True) 143 | .order_by("-height") 144 | .distinct()[:10] 145 | ) 146 | cbs = list(models.ConnectBlockEvent.objects.filter(height__in=heights)) 147 | 148 | for height in heights: 149 | height_cbs = [cb for cb in cbs if cb.height == height] 150 | out.append(BlockConnView(height, height_cbs).__dict__) 151 | 152 | return out 153 | 154 | 155 | @api.get("/mempool") 156 | def mempool(_): 157 | mempool_accepts = models.MempoolAccept.objects.order_by("-id")[:400] 158 | return [model_to_dict(m) for m in mempool_accepts] 159 | 160 | 161 | @api.get("/process-errors") 162 | def process_errors(_): 163 | objs = models.ProcessLineError.objects.order_by("-id")[:400] 164 | return 
[model_to_dict(m) for m in objs] 165 | 166 | 167 | @api.get("/crash") 168 | def crash(_): 169 | """for testing sentry""" 170 | return 1 / 0 171 | -------------------------------------------------------------------------------- /bmon/wsgi.py: -------------------------------------------------------------------------------- 1 | """ 2 | WSGI config for bmon project. 3 | 4 | It exposes the WSGI callable as a module-level variable named ``application``. 5 | 6 | For more information on this file, see 7 | https://docs.djangoproject.com/en/4.1/howto/deployment/wsgi/ 8 | """ 9 | 10 | import os 11 | 12 | from django.core.wsgi import get_wsgi_application 13 | 14 | os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'bmon.settings') 15 | 16 | application = get_wsgi_application() 17 | -------------------------------------------------------------------------------- /dev: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | """ 3 | Various tools for interacting with a bmon deployment; works both locally and 4 | in production. 5 | 6 | Should run `bmon-config` before using this tool. 7 | 8 | The difference between `bmon-util` and this file is that the former is meant to be 9 | run from within a docker container, whereas this is run on the host. 10 | """ 11 | 12 | import json 13 | import time 14 | import sys 15 | import subprocess 16 | import functools 17 | import os 18 | 19 | import clii 20 | 21 | from bmon_infra import infra, config 22 | from fscm import p, RunReturn 23 | 24 | cli = clii.App() 25 | 26 | os.environ["PYTHONUNBUFFERED"] = "1" 27 | 28 | 29 | def sh(cmd, **kwargs): 30 | return subprocess.run(cmd, shell=True, **kwargs) 31 | 32 | 33 | @functools.cache 34 | def getenv(): 35 | return config.get_env_object() 36 | 37 | 38 | def is_dev() -> bool: 39 | """True if we're in the dev environment.""" 40 | return getenv().BMON_ENV == "dev" 41 | 42 | 43 | def is_regtest() -> bool: 44 | """True if we're running on regtest.""" 45 | return (rpcport := getenv().BITCOIN_RPC_PORT) and int(rpcport) == 18443 46 | 47 | 48 | def brpc(cmd, **kwargs): 49 | """Run a bitcoin RPC command.""" 50 | flags = "-regtest" if is_regtest() else "" 51 | 52 | return sh( 53 | "docker-compose exec bitcoind " 54 | f"bitcoin-cli {flags} -datadir=/bitcoin/data {cmd}", 55 | **kwargs, 56 | ) 57 | 58 | 59 | def dev_only(func): 60 | """Decorator that enforces a command is run only in the dev environment.""" 61 | 62 | @functools.wraps(func) 63 | def wrapper(*args, **kwargs): 64 | if not is_dev(): 65 | print(f"Shouldn't be running {func.__name__} outside of a dev environment") 66 | sys.exit(1) 67 | else: 68 | return func(*args, **kwargs) 69 | 70 | return wrapper 71 | 72 | 73 | @cli.cmd 74 | def bitcoinrpc(*cmd): 75 | return brpc(" ".join(cmd)) 76 | 77 | 78 | @cli.cmd 79 | def bitcoind_wait_for_synced(): 80 | """ 81 | Wait until bitcoind's tip is reasonably current. 82 | 83 | This is helpful for bootstrapping new monitored bitcoind instances without 84 | generating a bunch of spurious data.
85 | """ 86 | tries = 12 87 | backoff_secs = 2 88 | is_synced = False 89 | got = {} 90 | i = 0 91 | 92 | while tries and not is_synced: 93 | try: 94 | got = json.loads( 95 | brpc("getblockchaininfo", text=True, capture_output=True).stdout 96 | ) 97 | except Exception as e: 98 | print(f"exception getting verification progress: {e}") 99 | tries -= 1 100 | time.sleep(backoff_secs) 101 | if backoff_secs < 120: 102 | backoff_secs *= 2 103 | else: 104 | is_synced = float(got["verificationprogress"]) > 0.9999 105 | time.sleep(1) 106 | tries = 12 107 | 108 | if i % 40 == 0: 109 | print(f"At height {got['blocks']} ({got['verificationprogress']})", flush=True) 110 | 111 | i += 1 112 | 113 | if not is_synced: 114 | print("Failed to sync!") 115 | sys.exit(1) 116 | 117 | print(f"Synced to height: {got['blocks']}") 118 | 119 | 120 | @cli.cmd 121 | @dev_only 122 | def generateblock(): 123 | wallets = json.loads(brpc("listwallets", capture_output=True).stdout) 124 | if "test" not in wallets: 125 | brpc("createwallet test false false '' false true true") 126 | 127 | if '"test"' not in brpc("getwalletinfo", capture_output=True, text=True).stdout: 128 | brpc("loadwallet test") 129 | sh( 130 | "docker-compose exec bitcoind bitcoin-cli -regtest -datadir=/bitcoin/data -generate" 131 | ) 132 | 133 | 134 | @cli.cmd 135 | def managepy(*cmd): 136 | sh(f"docker-compose run --rm shell python manage.py {' '.join(cmd)}") 137 | 138 | 139 | @cli.cmd 140 | def shell(): 141 | managepy("shell") 142 | 143 | 144 | @cli.cmd 145 | @dev_only 146 | def reup( 147 | service: str = "", 148 | rebuild_docker: bool = False, 149 | logs: bool = False, 150 | data: bool = False, 151 | ): 152 | sh(f"docker-compose down {service} ; docker-compose rm -f {service} ") 153 | if rebuild_docker: 154 | sh("docker-compose build") 155 | 156 | if data: 157 | cleardata() 158 | 159 | sh("bmon-config") 160 | env = config.get_env_object() 161 | p(env.BITCOIND_VERSION_PATH).contents(infra.get_bitcoind_version()) 162 | 163 | sh("docker-compose up -d db") 164 | managepy("migrate") 165 | sh(f"docker-compose up -d {service}") 166 | 167 | if logs: 168 | sh(f"docker-compose logs -f {service}") 169 | 170 | 171 | @cli.cmd 172 | def watchlogs(others: str = ""): 173 | """Tail interesting logs.""" 174 | sh( 175 | "docker-compose logs -f bitcoind server-task-worker " 176 | f"bitcoind-task-worker bitcoind-watcher bitcoind-mempool-worker {others}" 177 | ) 178 | 179 | 180 | @cli.cmd 181 | @dev_only 182 | def cleardata(): 183 | sh("sudo rm -fr services/dev/*") 184 | sh("bmon-config") 185 | 186 | 187 | def _testrun(cmd: str) -> bool: 188 | return sh( 189 | f"docker-compose run --rm -e RUN_DB_MIGRATIONS= test -- bash -c '{cmd}'", 190 | env={'BMON_BITCOIND_PORT': '8555', 191 | 'BMON_BITCOIND_RPC_PORT': '8554', 192 | 'BMON_REDIS_OPTIONS': '', 193 | **os.environ}, 194 | ).returncode == 0 195 | 196 | 197 | @cli.cmd 198 | @dev_only 199 | def test(run_mypy: bool = False): 200 | """Run automated tests.""" 201 | 202 | flake8_command = "flake8 %s --count --show-source --statistics" 203 | 204 | bmon_failed = not _testrun(flake8_command % 'bmon/') 205 | infra_failed = not _testrun(flake8_command % 'infra/') 206 | 207 | if run_mypy: 208 | mypy() 209 | 210 | test_failed = not _testrun("pytest -vv bmon") 211 | 212 | if bmon_failed or infra_failed: 213 | sys.exit(1) 214 | if test_failed: 215 | sys.exit(2) 216 | 217 | 218 | @cli.cmd 219 | @dev_only 220 | def mypy(): 221 | bmon_failed = not _testrun("mypy bmon/") 222 | infra_failed = not _testrun("mypy --exclude infra/build infra/") 223 | 
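    # Exit with a distinct code (3) so callers can tell a mypy failure apart from the flake8 (1) and pytest (2) failures that test() above reports.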
224 | if bmon_failed or infra_failed: 225 | sys.exit(3) 226 | 227 | 228 | @cli.cmd 229 | @dev_only 230 | def watchjs(): 231 | """Watch the frontend javascript and rebuild as necessary.""" 232 | sh("docker-compose run --rm js yarn run start") 233 | 234 | 235 | @cli.cmd 236 | def sql_shell(db_host: str = "localhost"): 237 | e = getenv() 238 | db = f"postgres://bmon:{e.DB_PASSWORD}@{db_host}:5432/bmon" 239 | print(f"Connecting to {db}") 240 | sh(f"pgcli {db}") 241 | 242 | 243 | @cli.cmd 244 | def pgcli(): 245 | sql_shell() 246 | 247 | 248 | if __name__ == "__main__": 249 | cli.run() 250 | -------------------------------------------------------------------------------- /docker-compose.yml: -------------------------------------------------------------------------------- 1 | # Services that run on the single, centralized bmon server instance. 2 | # 3 | # Environment is injected from the .env file, which is generated by 4 | # `./infra/bmon_infra/config.py`. 5 | 6 | version: '3.5' 7 | 8 | x-service-config: &service 9 | environment: &env 10 | BMON_DEBUG: 11 | BMON_HOSTS_FILE: 12 | BMON_HOSTNAME: 13 | DB_HOST: 14 | DB_PASSWORD: 15 | BITCOIN_RPC_HOST: 16 | BITCOIN_RPC_PASSWORD: 17 | BITCOIN_RPC_PORT: 18 | BITCOIN_RPC_USER: 19 | BITCOIN_GITSHA: 20 | BITCOIN_GITREF: 21 | BITCOIN_VERSION: 22 | BITCOIN_FLAGS: 23 | BITCOIN_PRUNE: 24 | BITCOIN_DBCACHE: 25 | BITCOIND_VERSION_PATH: 26 | PUSHOVER_USER: 27 | PUSHOVER_TOKEN: 28 | BMON_REDIS_SERVER_URL: 29 | BMON_REDIS_HOST: 30 | SENTRY_DSN: 31 | PYTHONUNBUFFERED: 1 32 | 33 | x-redis-config: &redis-config 34 | image: docker.io/library/redis 35 | user: "${USER_ID}" 36 | ports: 37 | - 6379:6379 38 | volumes: 39 | - ${ENV_ROOT}/redis/data:/data 40 | # Configure redis for persistence with AOF; 41 | # See https://redis.io/docs/manual/persistence/ 42 | command: redis-server ${BMON_REDIS_OPTIONS---appendonly yes --loglevel warning} 43 | 44 | services: 45 | 46 | # bmon server containers 47 | # ----------------------------------------------- 48 | 49 | db: 50 | profiles: ["server"] 51 | user: "${USER_ID}" 52 | image: docker.io/library/postgres:14-bullseye 53 | environment: 54 | POSTGRES_DB: bmon 55 | POSTGRES_USER: bmon 56 | POSTGRES_PASSWORD: ${DB_PASSWORD} 57 | volumes: 58 | - ${ENV_ROOT}/postgres/data:/var/lib/postgresql/data 59 | - /etc/passwd:/etc/passwd:ro 60 | - ./etc/postgres.conf:/etc/postgresql/postgresql.conf 61 | ports: 62 | - 5432:5432 63 | command: postgres -c 'config_file=/etc/postgresql/postgresql.conf' 64 | 65 | redis: 66 | profiles: ["server"] 67 | <<: *redis-config 68 | 69 | grafana: 70 | profiles: ["server"] 71 | image: docker.io/grafana/grafana-enterprise:9.4.3 72 | user: "${USER_ID}" 73 | volumes: 74 | - ${ENV_ROOT}/grafana/etc:/etc/grafana 75 | - ${ENV_ROOT}/grafana/var:/var/lib/grafana 76 | links: 77 | - prom 78 | - alertman 79 | # - loki 80 | ports: 81 | - 3000:3000 82 | 83 | prom: 84 | profiles: ["server"] 85 | image: docker.io/prom/prometheus:latest 86 | user: "${USER_ID}" 87 | volumes: 88 | - ${ENV_ROOT}/prom/etc:/etc/prometheus 89 | - ${ENV_ROOT}/prom/data:/prometheus 90 | links: 91 | - alertman 92 | ports: 93 | - 9090:9090 94 | 95 | alertman: 96 | profiles: ["server"] 97 | image: docker.io/prom/alertmanager:latest 98 | user: "${USER_ID}:1000" 99 | volumes: 100 | - ${ENV_ROOT}/alertman/config.yml:/etc/alertmanager/config.yml 101 | - ${ENV_ROOT}/alertman/data:/alertmanager 102 | command: --config.file=/etc/alertmanager/config.yml --storage.path=/alertmanager 103 | ports: 104 | - 9093:9093 105 | 106 | web: &web_config 107 | profiles: ["server"] 
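    # Like the other services marked with the "server" profile, this only starts when that compose profile is active; one possible invocation (not prescribed anywhere in this repo) is `docker-compose --profile server up -d`, or setting COMPOSE_PROFILES=server.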
108 | user: "${USER_ID}" 109 | build: 110 | context: . 111 | dockerfile: ./docker/py.Dockerfile 112 | image: docker.io/jamesob/bmon:latest 113 | environment: 114 | <<: *env 115 | RUN_DB_MIGRATIONS: 1 116 | WAIT_FOR: "${DB_HOST}:5432" 117 | ports: 118 | - 8080:8080 119 | volumes: 120 | - ./:/src 121 | links: 122 | - db 123 | command: ./manage.py runserver 0.0.0.0:8080 124 | 125 | shell: 126 | image: docker.io/jamesob/bmon:latest 127 | profiles: ["ops"] 128 | environment: 129 | <<: *env 130 | WAIT_FOR: "${DB_HOST}:5432" 131 | IPYTHONDIR: /var/lib/ipython 132 | volumes: 133 | - ./:/src 134 | - ${ENV_ROOT}/ipython:/var/lib/ipython 135 | command: ./manage.py shell 136 | 137 | server-task-worker: 138 | profiles: ["server"] 139 | user: "${USER_ID}" 140 | image: docker.io/jamesob/bmon:latest 141 | environment: 142 | <<: *env 143 | WAIT_FOR: "${DB_HOST}:5432,${BMON_REDIS_HOST}:6379" 144 | volumes: 145 | - ./:/src 146 | links: 147 | - db 148 | - redis 149 | command: huey_consumer.py bmon.server_tasks.server_q -w 3 150 | 151 | server-mempool-task-worker: 152 | profiles: ["server"] 153 | user: "${USER_ID}" 154 | image: docker.io/jamesob/bmon:latest 155 | environment: 156 | <<: *env 157 | WAIT_FOR: "${DB_HOST}:5432,${BMON_REDIS_HOST}:6379" 158 | volumes: 159 | - ./:/src 160 | links: 161 | - db 162 | - redis 163 | command: huey_consumer.py bmon.server_tasks.mempool_q -w 3 -q 164 | 165 | server-monitor: 166 | profiles: ["server"] 167 | user: "${USER_ID}" 168 | image: docker.io/jamesob/bmon:latest 169 | environment: 170 | <<: *env 171 | WAIT_FOR: "${BMON_REDIS_HOST}:6379,${DB_HOST}:5432" 172 | BMON_REDIS_URL: 173 | volumes: 174 | - ./:/src 175 | command: bmon-server-monitor 176 | ports: 177 | - 9334:9334 178 | 179 | bitcoind-exporter: 180 | profiles: ["bitcoind"] 181 | image: jamesob/bitcoin-prometheus-exporter:latest 182 | environment: 183 | <<: *env 184 | BITCOIN_RPC_HOST: 185 | links: 186 | - bitcoind 187 | ports: 188 | - 9332:9332 189 | 190 | # bitcoind node containers 191 | # ----------------------------------------------- 192 | 193 | bitcoind: 194 | profiles: ["bitcoind"] 195 | image: ${BITCOIN_DOCKER_TAG} 196 | user: "${USER_ID}" 197 | volumes: 198 | - ${ENV_ROOT}/bitcoin/data:/bitcoin/data 199 | command: bitcoind -datadir=/bitcoin/data ${BITCOIN_FLAGS} 200 | ports: 201 | - ${BMON_BITCOIND_PORT:-8332}:8332 202 | - ${BMON_BITCOIND_RPC_PORT:-8333}:8333 203 | 204 | redis-bitcoind: 205 | profiles: ["prod-bitcoind"] 206 | <<: *redis-config 207 | 208 | bitcoind-task-worker: &bitcoind-worker 209 | profiles: ["bitcoind"] 210 | user: "${USER_ID}" 211 | image: docker.io/jamesob/bmon:latest 212 | # We have to repeat the build section here because the `web` container won't 213 | # build on bitcoind hosts, so we have to have at least one container configured 214 | # to. 215 | build: 216 | context: . 217 | dockerfile: ./docker/py.Dockerfile 218 | environment: 219 | <<: *env 220 | BMON_REDIS_LOCAL_URL: 221 | WAIT_FOR: "${BMON_REDIS_HOST}:6379,${BMON_REDIS_LOCAL_HOST}:6379" 222 | CHAINCODE_GCP_CRED_PATH: 223 | volumes: 224 | - ./:/src 225 | - ${ENV_ROOT}/bmon/mempool-activity-cache:/mempool-activity-cache 226 | - ${BITCOIND_VERSION_PATH}:/bitcoin-version:ro 227 | # Can't have this as a dependency because of local dev. 
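    # Startup ordering still works without the link because WAIT_FOR above includes ${BMON_REDIS_LOCAL_HOST}:6379, which the entrypoint's wait-for script blocks on before launching the worker.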
228 | # links: 229 | # - redis-bitcoind 230 | command: huey_consumer.py bmon.bitcoind_tasks.events_q -w 2 231 | 232 | bitcoind-mempool-worker: 233 | <<: *bitcoind-worker 234 | volumes: 235 | - ./:/src 236 | - ${ENV_ROOT}/bmon/mempool-activity-cache:/mempool-activity-cache 237 | - ${ENV_ROOT}/bmon/credentials/chaincode-gcp.json:${CHAINCODE_GCP_CRED_PATH} 238 | - ${BITCOIND_VERSION_PATH}:/bitcoin-version:ro 239 | # Consume mempool activity in its own worker since it's so high volume. 240 | # Tell huey to -q to avoid lots of noise. 241 | command: huey_consumer.py bmon.bitcoind_tasks.mempool_q -w 2 -q 242 | 243 | bitcoind-watcher: 244 | profiles: ["bitcoind"] 245 | user: "${USER_ID}" 246 | image: docker.io/jamesob/bmon:latest 247 | environment: 248 | <<: *env 249 | BMON_REDIS_LOCAL_URL: 250 | BMON_BITCOIND_LOG_PATH: /bitcoin-data/debug.log 251 | WAIT_FOR: "${BMON_REDIS_HOST}:6379,${DB_HOST}:5432" 252 | volumes: 253 | - ./:/src 254 | - ${BITCOIN_DATA_PATH}:/bitcoin-data:ro 255 | - ${BITCOIND_VERSION_PATH}:/bitcoin-version:ro 256 | links: 257 | - bitcoind 258 | - bitcoind-task-worker 259 | command: bmon-watch-bitcoind-logs 260 | 261 | bitcoind-monitor: 262 | profiles: ["bitcoind"] 263 | user: "${USER_ID}" 264 | image: docker.io/jamesob/bmon:latest 265 | environment: 266 | <<: *env 267 | WAIT_FOR: "${BMON_REDIS_LOCAL_HOST}:6379,${DB_HOST}:5432" 268 | BMON_REDIS_LOCAL_URL: 269 | BMON_BITCOIND_LOG_PATH: /bitcoin-data/debug.log 270 | volumes: 271 | - ./:/src 272 | - ${ENV_ROOT}/bmon/mempool-activity-cache:/mempool-activity-cache 273 | - ${BITCOIN_DATA_PATH}:/bitcoin-data 274 | - ${BITCOIND_VERSION_PATH}:/bitcoin-version:ro 275 | command: bmon-bitcoind-monitor 276 | ports: 277 | - 9333:9333 278 | 279 | node-exporter: 280 | # only monitor this stuff in prod since its mountpoint needs are zealous. 281 | profiles: ["prod"] 282 | image: prom/node-exporter:latest 283 | restart: unless-stopped 284 | volumes: 285 | - /proc:/host/proc:ro 286 | - /sys:/host/sys:ro 287 | - /:/rootfs:ro 288 | command: 289 | - '--path.procfs=/host/proc' 290 | - '--path.rootfs=/rootfs' 291 | - '--path.sysfs=/host/sys' 292 | - '--collector.filesystem.mount-points-exclude=^/(sys|proc|dev|host|etc)($$|/)' 293 | ports: 294 | - 9100:9100 295 | 296 | # dev containers 297 | # ----------------------------------------------- 298 | 299 | test: 300 | profiles: ["dev"] 301 | user: "${USER_ID}" 302 | image: docker.io/jamesob/bmon:test-latest 303 | build: 304 | context: . 
305 | dockerfile: ./docker/py.Dockerfile 306 | args: 307 | PYTHON_PKG: .[tests] 308 | environment: 309 | <<: *env 310 | DJANGO_SETTINGS_MODULE: bmon.settings_test 311 | WAIT_FOR: "${BMON_REDIS_HOST}:6379,${DB_HOST}:5432" 312 | ports: 313 | - 8080:8080 314 | volumes: 315 | - ./:/src 316 | links: 317 | - db 318 | - bitcoind 319 | - redis 320 | command: ./manage.py runserver 0.0.0.0:8080 321 | 322 | js: 323 | profiles: ["dev"] 324 | image: js 325 | user: "${USER_ID}" 326 | build: 327 | context: ./frontend 328 | environment: 329 | <<: *env 330 | volumes: 331 | - ./frontend:/src 332 | - js_node_modules:/node_modules 333 | - ./frontend-build:/build 334 | command: yarn run build 335 | 336 | bitcoind-02: 337 | profiles: ["dev"] 338 | image: ${BITCOIN_DOCKER_TAG} 339 | user: "${USER_ID}" 340 | volumes: 341 | - ${ENV_ROOT}/bitcoin-02/data:/bitcoin/data 342 | - ${ENV_ROOT}/bitcoin-02/data/bitcoin.conf:/bitcoin/bitcoin.conf 343 | command: bitcoind -datadir=/bitcoin/data ${BITCOIN_FLAGS} 344 | 345 | volumes: 346 | js_node_modules: 347 | -------------------------------------------------------------------------------- /docker/entrypoint.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | if ! [ -z "${WAIT_FOR}" ]; then 5 | # wait-for detects/uses the above envvar. 6 | /bin/wait-for 7 | fi 8 | 9 | if ! [ -z "${RUN_DB_MIGRATIONS}" ]; then 10 | python manage.py migrate 11 | python manage.py collectstatic --noinput 12 | fi 13 | 14 | cd /src 15 | 16 | exec "$@" 17 | -------------------------------------------------------------------------------- /docker/py.Dockerfile: -------------------------------------------------------------------------------- 1 | FROM docker.io/library/python:3.11.0rc2-bullseye 2 | 3 | WORKDIR /src 4 | RUN apt-get -qq update && apt-get install -qq -y libpq-dev netcat iproute2 lshw 5 | 6 | # Can pass PYTHON_PKG=.[tests] for test dependencies. 7 | ARG PYTHON_PKG=. 8 | 9 | COPY . ./ 10 | COPY ./docker/wait-for /bin/wait-for 11 | COPY ./docker/entrypoint.sh /entrypoint.sh 12 | RUN pip install --upgrade pip setuptools ipython && \ 13 | pip install -e ./infra && \ 14 | pip install -e $PYTHON_PKG && \ 15 | chmod +x /entrypoint.sh /bin/wait-for 16 | 17 | ENTRYPOINT ["/entrypoint.sh"] 18 | -------------------------------------------------------------------------------- /docker/wait-for: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | """ 3 | Wait for stuff to boot with no system dependencies other than Python 3. 4 | 5 | Example usages: 6 | 7 | wait-for server:3000 8 | WAIT_FOR=localhost:8080,localhost:8081 wait-for -n 2 9 | 10 | """ 11 | 12 | import os 13 | import sys 14 | import socket 15 | import time 16 | import argparse 17 | 18 | 19 | def main(): 20 | parser = argparse.ArgumentParser(description=__doc__) 21 | parser.add_argument( 22 | '-d', '--delay', action='store', type=float, default=0.2, 23 | help='Number of seconds to delay between attempts') 24 | parser.add_argument( 25 | '-n', '--num-tries', action='store', type=int, default=200, 26 | help='Number of times to try each target') 27 | parser.add_argument( 28 | 'targets', nargs='*', 29 | default=[i.strip() for i in os.environ.get('WAIT_FOR', '').split(',')], 30 | help='The targets to poll, e.g. 
"localhost:8081"') 31 | 32 | args = parser.parse_args() 33 | 34 | for target in filter(None, args.targets): 35 | wait_for_target(target, args) 36 | 37 | 38 | def wait_for_target(target: str, args): 39 | tries = args.num_tries 40 | host, port = target.split(':') 41 | s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) 42 | print(f"waiting for {target} ({tries * args.delay}s)") 43 | sys.stdout.flush() 44 | 45 | while tries > 0: 46 | try: 47 | not_connected = s.connect_ex((host, int(port))) 48 | except socket.gaierror: 49 | not_connected = True 50 | 51 | if not_connected: 52 | tries -= 1 53 | time.sleep(args.delay) 54 | else: 55 | print(f"connected to {target}!") 56 | return True 57 | 58 | print(f"timed out waiting for {target}") 59 | sys.exit(1) 60 | 61 | 62 | if __name__ == "__main__": 63 | main() 64 | -------------------------------------------------------------------------------- /etc/alertmanager-template.yml: -------------------------------------------------------------------------------- 1 | route: 2 | receiver: pushover 3 | receivers: 4 | - name: pushover 5 | pushover_configs: 6 | - token: ${PUSHOVER_TOKEN} 7 | user_key: ${PUSHOVER_USER} 8 | retry: 30m 9 | message: "{{ range .Alerts }}{{ range .Annotations.SortedPairs }}- {{ .Value }}\n{{ end }}{{ end }}" 10 | url: "http://alerts.bmon.j.co" 11 | -------------------------------------------------------------------------------- /etc/alertmanager.yaml: -------------------------------------------------------------------------------- 1 | global: 2 | # The smarthost and SMTP sender used for mail notifications. 3 | smtp_smarthost: 'localhost:25' 4 | smtp_from: 'alertmanager@example.org' 5 | 6 | # The root route on which each incoming alert enters. 7 | route: 8 | # The root route must not have any matchers as it is the entry point for 9 | # all alerts. It needs to have a receiver configured so alerts that do not 10 | # match any of the sub-routes are sent to someone. 11 | receiver: 'team-X-mails' 12 | 13 | # The labels by which incoming alerts are grouped together. For example, 14 | # multiple alerts coming in for cluster=A and alertname=LatencyHigh would 15 | # be batched into a single group. 16 | # 17 | # To aggregate by all possible labels use '...' as the sole label name. 18 | # This effectively disables aggregation entirely, passing through all 19 | # alerts as-is. This is unlikely to be what you want, unless you have 20 | # a very low alert volume or your upstream notification system performs 21 | # its own grouping. Example: group_by: [...] 22 | group_by: ['alertname', 'cluster'] 23 | 24 | # When a new group of alerts is created by an incoming alert, wait at 25 | # least 'group_wait' to send the initial notification. 26 | # This way ensures that you get multiple alerts for the same group that start 27 | # firing shortly after another are batched together on the first 28 | # notification. 29 | group_wait: 30s 30 | 31 | # When the first notification was sent, wait 'group_interval' to send a batch 32 | # of new alerts that started firing for that group. 33 | group_interval: 5m 34 | 35 | # If an alert has successfully been sent, wait 'repeat_interval' to 36 | # resend them. 37 | repeat_interval: 3h 38 | 39 | # All the above attributes are inherited by all child routes and can 40 | # overwritten on each. 41 | 42 | # The child route trees. 43 | routes: 44 | # This routes performs a regular expression match on alert labels to 45 | # catch alerts that are related to a list of services. 
46 | - match_re: 47 | service: ^(foo1|foo2|baz)$ 48 | receiver: team-X-mails 49 | 50 | # The service has a sub-route for critical alerts, any alerts 51 | # that do not match, i.e. severity != critical, fall-back to the 52 | # parent node and are sent to 'team-X-mails' 53 | routes: 54 | - match: 55 | severity: critical 56 | receiver: team-X-pager 57 | 58 | - match: 59 | service: files 60 | receiver: team-Y-mails 61 | 62 | routes: 63 | - match: 64 | severity: critical 65 | receiver: team-Y-pager 66 | 67 | # This route handles all alerts coming from a database service. If there's 68 | # no team to handle it, it defaults to the DB team. 69 | - match: 70 | service: database 71 | 72 | receiver: team-DB-pager 73 | # Also group alerts by affected database. 74 | group_by: [alertname, cluster, database] 75 | 76 | routes: 77 | - match: 78 | owner: team-X 79 | receiver: team-X-pager 80 | 81 | - match: 82 | owner: team-Y 83 | receiver: team-Y-pager 84 | 85 | 86 | # Inhibition rules allow to mute a set of alerts given that another alert is 87 | # firing. 88 | # We use this to mute any warning-level notifications if the same alert is 89 | # already critical. 90 | inhibit_rules: 91 | - source_matchers: 92 | - severity="critical" 93 | target_matchers: 94 | - severity="warning" 95 | # Apply inhibition if the alertname is the same. 96 | # CAUTION: 97 | # If all label names listed in `equal` are missing 98 | # from both the source and target alerts, 99 | # the inhibition rule will apply! 100 | equal: ['alertname'] 101 | 102 | 103 | receivers: 104 | - name: 'team-X-mails' 105 | email_configs: 106 | - to: 'team-X+alerts@example.org, team-Y+alerts@example.org' 107 | 108 | - name: 'team-X-pager' 109 | email_configs: 110 | - to: 'team-X+alerts-critical@example.org' 111 | pagerduty_configs: 112 | - routing_key: 113 | 114 | - name: 'team-Y-mails' 115 | email_configs: 116 | - to: 'team-Y+alerts@example.org' 117 | 118 | - name: 'team-Y-pager' 119 | pagerduty_configs: 120 | - routing_key: 121 | 122 | - name: 'team-DB-pager' 123 | pagerduty_configs: 124 | - routing_key: 125 | -------------------------------------------------------------------------------- /etc/bitcoin/bitcoin-template.conf: -------------------------------------------------------------------------------- 1 | ${RPC_AUTH_LINE} 2 | 3 | logthreadnames=1 4 | logtimemicros=1 5 | 6 | debug=addrman 7 | debug=bench 8 | debug=blockstorage 9 | debug=cmpctblock 10 | debug=mempool 11 | debug=mempoolrej 12 | debug=net 13 | debug=validation 14 | debug=coindb 15 | 16 | rpcbind=0.0.0.0 17 | rpcallowip=0.0.0.0/0 18 | 19 | [regtest] 20 | 21 | ${RPC_AUTH_LINE} 22 | 23 | rpcbind=0.0.0.0 24 | rpcallowip=0.0.0.0/0 25 | -------------------------------------------------------------------------------- /etc/bitcoind-logrotate.conf: -------------------------------------------------------------------------------- 1 | ${BMON_DIR}/services/prod/bitcoin/data/debug.log 2 | { 3 | rotate 30 4 | size 500M 5 | notifempty 6 | compress 7 | delaycompress 8 | sharedscripts 9 | dateext 10 | dateformat -%Y%m%d%H 11 | prerotate 12 | ${HOME}/.venv/bin/docker-compose -f ${BMON_DIR}/docker-compose.yml stop bitcoind 13 | sleep 20 14 | endscript 15 | postrotate 16 | ${HOME}/.venv/bin/docker-compose -f ${BMON_DIR}/docker-compose.yml start bitcoind 17 | endscript 18 | su ${USER} ${USER} 19 | } 20 | -------------------------------------------------------------------------------- /etc/grafana-dashboards-template.yml: -------------------------------------------------------------------------------- 1 | 
apiVersion: 1 2 | 3 | providers: 4 | # an unique provider name. Required 5 | - name: dashboards 6 | type: file 7 | allowUiUpdates: true 8 | options: 9 | # path to dashboard files on disk. Required when using the 'file' type 10 | path: /var/lib/grafana/dashboards 11 | # use folder names from filesystem to create folders in Grafana 12 | foldersFromFilesStructure: true 13 | -------------------------------------------------------------------------------- /etc/grafana-datasources-template.yml: -------------------------------------------------------------------------------- 1 | apiVersion: 1 2 | 3 | datasources: 4 | - name: Prometheus 5 | type: prometheus 6 | access: proxy 7 | url: http://${PROM_ADDRESS} 8 | 9 | - name: Loki 10 | type: loki 11 | access: proxy 12 | url: http://${LOKI_ADDRESS} 13 | 14 | - name: Alertmanager 15 | type: alertmanager 16 | url: http://${ALERTMAN_ADDRESS} 17 | access: proxy 18 | -------------------------------------------------------------------------------- /etc/loki-template.yml: -------------------------------------------------------------------------------- 1 | auth_enabled: false 2 | 3 | server: 4 | http_listen_address: 0.0.0.0 5 | http_listen_port: ${LOKI_PORT} 6 | grpc_listen_port: 9096 7 | 8 | common: 9 | path_prefix: /loki 10 | storage: 11 | filesystem: 12 | chunks_directory: /loki/chunks 13 | rules_directory: /loki/rules 14 | replication_factor: 1 15 | ring: 16 | instance_addr: 127.0.0.1 17 | kvstore: 18 | store: inmemory 19 | 20 | schema_config: 21 | configs: 22 | - from: 2020-10-24 23 | store: boltdb-shipper 24 | object_store: filesystem 25 | schema: v11 26 | index: 27 | prefix: index_ 28 | period: 24h 29 | 30 | ruler: 31 | alertmanager_url: http://${ALERTMAN_ADDRESS} 32 | -------------------------------------------------------------------------------- /etc/prom-alerts.yml: -------------------------------------------------------------------------------- 1 | groups: 2 | - name: is_up 3 | rules: 4 | - alert: InstanceDown 5 | expr: up == 0 6 | for: 3m 7 | labels: 8 | severity: critical 9 | annotations: 10 | summary: "Instance {{ $labels.instance }} ({{ $labels.hostname }}) down" 11 | 12 | - name: has_disk 13 | rules: 14 | - alert: LowDisk 15 | expr: (100 - (node_filesystem_avail_bytes{mountpoint="/"} * 100) / node_filesystem_size_bytes{mountpoint="/"}) > 93 16 | for: 5m 17 | labels: 18 | severity: warning 19 | annotations: 20 | summary: "Instance {{ $labels.instance }} ({{ $labels.hostname }}) is low on disk" 21 | 22 | - name: low_mem 23 | rules: 24 | - alert: LowMem 25 | expr: 1 - node_memory_MemAvailable_bytes / node_memory_MemTotal_bytes > 0.9 26 | for: 5m 27 | labels: 28 | severity: critical 29 | annotations: 30 | summary: "Memory usage high: {{ $value }} on {{ $labels.instance }} ({{ $labels.hostname }})" 31 | 32 | - name: low_peers 33 | rules: 34 | - alert: LowPeers 35 | expr: bitcoin_peers < 6 36 | for: 5m 37 | labels: 38 | severity: warning 39 | annotations: 40 | summary: "{{ $labels.instance }} ({{ $labels.bitcoin_version }}) ({{ $labels.hostname }}) has low peers" 41 | 42 | - name: logs_not_watched 43 | rules: 44 | - alert: BitcoindLogsNotWatched 45 | expr: (time() - bmon_last_bitcoind_log_seen_at) > 90 46 | for: 5m 47 | labels: 48 | severity: critical 49 | annotations: 50 | summary: "{{ $labels.instance }} ({{ $labels.hostname }}) has stopped processing bitcoind logs" 51 | 52 | - name: large_debug 53 | rules: 54 | - alert: BitcoindLargeDebug 55 | expr: bmon_bitcoind_debug_log_size_mibibytes > 5000 56 | for: 5m 57 | labels: 58 | severity: critical 59 
| annotations: 60 | summary: "{{ $labels.instance }} ({{ $labels.hostname }}) has stopped rotating bitcoind logs" 61 | 62 | - name: large_activity_cache 63 | rules: 64 | - alert: BitcoindLargeActivityCache 65 | expr: bmon_mempool_activity_cache_size_mibibytes > 50 66 | for: 5m 67 | labels: 68 | severity: critical 69 | annotations: 70 | summary: "{{ $labels.instance }} ({{ $labels.hostname }}) has stopped pushing mempool activity" 71 | -------------------------------------------------------------------------------- /etc/prom-template.yml: -------------------------------------------------------------------------------- 1 | global: 2 | scrape_interval: 10s 3 | evaluation_interval: 10s 4 | 5 | rule_files: 6 | - "alerts.yml" 7 | 8 | 9 | alerting: 10 | alertmanagers: 11 | - static_configs: 12 | - targets: ["${ALERTMAN_ADDRESS}"] 13 | 14 | scrape_configs: 15 | - job_name: bitcoind 16 | http_sd_configs: 17 | - url: ${WEB_API_URL}/api/prom-config-bitcoind 18 | 19 | - job_name: server 20 | http_sd_configs: 21 | - url: ${WEB_API_URL}/api/prom-config-server 22 | -------------------------------------------------------------------------------- /etc/promtail-template.yml: -------------------------------------------------------------------------------- 1 | server: 2 | http_listen_port: ${PROMTAIL_PORT} 3 | grpc_listen_port: 0 4 | 5 | positions: 6 | filename: /tmp/positions.yaml 7 | 8 | clients: 9 | - url: http://${LOKI_ADDRESS}:/loki/api/v1/push 10 | 11 | scrape_configs: 12 | - job_name: system 13 | static_configs: 14 | - targets: 15 | - localhost 16 | labels: 17 | job: bitcoin 18 | host: "${BMON_HOSTNAME}" 19 | version: "${BITCOIN_VERSION}" 20 | gitsha: "${BITCOIN_GITSHA}" 21 | gitref: "${BITCOIN_GITREF}" 22 | bitcoin_dbcache: "${BITCOIN_DBCACHE}" 23 | bitcoin_prune: "${BITCOIN_PRUNE}" 24 | bitcoin_flags: "${BITCOIN_FLAGS}" 25 | __path__: /bitcoin/data/debug.log 26 | -------------------------------------------------------------------------------- /etc/server-nginx.conf: -------------------------------------------------------------------------------- 1 | server { 2 | server_name bmon.lan grafana.bmon.lan bmon.info grafana.bmon.info; 3 | listen 80; 4 | 5 | location / { 6 | proxy_set_header Host bmon.info; 7 | proxy_pass http://127.0.0.1:3000; 8 | } 9 | } 10 | 11 | server { 12 | server_name sentry.bmon.j.co sentry.bmon.lan; 13 | listen 80; 14 | 15 | location / { 16 | proxy_set_header Host bmon.info; 17 | proxy_pass http://127.0.0.1:9000; 18 | } 19 | } 20 | 21 | server { 22 | server_name web.bmon.lan web.bmon.info web.bmon.j.co; 23 | listen 80; 24 | 25 | location / { 26 | proxy_set_header Host bmon.info; 27 | proxy_pass http://127.0.0.1:8080; 28 | } 29 | } 30 | 31 | server { 32 | server_name prom.bmon.lan prom.bmon.j.co; 33 | listen 80; 34 | 35 | location / { 36 | proxy_set_header Host bmon.info; 37 | proxy_pass http://127.0.0.1:9090; 38 | } 39 | } 40 | 41 | server { 42 | server_name alerts.bmon.lan alerts.bmon.j.co; 43 | listen 80; 44 | 45 | location / { 46 | proxy_set_header Host bmon.info; 47 | proxy_pass http://127.0.0.1:9093; 48 | } 49 | } 50 | 51 | 52 | server { 53 | server_name 10.33.0.2; 54 | listen 80; 55 | 56 | location / { 57 | root /www/data; 58 | } 59 | } 60 | -------------------------------------------------------------------------------- /etc/systemd-bitcoind-unit.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=bmon bitcoind host 3 | 4 | [Service] 5 | Type=oneshot 6 | WorkingDirectory=${bmon_dir} 7 | StandardOutput=journal 8 
| RemainAfterExit=true 9 | 10 | Environment="COMPOSE_PROFILES=bitcoind,prod,prod-bitcoind" 11 | ExecStart=${docker_compose_path} up -d --remove-orphans 12 | ExecStop=${docker_compose_path} stop ; ${docker_compose_path} rm -f 13 | 14 | [Install] 15 | WantedBy=default.target 16 | -------------------------------------------------------------------------------- /etc/systemd-server-sentry-unit.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=bmon sentry 3 | 4 | [Service] 5 | Type=oneshot 6 | WorkingDirectory=${sentry_dir} 7 | StandardOutput=journal 8 | RemainAfterExit=true 9 | 10 | ExecStart=${docker_compose_path} up -d --remove-orphans 11 | ExecStop=${docker_compose_path} rm -fs 12 | 13 | [Install] 14 | WantedBy=default.target 15 | -------------------------------------------------------------------------------- /etc/systemd-server-unit.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=bmon server 3 | 4 | [Service] 5 | Type=oneshot 6 | WorkingDirectory=${bmon_dir} 7 | StandardOutput=journal 8 | RemainAfterExit=true 9 | 10 | Environment="COMPOSE_PROFILES=server,prod" 11 | ExecStart=${docker_compose_path} up -d --remove-orphans 12 | ExecStop=${docker_compose_path} rm -fs 13 | 14 | [Install] 15 | WantedBy=default.target 16 | -------------------------------------------------------------------------------- /frontend-build/bundle-prod.e9a10db85a6bff80a378.js.LICENSE.txt: -------------------------------------------------------------------------------- 1 | /** 2 | * @license 3 | * Lodash 4 | * Copyright OpenJS Foundation and other contributors 5 | * Released under MIT license 6 | * Based on Underscore.js 1.8.3 7 | * Copyright Jeremy Ashkenas, DocumentCloud and Investigative Reporters & Editors 8 | */ 9 | -------------------------------------------------------------------------------- /frontend-build/index.html: -------------------------------------------------------------------------------- 1 | bmon
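The index.html and hashed bundle under frontend-build/ are build artifacts rather than hand-edited sources: the `js` service in docker-compose.yml mounts ./frontend-build at /build and its default command, `yarn run build`, writes the production bundle there. A minimal sketch of regenerating the artifacts by hand (the exact invocation is an assumption, not documented in the repo):

    docker-compose --profile dev run --rm js

which simply runs the service's default `yarn run build` inside the node container.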
-------------------------------------------------------------------------------- /frontend/.eslintrc.js: -------------------------------------------------------------------------------- 1 | module.exports = { 2 | "env": { 3 | "browser": true, 4 | "commonjs": true, 5 | "es6": true, 6 | "jest": true, 7 | "node": true, 8 | }, 9 | "extends": "eslint:recommended", 10 | "rules": { 11 | "indent": ["error", 2, { "SwitchCase": 1 }], 12 | "linebreak-style": ["error", "unix"], 13 | "no-console": [ 0 ], 14 | "no-empty": [ 0 ], 15 | "no-undef": ["error", { "typeof": false }], 16 | "no-unused-vars": ["error", { "varsIgnorePattern": "React" }], 17 | "quotes": ["error", "single"], 18 | "react/jsx-uses-vars": [ 2 ], 19 | }, 20 | "plugins": [ 21 | "react", 22 | ], 23 | "parser": "babel-eslint", 24 | "parserOptions": { 25 | "ecmaFeatures": { 26 | "jsx": true, 27 | "modules": true, 28 | } 29 | } 30 | }; -------------------------------------------------------------------------------- /frontend/.yarnrc: -------------------------------------------------------------------------------- 1 | --modules-folder /node_modules/ 2 | -------------------------------------------------------------------------------- /frontend/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM docker.io/library/node:14 2 | 3 | ARG UID=1000 4 | ARG GID=1000 5 | 6 | WORKDIR /src 7 | # The .yarnrc file places our node_modules in /node_modules. 8 | COPY docker_entrypoint.sh /entrypoint.sh 9 | RUN chmod +rx /entrypoint.sh 10 | ENV PATH /node_modules/.bin/:$PATH 11 | ENTRYPOINT ["/entrypoint.sh"] 12 | -------------------------------------------------------------------------------- /frontend/docker_entrypoint.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | rm -rf /build/* 4 | yarn 5 | exec "$@" 6 | -------------------------------------------------------------------------------- /frontend/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "jamesandjustine", 3 | "version": "0.1.0", 4 | "author": "jamesob", 5 | "license": "MIT", 6 | "description": "", 7 | "private": true, 8 | "scripts": { 9 | "start": "webpack --config webpack.config.js --watch", 10 | "build": "webpack --config webpack.config.js" 11 | }, 12 | "dependencies": { 13 | "isomorphic-fetch": "^2.2.1", 14 | "preact": "*", 15 | "preact-dom": "*", 16 | "preact-compat": "*" 17 | }, 18 | "devDependencies": { 19 | "@babel/core": "^7.17.2", 20 | "@babel/preset-env": "^7.16.11", 21 | "@babel/preset-react": "^7.16.7", 22 | "babel-loader": "^8.2.3", 23 | "webpack": "^5.68.0", 24 | "webpack-cli": "^4.9.2", 25 | "webpack-dev-server": "^4.7.4", 26 | "webpack-merge": "^4.1.0", 27 | "html-webpack-plugin": "*", 28 | "babel-eslint": "^10.0.3", 29 | "babel-loader": "^8.0.6", 30 | "clean-webpack-plugin": "^3.0.0", 31 | "compression-webpack-plugin": "^3.0.0", 32 | "css-loader": "^3.1.0", 33 | "cssnano": "^4.1.10", 34 | "eslint": "^6.6.0", 35 | "eslint-plugin-react": "^7.16.0", 36 | "json-loader": "^0.5.4", 37 | "jsx-loader": "^0.13.2", 38 | "lodash": "^4.17.15", 39 | "mini-css-extract-plugin": "^0.8.0", 40 | "moment": "^2.25.3", 41 | "optimize-css-assets-webpack-plugin": "^5.0.3", 42 | "postcss-import": "^12.0.1", 43 | "postcss-loader": "^3.0.0", 44 | "postcss-nested": "^4.2.1", 45 | "postcss-preset-env": "^6.7.0", 46 | "postcss-simple-vars": "^5.0.2", 47 | "prop-types": "^15.5.10", 48 | "react-icons": "^3.10.0", 49 | 
"react-test-renderer": "^16.11.0", 50 | "style-loader": "*", 51 | "luxon": "*" 52 | }, 53 | "babel": { 54 | "presets": [ 55 | "@babel/env", 56 | "@babel/react" 57 | ], 58 | "env": { 59 | "start": { 60 | "presets": [ 61 | "@babel/env", 62 | "@babel/react" 63 | ] 64 | } 65 | } 66 | } 67 | } 68 | -------------------------------------------------------------------------------- /frontend/src/assets/styles/main.css: -------------------------------------------------------------------------------- 1 | /* @import url('https://fonts.googleapis.com/css2?family=Karla&display=swap'); 2 | */ 3 | 4 | @font-face { 5 | font-family: 'ninepin'; 6 | src: url('/static/ninepin.ttf') format('truetype'); 7 | } 8 | 9 | body { 10 | background-color: #f8f8f8; 11 | padding: 3vw; 12 | } 13 | 14 | #content { 15 | font-family: monospace; 16 | padding-left: 1em; 17 | } 18 | 19 | #bmon-title { 20 | font-size: 3em; 21 | font-family: 'ninepin', monospace; 22 | padding: 0.3em; 23 | border-bottom: 1px solid #222; 24 | border-left: 1px solid #222; 25 | width: max-content; 26 | margin-bottom: 1em; 27 | background-color: #f8f8f8; 28 | } 29 | 30 | .block-connects, .hosts { 31 | display: flex; 32 | flex: 1; 33 | align-items: stretch; 34 | justify-content: left; 35 | overflow-x: scroll; 36 | scrollbar-width: none; 37 | padding-bottom: 17px; 38 | margin-bottom: 3em; 39 | } 40 | 41 | .block-connects::-webkit-scrollbar, .hosts::-webkit-scrollbar { 42 | display: none; 43 | } 44 | 45 | .block-connect, .host { 46 | padding: 0.8em; 47 | border: 1px solid #666; 48 | background-color: #fcf3ec; 49 | filter: drop-shadow(3px 2px 2px black); 50 | margin-right: 1.3em; 51 | } 52 | 53 | .host { 54 | background-color: #f5fbfb; 55 | } 56 | 57 | .block-connect { 58 | padding-right: 1.5rem; 59 | } 60 | 61 | .stats { 62 | width: max-content; 63 | } 64 | 65 | .stats .value table { 66 | font-size: 0.9rem; 67 | } 68 | 69 | .stats .title { 70 | font-weight: bold; 71 | margin-bottom: 0.2rem; 72 | } 73 | 74 | .stat { 75 | margin-bottom: 1em; 76 | } 77 | 78 | .card-title { 79 | font-weight: bold; 80 | font-size: 1.4em; 81 | margin-bottom: 0.8em; 82 | } 83 | 84 | .diffs { 85 | letter-spacing: -1px; 86 | } 87 | 88 | .diffs tr td:first-child { 89 | padding-right: 1em; 90 | } 91 | -------------------------------------------------------------------------------- /frontend/src/assets/styles/style.js: -------------------------------------------------------------------------------- 1 | import './main.css'; 2 | -------------------------------------------------------------------------------- /frontend/src/index.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | bmon 7 | 8 | 9 | 10 |
11 | 12 | 13 | -------------------------------------------------------------------------------- /frontend/src/index.js: -------------------------------------------------------------------------------- 1 | import { useState, useEffect } from 'preact/hooks'; 2 | import React from 'preact/compat'; 3 | import { render } from 'preact'; 4 | import { DateTime } from 'luxon'; 5 | 6 | import './assets/styles/style'; 7 | import { postData, get_data } from './util'; 8 | 9 | import { map } from 'lodash'; 10 | 11 | const App = (props) => { 12 | var children; 13 | const path = window.location.pathname; 14 | 15 | if (path.match(/.*/)) { 16 | children = ; 17 | } 18 | return ( 19 |
20 |
21 |
22 | bmon 23 |
24 |
25 | 26 |
27 | {children} 28 |
29 |
30 | ); 31 | }; 32 | 33 | export const Home = (props) => { 34 | const [ hosts, sethosts ] = useState(null); 35 | const [ blocks, setblocks ] = useState(null); 36 | 37 | useEffect(() => get_data(`/api/hosts`, (resp) => sethosts(resp)), [0]); 38 | useEffect(() => get_data(`/api/blocks`, (resp) => setblocks(resp)), [0]); 39 | 40 | return <> 41 |
42 | {map(blocks, (b) => )} 43 |
44 | 45 |
46 | {map(hosts, (h) => )} 47 |
48 | ; 49 | }; 50 | 51 | 52 | const CardStat = (props) => ( 53 |
54 |
{props.title}
55 |
{props.children}
56 |
57 | ); 58 | 59 | const Card = (props) => { 60 | return
61 |
62 | {props.title} 63 |
64 | 65 |
66 | {props.children} 67 |
68 |
; 69 | }; 70 | 71 | 72 | const BlockCard = (props) => { 73 | const dt = DateTime.fromISO(props.date); 74 | const mindt = DateTime.fromISO(props.min_dt); 75 | const fmtfloat = (flt) => Number.parseFloat(flt).toFixed(4); 76 | const items = { 77 | "date": <>{dt.toFormat('LLL dd')}
{dt.toFormat('HH:mm:ss.uuu')}, 78 | "saw at": <>{mindt.toFormat('LLL dd')}
{mindt.toFormat('HH:mm:ss.uuu')}, 79 | "saw block diffs": <> 80 | 81 | {map(props.diffs, (diff, host) => 82 | 83 | )} 84 |
{host}{fmtfloat(diff)} sec
85 | , 86 | "stddev": <>± {fmtfloat(props.stddev_got_time)} sec, 87 | }; 88 | 89 | return 90 | {map(items, (v, k) => )} 91 | ; 92 | }; 93 | 94 | 95 | const HostCard = (props) => { 96 | console.log(props); 97 | const items = { 98 | "version": <>{props.bitcoin_version}, 99 | "height": <>{props.chaininfo.blocks}, 100 | "pruned?": <>{props.chaininfo.pruned ? 'yes' : 'no'}, 101 | "peers": <> 102 | 103 | {map(props.peers, (addr, subver) => 104 | 105 | )} 106 |
{addr}{subver}
107 | , 108 | }; 109 | const chain = props.chaininfo.chain; 110 | const net = chain == 'main' ? '' : `(${chain})`; 111 | return 112 | {map(items, (v, k) => )} 113 | ; 114 | }; 115 | 116 | 117 | 118 | render(, document.body); 119 | -------------------------------------------------------------------------------- /frontend/src/util.js: -------------------------------------------------------------------------------- 1 | export function get_data(url, callback) { 2 | return fetch(url) 3 | .then(response => response.json()) 4 | .then(data => { 5 | console.log(`Got data`, data); 6 | callback(data); 7 | }); 8 | } 9 | 10 | /// Copypasted from developer.mozilla.org. 11 | export async function postData(url = '', data = {}) { 12 | // Default options are marked with * 13 | const response = await fetch(url, { 14 | method: 'POST', // *GET, POST, PUT, DELETE, etc. 15 | mode: 'cors', // no-cors, *cors, same-origin 16 | cache: 'no-cache', // *default, no-cache, reload, force-cache, only-if-cached 17 | credentials: 'same-origin', // include, *same-origin, omit 18 | headers: { 19 | 'Content-Type': 'application/json' 20 | // 'Content-Type': 'application/x-www-form-urlencoded', 21 | }, 22 | redirect: 'follow', // manual, *follow, error 23 | referrerPolicy: 'no-referrer', // no-referrer, *no-referrer-when-downgrade, origin, origin-when-cross-origin, same-origin, strict-origin, strict-origin-when-cross-origin, unsafe-url 24 | body: JSON.stringify(data) // body data type must match "Content-Type" header 25 | }); 26 | return response.json(); // parses JSON response into native JavaScript objects 27 | } 28 | -------------------------------------------------------------------------------- /frontend/webpack.config.js: -------------------------------------------------------------------------------- 1 | // 2 | // This is written from the perspective of being run in the `js` container. 3 | // 4 | const OptimizeCssAssetsPlugin = require('optimize-css-assets-webpack-plugin'); 5 | const { CleanWebpackPlugin } = require('clean-webpack-plugin'); 6 | const MiniCssExtractPlugin = require('mini-css-extract-plugin'); 7 | const HtmlWebpackPlugin = require('html-webpack-plugin'); 8 | const merge = require('webpack-merge'); 9 | const webpack = require('webpack'); 10 | const path = require('path'); 11 | 12 | const paths = { 13 | src: path.resolve(__dirname, 'src'), 14 | build: '/build' 15 | } 16 | 17 | const htmlConfig = { 18 | template: path.join(paths.src, 'index.html'), 19 | minify: { 20 | collapseWhitespace: true, 21 | } 22 | } 23 | 24 | const common = { 25 | entry: path.join(paths.src, 'index.js'), 26 | resolve: { 27 | extensions: ['.js', '.jsx', '.ts', '.tsx'], 28 | alias: { 29 | "react": "preact/compat", 30 | "react-dom": "preact/compat" 31 | }, 32 | }, 33 | output: { 34 | path: paths.build, 35 | filename: 'bundle-dev.[hash].js', 36 | // This publicPath is important and shared with Django configuration. 
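// (Presumably the '/static/' prefix below has to stay in sync with the static URL
// prefix used on the Django side, where docker/entrypoint.sh runs `manage.py
// collectstatic`; changing one without the other would likely break asset URLs.)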
37 | publicPath: '/static/', 38 | }, 39 | performance: { 40 | hints: false, 41 | }, 42 | mode: 'development', 43 | module: { 44 | rules: [ 45 | { 46 | test: /\.(js|jsx)$/, 47 | exclude: /(node_modules)/, 48 | use: { 49 | loader: 'babel-loader', 50 | options: { 51 | presets: ['@babel/env'] 52 | } 53 | } 54 | }, 55 | { 56 | test: /\.(ts)$/, 57 | exclude: /(node_modules)/, 58 | use: { 59 | loader: 'awesome-typescript-loader', 60 | options: { 61 | useCache: false, 62 | } 63 | } 64 | }, 65 | { 66 | test: /\.(css)$/, 67 | use: [ 68 | 'style-loader', 69 | { loader: 'css-loader', options: { importLoaders: 1 } }, 70 | { 71 | loader: 'postcss-loader', 72 | options: { 73 | map: true, plugins: [ 74 | require('postcss-import'), 75 | require('postcss-nested'), 76 | require('cssnano') 77 | ] 78 | } 79 | } 80 | ], 81 | }, 82 | { 83 | test: /\.(png|jpg|gif)$/, 84 | type: 'asset/resource', 85 | }, 86 | { 87 | test: /\.(woff|woff2|eot|ttf|otf)$/i, 88 | type: 'asset/resource', 89 | }, 90 | ] 91 | }, 92 | plugins: [ 93 | new CleanWebpackPlugin(), 94 | new HtmlWebpackPlugin(htmlConfig), 95 | new MiniCssExtractPlugin({ 96 | filename: '[name].css', 97 | chunkFilename: '[id].css', 98 | ignoreOrder: false, 99 | }), 100 | ] 101 | }; 102 | 103 | const devSettings = { 104 | devtool: 'eval-source-map', 105 | devServer: { 106 | historyApiFallback: true, 107 | quiet: false, 108 | contentBase: paths.build, 109 | }, 110 | plugins: [ 111 | new CleanWebpackPlugin({ cleanStaleWebpackAssets: false }), 112 | ] 113 | } 114 | 115 | const prodSettings = { 116 | mode: 'production', 117 | optimization: { 118 | minimize: true, 119 | }, 120 | devtool: false, 121 | output: { 122 | filename: 'bundle-prod.[hash].js', 123 | }, 124 | plugins: [ 125 | new webpack.DefinePlugin({ 'process.env': { 126 | NODE_ENV: JSON.stringify('production') 127 | }}), 128 | new OptimizeCssAssetsPlugin(), 129 | ] 130 | } 131 | 132 | const TARGET = process.env.npm_lifecycle_event; 133 | process.env.BABEL_ENV = TARGET; 134 | 135 | if (TARGET === 'start') { 136 | module.exports = merge(common, devSettings) 137 | } 138 | 139 | if (TARGET === 'build' || !TARGET) { 140 | module.exports = merge(common, prodSettings) 141 | } 142 | -------------------------------------------------------------------------------- /git-hooks/pre-commit: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env zsh 2 | set -e 3 | 4 | if ( git ls-files . --exclude-standard --others | grep bmon/migrations ) ; then 5 | echo "You forgot to commit migrations!" 
6 | exit 1 7 | fi 8 | 9 | ./dev test 10 | -------------------------------------------------------------------------------- /images/bmon.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chaincodelabs/bmon/39a0f8204f48cd9f84a9d181abaf69ab43747bd0/images/bmon.png -------------------------------------------------------------------------------- /images/netmon.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chaincodelabs/bmon/39a0f8204f48cd9f84a9d181abaf69ab43747bd0/images/netmon.png -------------------------------------------------------------------------------- /images/screenshot.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chaincodelabs/bmon/39a0f8204f48cd9f84a9d181abaf69ab43747bd0/images/screenshot.png -------------------------------------------------------------------------------- /infra/bmon_infra/__init__.py: -------------------------------------------------------------------------------- 1 | 2 | from .config import Host, get_hosts, get_bitcoind_hosts # noqa: F401 3 | -------------------------------------------------------------------------------- /infra/bmon_infra/getbitcoin.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | import os 4 | import sys 5 | import argparse 6 | import typing as t 7 | from pathlib import Path 8 | from dataclasses import dataclass 9 | 10 | from fscm import run, p 11 | 12 | 13 | CORE_URL = "https://bitcoincore.org/bin" 14 | VERSIONPREFIX = "bitcoin-core-" 15 | 16 | 17 | @dataclass 18 | class Release: 19 | version: str 20 | url: str 21 | sha256: str 22 | 23 | 24 | # Hashes were verified by me with the use of the bitcoin-core 25 | # ./contrib/verifybinaries/verify.py script. 
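# A sketch of how a new entry can be vetted before being added below (the filename
# is illustrative): download the tarball, run
#   sha256sum bitcoin-XX.Y-x86_64-linux-gnu.tar.gz
# and check the digest against the signed SHA256SUMS published for that release on
# bitcoincore.org. The sha256 recorded on each Release is what download_bitcoind()
# re-checks at download time before extracting.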
26 | releases = [ 27 | Release( 28 | '24.0rc1', 29 | f'{CORE_URL}/bitcoin-core-24.0/test.rc1/bitcoin-24.0rc1-x86_64-linux-gnu.tar.gz', 30 | 'a05352b7feedeba71f4f124ccaa3d69a85031fab85baa45a1aae74316cd754d7', 31 | ), 32 | Release( 33 | '23.0', 34 | f'{CORE_URL}/bitcoin-core-23.0/bitcoin-23.0-x86_64-linux-gnu.tar.gz', 35 | '2cca490c1f2842884a3c5b0606f179f9f937177da4eadd628e3f7fd7e25d26d0', 36 | ), 37 | Release( 38 | '22.0', 39 | f'{CORE_URL}/bitcoin-core-22.0/bitcoin-22.0-x86_64-linux-gnu.tar.gz', 40 | '59ebd25dd82a51638b7a6bb914586201e67db67b919b2a1ff08925a7936d1b16', 41 | ), 42 | Release( 43 | '0.18.1', 44 | f'{CORE_URL}/bitcoin-core-0.18.1/bitcoin-0.18.1-x86_64-linux-gnu.tar.gz', 45 | '600d1db5e751fa85903e935a01a74f5cc57e1e7473c15fd3e17ed21e202cfe5a', 46 | ), 47 | Release( 48 | '0.10.3', 49 | f'{CORE_URL}/bitcoin-core-0.10.3/bitcoin-0.10.3-linux64.tar.gz', 50 | '586eb5576f71cd1ad2a42a26f67afc87deffc51d9f75348e2c7e96b1a401e23d', 51 | ), 52 | ] 53 | version_to_release = {r.version: r for r in releases} 54 | 55 | 56 | def download_bitcoind(release: Release, dest: t.Optional[Path] = None): 57 | """Download and extract bitcoin binaries into local_dir.""" 58 | dest = dest or Path.cwd() 59 | os.chdir('/tmp') 60 | filename = release.url.split('/')[-1] 61 | 62 | if not Path(filename).exists(): 63 | run(f'wget {release.url}').assert_ok() 64 | 65 | hash = run(f'sha256sum {filename}').assert_ok() 66 | 67 | if (got_hash := hash.stdout.split()[0]) != release.sha256: 68 | raise RuntimeError( 69 | f"incorrect hash found for {filename}: {got_hash} " 70 | f"(expected {release.sha256})") 71 | 72 | run(f'tar xvf {filename}').assert_ok() 73 | dirname = 'bitcoin-' + filename.lstrip('bitcoin-').split('-')[0] 74 | if not dest.exists(): 75 | p(dest).mkdir() 76 | run(f'mv {dirname}/bin/* {dest}').assert_ok() 77 | 78 | 79 | def main(): 80 | parser = argparse.ArgumentParser() 81 | parser.add_argument('version') 82 | parser.add_argument( 83 | '--dest', '-d', help="Directory to place the downloaded binaries into", default=None) 84 | args = parser.parse_args() 85 | 86 | if args.version not in version_to_release: 87 | print(f'Unrecognized version. 
Options are {", ".join(version_to_release.keys())}') 88 | sys.exit(1) 89 | 90 | dest = Path(args.dest) if args.dest else None 91 | download_bitcoind(version_to_release[args.version], dest) 92 | 93 | 94 | if __name__ == "__main__": 95 | main() 96 | -------------------------------------------------------------------------------- /infra/hosts_dev.yml: -------------------------------------------------------------------------------- 1 | wireguard: 2 | 3 | hosts: 4 | 5 | bmon: 6 | tags: [server] 7 | ssh_hostname: bmon.dev.local 8 | wireguard: 9 | wg-bmon: 10 | ip: 10.33.0.2 11 | endpoint: 10.8.1.1 12 | pubkey: +Bxc6NA3doW9lyKqoVybzsRZ3upH8jQpVA2M5myoUHE= 13 | 14 | bitcoind: 15 | tags: [bitcoind] 16 | wireguard: 17 | wg-bmon: 18 | ip: bitcoind 19 | endpoint: 10.8.1.1 20 | pubkey: /Qzuh/N11tPhzjCmq9+H+OzbeHnDLvniCPgxc+5R8l8= 21 | bitcoin: 22 | docker_tag: jamesob/bitcoind:v24.0rc2 23 | 24 | bitcoind-02: 25 | tags: [bitcoind] 26 | wireguard: 27 | wg-bmon: 28 | ip: bitcoind-02 29 | endpoint: 10.8.1.1 30 | pubkey: /Qzuh/N11tPhzjCmq9+H+OzbeHnDLvniCPgxc+5R8l8= 31 | bitcoin: 32 | docker_tag: jamesob/bitcoind:master 33 | -------------------------------------------------------------------------------- /infra/hosts_prod.yml: -------------------------------------------------------------------------------- 1 | wireguard: 2 | 3 | wg-bmon: 4 | cidr: 10.33.0.1/22 5 | port: 51822 6 | interfaces: [enp2s0, enp3s0] 7 | pubkey: VyTZTJM5LSOwj7b0cYefPjQp/NWcyZ6euqcWqtk1KDI= 8 | host: apu2 9 | # external_peers: 10 | # b1.slug: Q3ZHherioK0EY6Rd7O9B1RooHtOJHd6JhsWPTeh/JQ4=, 10.33.0.10 11 | 12 | hosts: 13 | 14 | bmon: 15 | tags: [server] 16 | ssh_hostname: 10.33.0.2 17 | check_host_keys: accept 18 | wireguard: 19 | wg-bmon: 20 | ip: 10.33.0.2 21 | endpoint: 10.8.1.1 22 | pubkey: +Bxc6NA3doW9lyKqoVybzsRZ3upH8jQpVA2M5myoUHE= 23 | 24 | bitcoin-01: 25 | tags: [bitcoind] 26 | ssh_hostname: 10.33.0.3 27 | check_host_keys: accept 28 | # outbound_wireguard: wg-switzerland-01 29 | wireguard: 30 | wg-bmon: 31 | ip: 10.33.0.3 32 | a: bitcoin-0.james.bmon.info 33 | endpoint: 10.8.1.1 34 | pubkey: /Qzuh/N11tPhzjCmq9+H+OzbeHnDLvniCPgxc+5R8l8= 35 | bitcoin: 36 | docker_tag: jamesob/bitcoind:23.0 37 | 38 | # Down due to harddrive failure 39 | # 40 | # b-01.slug: 41 | # tags: [bitcoind, vagrant] 42 | # ssh_hostname: 10.33.0.20 43 | # username: vagrant 44 | # check_host_keys: accept 45 | # wireguard: 46 | # wg-bmon: 47 | # ip: 10.33.0.20 48 | # endpoint: 10.8.1.1 49 | # a: b-01.slug.james.bmon.info 50 | # pubkey: VzvkMZx+ZpzCmZvgFECohT1pa2QyC40qp7dHFaEzGn8= 51 | # bitcoin: 52 | # prune: 550 53 | # dbcache: 1200 54 | # docker_tag: jamesob/bitcoind:22.0 55 | 56 | # b-02.slug: 57 | # tags: [bitcoind, vagrant] 58 | # ssh_hostname: 10.33.0.21 59 | # check_host_keys: accept 60 | # username: vagrant 61 | # wireguard: 62 | # wg-bmon: 63 | # ip: 10.33.0.21 64 | # endpoint: 10.8.1.1 65 | # a: b-02.slug.james.bmon.info 66 | # pubkey: t/jLtFWsGaEy/RGp5ehcY7cVIdsKW8d1iJzTTUBmIxU= 67 | # bitcoin: 68 | # prune: 550 69 | # dbcache: 1200 70 | # docker_tag: jamesob/bitcoind:2023-05-parallel-block-downloads 71 | 72 | # b-03.slug: 73 | # tags: [bitcoind, vagrant] 74 | # ssh_hostname: 10.33.0.22 75 | # check_host_keys: accept 76 | # # outbound_wireguard: wg-australia-01 77 | # username: vagrant 78 | # wireguard: 79 | # wg-bmon: 80 | # ip: 10.33.0.22 81 | # endpoint: 10.8.1.1 82 | # a: b-03.slug.james.bmon.info 83 | # pubkey: FCuA6MLGa5PuNX95ZKfjo9WodcI2r3u2792wy6gjxSw= 84 | # bitcoin: 85 | # prune: 550 86 | # dbcache: 1200 87 | # docker_tag: 
jamesob/bitcoind:0.19.1 88 | 89 | # b-04.slug: 90 | # tags: [bitcoind, vagrant] 91 | # ssh_hostname: 10.33.0.23 92 | # check_host_keys: accept 93 | # # outbound_wireguard: wg-japan-01 94 | # username: vagrant 95 | # wireguard: 96 | # wg-bmon: 97 | # ip: 10.33.0.23 98 | # endpoint: 10.8.1.1 99 | # a: b-04.slug.james.bmon.info 100 | # pubkey: L3DN/gKgDT9gF2qrvcybjV09MC9G4zT7u1Lsa34dTR4= 101 | # bitcoin: 102 | # prune: 550 103 | # dbcache: 1200 104 | # docker_tag: jamesob/bitcoind:v25.0rc2 105 | 106 | ssd-1.ccl: 107 | tags: [bitcoind] 108 | ssh_hostname: ssd-1.ccl.bmon.j.co 109 | check_host_keys: accept 110 | username: ccl 111 | wireguard: 112 | wg-bmon: 113 | ip: 10.33.0.50 114 | endpoint: au92.org 115 | pubkey: 17Zj7t2SD9Yy9BON350GgohO+uJ+84fzO5AtARU+yQE= 116 | bitcoin: 117 | prune: 5000 118 | dbcache: 3000 119 | docker_tag: jamesob/bitcoind:master 120 | 121 | ssd-2.ccl: 122 | tags: [bitcoind] 123 | ssh_hostname: ssd-2.ccl.bmon.j.co 124 | check_host_keys: accept 125 | username: ccl 126 | wireguard: 127 | wg-bmon: 128 | ip: 10.33.0.51 129 | endpoint: au92.org 130 | pubkey: 6XVsUu/fhRSqZ5nlysVg+3Sobl/Lx88ej0v02QRhGRY= 131 | bitcoin: 132 | prune: 5000 133 | dbcache: 3000 134 | docker_tag: jamesob/bitcoind:744157ef1a0b61ceb714cc27c9ae158907aecdc9 135 | extra_args: -v2transport=1 136 | listen: 1 137 | 138 | # Google hosts taken down because they are unreliable. 139 | # b-milan-goog-01: 140 | # tags: [bitcoind] 141 | # ssh_hostname: 34.154.73.245 142 | # become_method: sudo 143 | # check_host_keys: accept 144 | # username: james 145 | # wireguard: 146 | # wg-bmon: 147 | # ip: 10.33.0.141 148 | # endpoint: au92.org 149 | # pubkey: 150 | # bitcoin: 151 | # prune: 5000 152 | # dbcache: 3000 153 | # docker_tag: jamesob/bitcoind:0.19.1 154 | 155 | # b-paris-goog-01: 156 | # tags: [bitcoind] 157 | # ssh_hostname: 34.163.100.36 158 | # become_method: sudo 159 | # check_host_keys: accept 160 | # username: james 161 | # wireguard: 162 | # wg-bmon: 163 | # ip: 10.33.0.140 164 | # endpoint: au92.org 165 | # pubkey: 166 | # bitcoin: 167 | # prune: 5000 168 | # dbcache: 3000 169 | # docker_tag: jamesob/bitcoind:0.19.1 170 | -------------------------------------------------------------------------------- /infra/pyproject.toml: -------------------------------------------------------------------------------- 1 | [build-system] 2 | requires = ["setuptools", "setuptools-scm"] 3 | build-backend = "setuptools.build_meta" 4 | 5 | [project] 6 | name = "bmon-infra" 7 | description = "Infra for bmon" 8 | requires-python = ">=3.9" 9 | keywords = ["bitcoin"] 10 | license = {text = "MIT License"} 11 | classifiers = [ 12 | "Programming Language :: Python :: 3", 13 | ] 14 | dependencies = [ 15 | 'importlib-metadata; python_version<"3.10"', 16 | 'mitogen @ git+https://git@github.com/jamesob/mitogen.git', 17 | 'fscm @ git+https://git@github.com/jamesob/fscm.git', 18 | 'clii >= 1.0.0', 19 | 'pyyaml', 20 | ] 21 | version = "0.0.1" 22 | 23 | [project.scripts] 24 | bmon-config = "bmon_infra.config:main" 25 | bmon-infra = "bmon_infra.infra:main" 26 | 27 | [tool.setuptools] 28 | packages = ["bmon_infra"] 29 | -------------------------------------------------------------------------------- /manage.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | """Django's command-line utility for administrative tasks.""" 3 | import os 4 | import sys 5 | 6 | 7 | def main(): 8 | """Run administrative tasks.""" 9 | os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'bmon.settings') 10 | 
try: 11 | from django.core.management import execute_from_command_line 12 | except ImportError as exc: 13 | raise ImportError( 14 | "Couldn't import Django. Are you sure it's installed and " 15 | "available on your PYTHONPATH environment variable? Did you " 16 | "forget to activate a virtual environment?" 17 | ) from exc 18 | execute_from_command_line(sys.argv) 19 | 20 | 21 | if __name__ == '__main__': 22 | main() 23 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [build-system] 2 | requires = ["setuptools", "setuptools-scm"] 3 | build-backend = "setuptools.build_meta" 4 | 5 | [project] 6 | name = "bmon" 7 | description = "A high-level monitoring system for Bitcoin Core" 8 | readme = "README.rst" 9 | requires-python = ">=3.10" 10 | keywords = ["bitcoin"] 11 | license = {text = "MIT License"} 12 | classifiers = [ 13 | "Programming Language :: Python :: 3", 14 | ] 15 | dependencies = [ 16 | 'importlib-metadata; python_version<"3.10"', 17 | 'psycopg2', # for postgresql support 18 | 'django', 19 | 'redis[hiredis]', 20 | 'huey @ git+https://github.com/jamesob/huey.git', 21 | 'walrus', 22 | 'django-ninja', 23 | 'whitenoise', 24 | 'fastavro', 25 | 'clii', 26 | 'google-cloud-storage', 27 | 'prometheus-client', 28 | 'sentry-sdk', 29 | ] 30 | version = "0.0.1" 31 | 32 | [project.scripts] 33 | bmon-watch-bitcoind-logs = "bmon.bitcoind_tasks:watch_bitcoind_logs" 34 | bmon-util = "bmon.util_cli:main" 35 | bmon-bitcoind-monitor = "bmon.bitcoind_monitor:main" 36 | bmon-server-monitor = "bmon.server_monitor:main" 37 | 38 | 39 | [project.optional-dependencies] 40 | tests = [ 41 | 'pytest', 42 | 'pytest-django', 43 | 'django-stubs', 44 | 'mypy', 45 | 'flake8', 46 | 'types-redis', 47 | ] 48 | 49 | [tool.setuptools] 50 | packages = ["bmon"] 51 | 52 | [tool.mypy] 53 | plugins = ["mypy_django_plugin.main"] 54 | exclude = [ 55 | "migrations/*", 56 | "bitcoin/rpc.py", 57 | ] 58 | 59 | [[tool.mypy.overrides]] 60 | module = [ 61 | 'walrus', 62 | 'huey', 63 | 'pygments', 64 | 'pygments.*', 65 | 'sqlparse', 66 | 'clii', 67 | 'google.*', 68 | 'bmon_infra', 69 | 'fscm.*', 70 | ] 71 | ignore_missing_imports = true 72 | 73 | [tool.pylsp-mypy] 74 | enabled = true 75 | live_mode = true 76 | strict = false 77 | 78 | [tool.django-stubs] 79 | django_settings_module = "bmon.settings" 80 | -------------------------------------------------------------------------------- /pytest.ini: -------------------------------------------------------------------------------- 1 | [pytest] 2 | 3 | DJANGO_SETTINGS_MODULE = bmon.settings_test 4 | python_files = tests.py test_*.py *_tests.py 5 | --------------------------------------------------------------------------------
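pytest.ini above points test runs at the bmon.settings_test module rather than the production settings, and the pre-commit hook drives the same suite through ./dev test. A minimal sketch of invoking it directly, assuming the optional test dependencies from pyproject.toml are installed (the commands are illustrative, not a documented workflow):

    pip install -e .[tests]
    pytest

pytest-django reads DJANGO_SETTINGS_MODULE from pytest.ini, so no extra environment setup should be needed.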