├── .cirrus.yml ├── .dockerignore ├── .github ├── ISSUE_TEMPLATE │ └── bug_report.md └── workflows │ ├── docker-push.yml │ ├── docker-tag.yml │ ├── nigiri-infra.yml │ └── pytest.yml ├── .gitignore ├── .pre-commit-config.yaml ├── Dockerfile ├── LICENSE ├── MANIFEST.in ├── README.md ├── pyproject.toml ├── requirements.txt ├── setup.py ├── src └── cryptoadvance │ └── spectrum │ ├── __init__.py │ ├── __main__.py │ ├── cli │ ├── __init__.py │ └── cli_server.py │ ├── config.py │ ├── db.py │ ├── elsock.py │ ├── server.py │ ├── server_endpoints │ ├── __init__.py │ ├── core_api.py │ └── healthz.py │ ├── spectrum.py │ ├── spectrum_error.py │ ├── util.py │ └── util_specter.py ├── tests ├── conftest.py ├── fix_infrastructure.py ├── fix_keys_and_seeds_embit.py ├── install_noded.sh ├── integration │ ├── basics.py │ ├── elsock_test.py │ ├── spectrum_test.py │ └── wallet_import_rescan.py ├── test_bdk.py ├── test_config.py ├── test_elsock.py ├── test_se_healthz.py ├── test_spectrum.py ├── test_spectrum_rpc.py └── test_util.py └── utils ├── compile_and_run_electrs.py └── release.sh /.cirrus.yml: -------------------------------------------------------------------------------- 1 | container: 2 | # image: python:slim 3 | # image: ubuntu:focal 4 | # image: python:3.8-buster 5 | image: registry.gitlab.com/cryptoadvance/specter-desktop/cirrus-focal:20210831 6 | 7 | # We assume here that we're having a proper python3 system including virtualenv and pip 8 | prep_stuff_template: &PREP_STUFF_TEMPLATE 9 | bitcoind_installation_cache: 10 | folder: ./tests/bitcoin 11 | fingerprint_script: 12 | - cat tests/bitcoin_gitrev_pinned 2> /dev/null || true 13 | - cat /etc/os-release | grep VERSION 14 | populate_script: ./tests/install_noded.sh --debug --bitcoin binary 15 | verify_script: 16 | - echo " --> Version of python, virtualenv and pip3" 17 | - python3 --version && virtualenv --version && pip3 --version 18 | - echo " --> Executables in tests/bitcoin/src" 19 | - find tests/bitcoin/src -maxdepth 1 
-type f -executable -exec ls -ld {} \; || true 20 | - echo " --> Executables in tests/bitcoin/bin" 21 | - find tests/bitcoin/bin -maxdepth 1 -type f -executable -exec ls -ld {} \; || true 22 | - echo " --> bitcoind version" 23 | - tests/bitcoin/src/bitcoind -version | head -1 || true 24 | - tests/bitcoin/bin/bitcoind -version | head -1 || true 25 | 26 | 27 | pip_script: 28 | #folder: /tmp/cirrus-ci-build/.env 29 | #fingerprint_script: echo muh && cat requirements.txt && cat test_requirements.txt 30 | #populate_script: 31 | - virtualenv --python=python .env 32 | - source ./.env/bin/activate 33 | - pip3 install -r requirements.txt --require-hashes && pip3 install -r test_requirements.txt 34 | install_script: 35 | - source ./.env/bin/activate 36 | - pip3 install -e . 37 | 38 | test_task: 39 | pre_prep_script: 40 | - apt-get update && apt-get install -y --no-install-recommends python3-dev python3-pip wget 41 | << : *PREP_STUFF_TEMPLATE 42 | test_script: 43 | - source ./.env/bin/activate 44 | - echo $PATH 45 | - pytest --cov=cryptoadvance --junitxml=./testresults.xml 46 | always: 47 | junit_artifacts: 48 | path: "./testresults.xml" 49 | format: junit 50 | 51 | 52 | 53 | enhanced_test_task: 54 | pre_prep_script: 55 | - apt-get update && apt-get install -y --no-install-recommends python3-dev python3-pip wget 56 | bitcoind_installation_cache: 57 | folder: ./tests/bitcoin 58 | fingerprint_script: 59 | - cat tests/bitcoin_gitrev_pinned 2> /dev/null || true 60 | - cat /etc/os-release | grep VERSION 61 | populate_script: ./tests/install_noded.sh --debug --bitcoin compile 62 | verify_script: 63 | - echo " --> Version of python, virtualenv and pip3" 64 | - python3 --version && virtualenv --version && pip3 --version 65 | - echo " --> Executables in tests/bitcoin/src" 66 | - find tests/bitcoin/src -maxdepth 1 -type f -executable -exec ls -ld {} \; || true 67 | - echo " --> Executables in tests/bitcoin/bin" 68 | - find tests/bitcoin/bin -maxdepth 1 -type f -executable -exec ls -ld 
{} \; || true 69 | - echo " --> bitcoind version" 70 | - tests/bitcoin/src/bitcoind -version | head -1 || true 71 | - tests/bitcoin/bin/bitcoind -version | head -1 || true 72 | 73 | pip_script: 74 | #folder: /tmp/cirrus-ci-build/.env 75 | #fingerprint_script: echo muh && cat requirements.txt && cat test_requirements.txt 76 | #populate_script: 77 | - virtualenv --python=python .env 78 | - source ./.env/bin/activate 79 | - pip3 install -r requirements.txt --require-hashes && pip3 install -r test_requirements.txt 80 | install_script: 81 | - source ./.env/bin/activate 82 | - pip3 install -e . 83 | test_script: 84 | - source ./.env/bin/activate 85 | - echo $PATH 86 | #- pip3 install -e . 87 | - pytest tests/traffic_gen.py 88 | always: 89 | junit_artifacts: 90 | path: "./testresults.xml" 91 | format: junit -------------------------------------------------------------------------------- /.dockerignore: -------------------------------------------------------------------------------- 1 | .gitignore -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/bug_report.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Bug report 3 | about: Create a report to help us improve 4 | title: '' 5 | labels: '' 6 | assignees: '' 7 | 8 | --- 9 | 10 | **Describe the bug** 11 | A clear and concise description of what the bug is. 12 | 13 | **To Reproduce** 14 | Steps to reproduce the behavior: 15 | 1. Start it like '...' 16 | 2. use software or curl or bitcoin-cli like '....' 17 | 3. See the result and/or the logs 18 | 19 | **Expected behavior** 20 | A clear and concise description of what you expected to happen. 21 | 22 | 23 | **Desktop (please complete the following information):** 24 | - which version do you use 25 | - What's your backend? 26 | - Which Database do you use? 27 | 28 | **Additional context** 29 | Add any other context about the problem here. 
30 | -------------------------------------------------------------------------------- /.github/workflows/docker-push.yml: -------------------------------------------------------------------------------- 1 | name: Build Docker container on push 2 | 3 | on: 4 | push: 5 | branches: 6 | - "*" 7 | 8 | jobs: 9 | build: 10 | name: Build image 11 | runs-on: ubuntu-20.04 12 | 13 | steps: 14 | - name: Checkout project 15 | uses: actions/checkout@v2 16 | 17 | - name: Set env variables 18 | run: | 19 | echo "BRANCH=$(echo ${GITHUB_REF#refs/heads/} | sed 's/\//-/g')" >> $GITHUB_ENV 20 | REPO_OWNER=${{ github.repository_owner }} 21 | echo "IMAGE_NAME=${REPO_OWNER,,}/${GITHUB_REPOSITORY#*/}" >> $GITHUB_ENV 22 | 23 | - name: Login to GitHub Container Registry 24 | uses: docker/login-action@v1 25 | with: 26 | registry: ghcr.io 27 | username: ${{ github.repository_owner }} 28 | password: ${{ secrets.GITHUB_TOKEN }} 29 | 30 | - name: Set up QEMU 31 | uses: docker/setup-qemu-action@v1 32 | id: qemu 33 | 34 | - name: Setup Docker buildx action 35 | uses: docker/setup-buildx-action@v1 36 | id: buildx 37 | 38 | - name: Run Docker buildx 39 | run: | 40 | docker buildx build \ 41 | --platform linux/amd64,linux/arm64 \ 42 | --tag ghcr.io/$IMAGE_NAME:$BRANCH \ 43 | --output "type=registry" ./ 44 | -------------------------------------------------------------------------------- /.github/workflows/docker-tag.yml: -------------------------------------------------------------------------------- 1 | name: Build Docker container on tag 2 | 3 | on: 4 | push: 5 | tags: 6 | - v[0-9]+.[0-9]+.[0-9]+ 7 | - v[0-9]+.[0-9]+.[0-9]+-* 8 | 9 | jobs: 10 | build: 11 | name: Build image 12 | runs-on: ubuntu-20.04 13 | 14 | steps: 15 | - name: Checkout project 16 | uses: actions/checkout@v2 17 | 18 | - name: Set env variables 19 | run: | 20 | echo "TAG=${GITHUB_REF/refs\/tags\//}" >> $GITHUB_ENV 21 | REPO_OWNER=${{ github.repository_owner }} 22 | echo "IMAGE_NAME=${REPO_OWNER,,}/${GITHUB_REPOSITORY#*/}" >> 
$GITHUB_ENV 23 | 24 | - name: Login to GitHub Container Registry 25 | uses: docker/login-action@v1 26 | with: 27 | registry: ghcr.io 28 | username: ${{ github.repository_owner }} 29 | password: ${{ secrets.GITHUB_TOKEN }} 30 | 31 | - name: Set up QEMU 32 | uses: docker/setup-qemu-action@v1 33 | 34 | - name: Setup Docker buildx action 35 | uses: docker/setup-buildx-action@v1 36 | 37 | - name: Run Docker buildx 38 | run: | 39 | docker buildx build \ 40 | --platform linux/amd64,linux/arm64 \ 41 | --tag ghcr.io/$IMAGE_NAME:$TAG \ 42 | --output "type=registry" ./ 43 | -------------------------------------------------------------------------------- /.github/workflows/nigiri-infra.yml: -------------------------------------------------------------------------------- 1 | name: Nigiri pipeline 2 | 3 | on: 4 | push: 5 | branches: [master] 6 | pull_request: 7 | branches: [master] 8 | 9 | jobs: 10 | integration: 11 | name: Integration Tests 12 | runs-on: ubuntu-latest 13 | strategy: 14 | matrix: 15 | python-version: ["3.10"] 16 | 17 | steps: 18 | - name: Check out repository code 19 | uses: actions/checkout@v3 20 | - name: Set up Python ${{ matrix.python-version }} 21 | uses: actions/setup-python@v3 22 | with: 23 | python-version: ${{ matrix.python-version }} 24 | - name: Upgrade pip 25 | run: | 26 | python3 -m pip install --upgrade pip 27 | - name: Install dependencies 28 | run: | 29 | pip3 install -e . 
30 | pip3 install ".[test]" 31 | - name: Run Nigiri 32 | uses: vulpemventures/nigiri-github-action@v1 33 | with: 34 | use_liquid: false 35 | - name: Run integration tests 36 | run: | 37 | pytest tests/integration/basics.py 38 | pytest tests/integration/spectrum_test.py 39 | pytest tests/integration/elsock_test.py 40 | -------------------------------------------------------------------------------- /.github/workflows/pytest.yml: -------------------------------------------------------------------------------- 1 | name: Run Python tests 2 | 3 | on: 4 | push: 5 | pull_request: 6 | branches: [master] 7 | 8 | jobs: 9 | build: 10 | name: Run tests 11 | runs-on: ubuntu-latest 12 | strategy: 13 | matrix: 14 | python-version: ["3.10"] 15 | 16 | steps: 17 | # This action provides the following functionality for GitHub Actions users: 18 | # * Installing a version of Python or PyPy and (by default) adding it to the PATH 19 | # * Optionally caching dependencies for pip, pipenv and poetry 20 | # * Registering problem matchers for error output 21 | - uses: actions/checkout@v3 22 | - name: Set up Python ${{ matrix.python-version }} 23 | uses: actions/setup-python@v3 24 | with: 25 | python-version: ${{ matrix.python-version }} 26 | - name: Install dependencies 27 | run: | 28 | pip3 install -e . 
29 | pip3 install ".[test]" 30 | - name: pytest 31 | run: | 32 | pytest -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | data 2 | config.toml 3 | utils/db/ 4 | utils/electrs-0.9.9/ 5 | utils/electrs.toml 6 | utils/electrs.tar.gz 7 | 8 | tests/bitcoin* 9 | 10 | # Byte-compiled / optimized / DLL files 11 | __pycache__/ 12 | *.py[cod] 13 | *$py.class 14 | 15 | # C extensions 16 | *.so 17 | 18 | # Distribution / packaging 19 | .Python 20 | build/ 21 | develop-eggs/ 22 | dist/ 23 | downloads/ 24 | eggs/ 25 | .eggs/ 26 | lib/ 27 | lib64/ 28 | parts/ 29 | sdist/ 30 | var/ 31 | wheels/ 32 | pip-wheel-metadata/ 33 | share/python-wheels/ 34 | *.egg-info/ 35 | .installed.cfg 36 | *.egg 37 | MANIFEST 38 | 39 | # PyInstaller 40 | # Usually these files are written by a python script from a template 41 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 42 | *.manifest 43 | *.spec 44 | 45 | # Installer logs 46 | pip-log.txt 47 | pip-delete-this-directory.txt 48 | 49 | # Unit test / coverage reports 50 | htmlcov/ 51 | .tox/ 52 | .nox/ 53 | .coverage 54 | .coverage.* 55 | .cache 56 | nosetests.xml 57 | coverage.xml 58 | *.cover 59 | *.py,cover 60 | .hypothesis/ 61 | .pytest_cache/ 62 | 63 | # Translations 64 | *.mo 65 | *.pot 66 | 67 | # Django stuff: 68 | *.log 69 | local_settings.py 70 | db.sqlite3 71 | db.sqlite3-journal 72 | 73 | # Flask stuff: 74 | instance/ 75 | .webassets-cache 76 | 77 | # Scrapy stuff: 78 | .scrapy 79 | 80 | # Sphinx documentation 81 | docs/_build/ 82 | 83 | # PyBuilder 84 | target/ 85 | 86 | # Jupyter Notebook 87 | .ipynb_checkpoints 88 | 89 | # IPython 90 | profile_default/ 91 | ipython_config.py 92 | 93 | # pyenv 94 | .python-version 95 | 96 | # pipenv 97 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 
98 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 99 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 100 | # install all needed dependencies. 101 | #Pipfile.lock 102 | 103 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow 104 | __pypackages__/ 105 | 106 | # Celery stuff 107 | celerybeat-schedule 108 | celerybeat.pid 109 | 110 | # SageMath parsed files 111 | *.sage.py 112 | 113 | # Environments 114 | .env 115 | .venv 116 | env/ 117 | venv/ 118 | ENV/ 119 | env.bak/ 120 | venv.bak/ 121 | 122 | # Spyder project settings 123 | .spyderproject 124 | .spyproject 125 | 126 | # Rope project settings 127 | .ropeproject 128 | 129 | # mkdocs documentation 130 | /site 131 | 132 | # mypy 133 | .mypy_cache/ 134 | .dmypy.json 135 | dmypy.json 136 | 137 | # Pyre type checker 138 | .pyre/ 139 | 140 | # MacOS 141 | .DS_Store 142 | src/cryptoadvance/spectrum/_version.py 143 | -------------------------------------------------------------------------------- /.pre-commit-config.yaml: -------------------------------------------------------------------------------- 1 | repos: 2 | - repo: https://github.com/psf/black 3 | rev: 22.3.0 4 | hooks: 5 | - id: black 6 | language_version: python3.8 7 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM python:3.10-slim-bullseye 2 | 3 | RUN pip install --upgrade pip 4 | 5 | WORKDIR /usr/src/app 6 | ENV PYTHONUNBUFFERED 1 7 | EXPOSE 8081 8 | 9 | COPY . . 10 | RUN mkdir /home/.ssh 11 | 12 | RUN pip3 install -r requirements.txt && pip3 install -e . 
13 | CMD [ "python3", "-m", "cryptoadvance.spectrum", "server", "--config", "cryptoadvance.spectrum.config.ProductionConfig", "--host", "0.0.0.0"] -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2023 specter.solutions 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
-------------------------------------------------------------------------------- /MANIFEST.in: -------------------------------------------------------------------------------- 1 | recursive-include src/cryptoadvance/specterext/spectrum/templates * 2 | recursive-include src/cryptoadvance/specterext/spectrum/static * 3 | recursive-include src/cryptoadvance/specterext/spectrum/*/LC_MESSAGES *.mo 4 | recursive-include src/cryptoadvance/specterext/spectrum/translations/*/LC_MESSAGES *.po 5 | include requirements.txt -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Spectrum - Specter Desktop + Electrum 2 | 3 | This is a electrum-adapter. It exposes a Bitcoin-Core style API while using an electron API in the backend. It might be useful in specific usecases, e.g. having better performance when connecting to a electrum-server via Tor. In order to do that, it needs a Database. Quite easily you can use a kind of builtin SQLite. Depending on your usecase, you might want to use an external DB. 4 | 5 | ## Modes of usage 6 | 7 | This can be used either in standalone mode or as a specter-extension. The second option is probably the main use-case. 8 | 9 | ## Standalone 10 | 11 | Get this to work with something like that: 12 | ``` 13 | python3 --version # Make sure you have at least 3.8. Might also work with lower versions though 14 | virtualenv --python=python3 .env 15 | . ./.env/bin/activate 16 | pip3 install -e . 
17 | 18 | # If you have a electrum server running on localhost: 19 | python3 -m cryptoadvance.spectrum server --config cryptoadvance.spectrum.config.NigiriLocalElectrumLiteConfig 20 | 21 | # If you want to run on mainnet and use emzy's Server 22 | python3 -m cryptoadvance.spectrum server --config cryptoadvance.spectrum.config.EmzyElectrumLiteConfig 23 | 24 | # Using Emzy's server but with a postgres 25 | export DB_USERNAME=bla 26 | export DB_PASSWORD=blub 27 | python3 -m cryptoadvance.spectrum server --config cryptoadvance.spectrum.config.EmzyElectrumPostgresConfig 28 | ``` 29 | 30 | Check the `config.py` for the env-vars which need to be exported in order to connect to something different than localhost. 31 | 32 | ## Specter Extension 33 | 34 | In order to get a development environment: 35 | ``` 36 | virtualenv --python=python3 .env 37 | . ./.env/bin/activate 38 | pip3 install -e . 39 | pip3 install cryptoadvance.specter 40 | python3 -m cryptoadvance.specter server --config DevelopmentConfig --debug 41 | ``` 42 | 43 | 44 | ## TODO: 45 | 46 | - refill keypool when address is used or new address is requested 47 | - flask `debug=True` flag creates two electrum sockets that notify twice - this causes duplications in the database 48 | - reconnect with electrum on disconnect 49 | - add support for credentials / cookie file for RPC calls 50 | 51 | 52 | ## Run the Tests 53 | 54 | ```sh 55 | pip3 install -e ".[test]" 56 | pytest 57 | ``` 58 | 59 | ## Development 60 | 61 | Before your create a PR, make sure to [blackify](https://github.com/psf/black) all your changes. 
In order to automate that, 62 | there is a git [pre-commit hook](https://ljvmiranda921.github.io/notebook/2018/06/21/precommits-using-black-and-flake8/) which you can simply install like this: 63 | ``` 64 | pre-commit install 65 | ``` 66 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [build-system] 2 | requires = [ 3 | "setuptools>=45", "setuptools_scm[toml]>=6.2", "babel" 4 | ] 5 | build-backend = "setuptools.build_meta" 6 | 7 | [project] 8 | name = "cryptoadvance.spectrum" 9 | 10 | authors = [ 11 | { name="Stepan Snigirev"}, 12 | { name="k9ert"}, 13 | ] 14 | description = "Implements A Bitcoin Core API which querying an Electrum" 15 | 16 | urls = { Homepage = "https://github.com/cryptoadvance/spectrum" } 17 | readme = "README.md" 18 | license = {file = "LICENSE"} 19 | 20 | 21 | requires-python = ">=3.10" 22 | 23 | classifiers =[ 24 | 'Programming Language :: Python :: 3', 25 | 'License :: OSI Approved :: MIT License', 26 | 'Operating System :: OS Independent', 27 | 'Framework :: Flask', 28 | ] 29 | dynamic=["dependencies","version"] 30 | 31 | [tool.setuptools.dynamic] 32 | dependencies = {file = ["requirements.txt"]} 33 | 34 | [tool.setuptools_scm] 35 | write_to = "src/cryptoadvance/spectrum/_version.py" 36 | 37 | [tool.pytest.ini_options] 38 | norecursedirs = "tests/bintegration/*" 39 | log_format = "[%(levelname)8s] %(message)s %(name)s (%(filename)s:%(lineno)s)" 40 | markers = [ 41 | "slow: mark test as slow.", 42 | "elm: mark test as elementsd dependent", 43 | ] 44 | python_files = "tests/test*.py" 45 | 46 | filterwarnings = [ 47 | "ignore::DeprecationWarning:bitbox02[.*]" 48 | ] 49 | 50 | [project.optional-dependencies] 51 | test = [ 52 | "pytest >=7.1.3", 53 | "pytest-cov[all]", 54 | "mock", 55 | "black", 56 | "pre-commit", 57 | "bdkpython" 58 | ] 
-------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | embit>=0.6.1 2 | Flask>=2.1.1 3 | Flask-SQLAlchemy==2.5.1 4 | sqlalchemy==2.0.30 5 | psycopg2-binary 6 | requests>=2.26.0 7 | pysocks==1.7.1 -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | from setuptools import setup, find_namespace_packages 2 | 3 | setup( 4 | packages=find_namespace_packages("src", include=["cryptoadvance.*"]), 5 | package_dir={"": "src"}, 6 | package_data={}, 7 | # take METADATA.in into account, include that stuff as well (static/templates) 8 | include_package_data=True, 9 | ) 10 | -------------------------------------------------------------------------------- /src/cryptoadvance/spectrum/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cryptoadvance/spectrum/81cf99ff5380c2e6b6fadd8f68b51036b0eb5144/src/cryptoadvance/spectrum/__init__.py -------------------------------------------------------------------------------- /src/cryptoadvance/spectrum/__main__.py: -------------------------------------------------------------------------------- 1 | from .cli import entry_point 2 | import logging 3 | 4 | if __name__ == "__main__": 5 | entry_point() 6 | -------------------------------------------------------------------------------- /src/cryptoadvance/spectrum/cli/__init__.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import os 3 | import sys 4 | import time 5 | from logging.config import dictConfig 6 | 7 | import click 8 | 9 | from .cli_server import server 10 | 11 | 12 | @click.group() 13 | @click.option("--debug", is_flag=True, help="Show debug information on errors.") 14 | def entry_point(debug): 15 | 
setup_logging(debug) 16 | 17 | 18 | entry_point.add_command(server) 19 | 20 | 21 | def setup_logging(debug=False): 22 | """central and early configuring of logging see 23 | https://flask.palletsprojects.com/en/1.1.x/logging/#basic-configuration 24 | However the dictConfig doesn't work, so let's do something similiar programatically 25 | """ 26 | ch = logging.StreamHandler() 27 | ch.setLevel(logging.DEBUG) 28 | logger = logging.getLogger("cryptoadvance") 29 | if debug: 30 | formatter = logging.Formatter("[%(levelname)7s] in %(module)15s: %(message)s") 31 | logger.setLevel(logging.DEBUG) 32 | # but not that chatty connectionpool 33 | logging.getLogger("urllib3.connectionpool").setLevel(logging.INFO) 34 | ch.setFormatter(formatter) 35 | logger.debug("RUNNING IN DEBUG MODE") 36 | else: 37 | formatter = logging.Formatter( 38 | # Too early to format that via the flask-config, so let's copy it from there: 39 | os.getenv( 40 | "SPECTERCM_LOGFORMAT", 41 | "[%(asctime)s] %(levelname)s in %(module)s: %(message)s", 42 | ) 43 | ) 44 | logger.setLevel(logging.INFO) 45 | ch.setFormatter(formatter) 46 | 47 | for logger in [ 48 | # app.logger, 49 | # logging.getLogger('sqlalchemy'), 50 | ]: 51 | logger.setLevel(logging.DEBUG) 52 | 53 | logging.getLogger().handlers = [] 54 | logging.getLogger().addHandler(ch) 55 | -------------------------------------------------------------------------------- /src/cryptoadvance/spectrum/cli/cli_server.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import os 3 | from ..server import create_app, init_app 4 | import click 5 | 6 | logger = logging.getLogger(__name__) 7 | 8 | 9 | @click.group() 10 | def cli(): 11 | pass 12 | 13 | 14 | @cli.command() 15 | @click.option("--port", default="5000") 16 | # set to 0.0.0.0 to make it available outside 17 | @click.option("--host", default="127.0.0.1") 18 | # for https: 19 | @click.option("--cert") 20 | @click.option("--key") 21 | @click.option( 22 | 
"--config", 23 | default="cryptoadvance.spectrum.config.LocalElectrumConfig", 24 | help="A class which sets reasonable default values.", 25 | ) 26 | def server(port, host, cert, key, config): 27 | # a hack as the pass_context is broken for some reason 28 | # we determine debug from what is set in the entry_point via the debug-level. 29 | debug = logger.isEnabledFor(logging.DEBUG) 30 | logger.info(f"DEBUG is {debug}") 31 | 32 | app = create_app(config) 33 | init_app(app) 34 | logger.info("Starting up ...") 35 | app.run(debug=debug, port=app.config["PORT"], host=host) 36 | -------------------------------------------------------------------------------- /src/cryptoadvance/spectrum/config.py: -------------------------------------------------------------------------------- 1 | """ A config module contains static configuration """ 2 | import configparser 3 | import datetime 4 | import logging 5 | import os 6 | import secrets 7 | from pathlib import Path 8 | 9 | from cryptoadvance.spectrum.util_specter import _get_bool_env_var 10 | 11 | logger = logging.getLogger(__name__) 12 | 13 | 14 | class BaseConfig(object): 15 | """Base configuration.""" 16 | 17 | SECRET_KEY = "development key" 18 | USERNAME = "admin" 19 | HOST = "127.0.0.1" 20 | PORT = 8081 21 | SPECTRUM_DATADIR = "data" # used for sqlite but also for txs-cache 22 | 23 | 24 | # Level 1: How does persistence work? 
25 | # Convention: BlaConfig 26 | 27 | 28 | class LiteConfig(BaseConfig): 29 | DATABASE = os.path.abspath( 30 | os.path.join(BaseConfig.SPECTRUM_DATADIR, "wallets.sqlite") 31 | ) 32 | SQLALCHEMY_DATABASE_URI = "sqlite:///" + DATABASE 33 | SQLALCHEMY_TRACK_MODIFICATIONS = False 34 | 35 | 36 | class PostgresConfig(BaseConfig): 37 | """Development configuration with Postgres.""" 38 | 39 | DEBUG = True 40 | DB_USERNAME = os.environ.get("DB_USER", default="spectrum") 41 | DB_PASSWORD = os.environ.get("DB_PASSWORD") 42 | DB_HOST = os.environ.get( 43 | "DB_HOST", default="127.0.0.1" 44 | ) # will be overridden in docker-compose, but good for dev 45 | DB_PORT = os.environ.get("DB_PORT", default="5432") 46 | DB_DATABASE = os.environ.get("DB_DATABASE", default="spectrum") 47 | SQL_ALCHEMY_TRACK_MODIFICATIONS = False 48 | SQLALCHEMY_DATABASE_URI = f"postgresql+psycopg2://{DB_HOST}:{DB_PORT}/{DB_DATABASE}?user={DB_USERNAME}&password={DB_PASSWORD}" # &ssl=true 49 | 50 | 51 | # Level 2: Where do we get an electrum from ? 52 | # Convention: Prefix a level 1 config with the electrum solution 53 | class NigiriLocalElectrumLiteConfig(LiteConfig): 54 | ELECTRUM_HOST = "127.0.0.1" 55 | ELECTRUM_PORT = 50000 56 | ELECTRUM_USES_SSL = _get_bool_env_var( 57 | "ELECTRUM_USES_SSL", default="false" 58 | ) # Nigiri doesn't use SSL 59 | 60 | 61 | class EmzyElectrumLiteConfig(LiteConfig): 62 | ELECTRUM_HOST = os.environ.get("ELECTRUM_HOST", default="electrum.emzy.de") 63 | ELECTRUM_PORT = int(os.environ.get("ELECTRUM_PORT", default="50002")) 64 | ELECTRUM_USES_SSL = _get_bool_env_var("ELECTRUM_USES_SSL", default="true") 65 | 66 | 67 | class EmzyElectrumPostgresConfig(PostgresConfig): 68 | ELECTRUM_HOST = os.environ.get("ELECTRUM_HOST", default="electrum.emzy.de") 69 | ELECTRUM_PORT = int(os.environ.get("ELECTRUM_PORT", default="50002")) 70 | ELECTRUM_USES_SSL = _get_bool_env_var("ELECTRUM_USES_SSL", default="true") 71 | 72 | 73 | # Level 2: Back to the problem-Space. 
74 | # Convention: ProblemConfig where problem is usually one of Test/Production or so 75 | 76 | 77 | class TestConfig(NigiriLocalElectrumLiteConfig): 78 | pass 79 | 80 | 81 | class ProductionConfig(EmzyElectrumPostgresConfig): 82 | """Not sure whether we're production ready, though""" 83 | 84 | SECRET_KEY = os.getenv("SECRET_KEY", secrets.token_urlsafe(16)) 85 | -------------------------------------------------------------------------------- /src/cryptoadvance/spectrum/db.py: -------------------------------------------------------------------------------- 1 | """ 2 | [![](https://mermaid.ink/img/pako:eNp9VMtuwjAQ_BXLpz7gB3Ks4NBTD1CVA1K02Eti4diRHxQU5d_rPAiJA80pnhl7Z8ebVJRpjjShTIK1KwGZgWKvSHh-QEp05G25JCu0zIjSadNR93VLb9rFbNcT-Hu7--rAThCBI-V2N9M1UAe2hnt11UGEvG-cESojCgocsIPWkpRGnMFhesKrTVHBQSKPd1mcY2Wo8qsNTy3Ivpd6bOAeRTUtCMyJc2RCKIdGgYyL8CjfwAQpUXhxqVAcLw8Kd6FUkw0j7air0TXEaA42nzEu5DSAHyL7DCczrY7CFKN8esKrOQWcG7T25XWwPTbeXPXUtrsIPgHO2rsJkKPIchfXhkJ75aYRS81ONyv1jeqesYnt7h8LfRCH5qxJQs-ttFon4qkzWEpg2EzbQKyVLwgLGWfaXJ-3_bjHgDbKI_bn1XRBCzQFCB4-4ranPXU5BiM0Ca8cj-DD4NK9aqS-5KHwmoswajQ5grS4oOCd3lwVo4kzHm-i_l_Qq-o_ZgxJhg)](https://mermaid-js.github.io/mermaid-live-editor/edit#pako:eNp9VMtuwjAQ_BXLpz7gB3Ks4NBTD1CVA1K02Eti4diRHxQU5d_rPAiJA80pnhl7Z8ebVJRpjjShTIK1KwGZgWKvSHh-QEp05G25JCu0zIjSadNR93VLb9rFbNcT-Hu7--rAThCBI-V2N9M1UAe2hnt11UGEvG-cESojCgocsIPWkpRGnMFhesKrTVHBQSKPd1mcY2Wo8qsNTy3Ivpd6bOAeRTUtCMyJc2RCKIdGgYyL8CjfwAQpUXhxqVAcLw8Kd6FUkw0j7air0TXEaA42nzEu5DSAHyL7DCczrY7CFKN8esKrOQWcG7T25XWwPTbeXPXUtrsIPgHO2rsJkKPIchfXhkJ75aYRS81ONyv1jeqesYnt7h8LfRCH5qxJQs-ttFon4qkzWEpg2EzbQKyVLwgLGWfaXJ-3_bjHgDbKI_bn1XRBCzQFCB4-4ranPXU5BiM0Ca8cj-DD4NK9aqS-5KHwmoswajQ5grS4oOCd3lwVo4kzHm-i_l_Qq-o_ZgxJhg) 3 | 4 | 5 | """ 6 | 7 | from flask_sqlalchemy import SQLAlchemy 8 | from embit.descriptor import Descriptor as EmbitDescriptor 9 | from embit.descriptor.checksum import add_checksum 10 | from embit.script import Script as EmbitScript 11 | from enum import Enum 12 | import time 13 | from .util 
import sat_to_btc 14 | from sqlalchemy.ext.declarative import declared_attr 15 | from cryptoadvance.spectrum.util_specter import snake_case2camelcase 16 | from sqlalchemy.orm import DeclarativeMeta, declarative_base 17 | from flask_sqlalchemy.model import BindMetaMixin, Model 18 | 19 | 20 | class NoNameMeta(BindMetaMixin, DeclarativeMeta): 21 | pass 22 | 23 | 24 | CustomModel = declarative_base(cls=Model, metaclass=NoNameMeta, name="Model") 25 | 26 | db = SQLAlchemy(model_class=CustomModel) 27 | # db = SQLAlchemy() 28 | 29 | 30 | class SpectrumModel(db.Model): 31 | __abstract__ = True 32 | 33 | @declared_attr 34 | def __tablename__(cls): 35 | return "spectrum_" + snake_case2camelcase(cls.__name__) 36 | 37 | 38 | class TxCategory(Enum): 39 | UNKNOWN = 0 40 | RECEIVE = 1 41 | SEND = 2 42 | CHANGE = 3 # hidden 43 | 44 | def __str__(self): 45 | return self.name.lower() 46 | 47 | 48 | class Wallet(SpectrumModel): 49 | id = db.Column(db.Integer, primary_key=True) 50 | # maybe later User can be added to the wallet, 51 | # so wallet name may not be unique 52 | # value is like specter_hotstorage.../wallet_name_can_be_long 53 | name = db.Column(db.String(200), unique=True) 54 | private_keys_enabled = db.Column(db.Boolean, default=False) 55 | # if non-empty, using hdseed, 32 bytes 56 | # potentially encrypted, so some extra space here 57 | seed = db.Column(db.String(200), nullable=True, default=None) 58 | # salt for password if password is used, None if password is not used 59 | password_salt = db.Column(db.String(100), nullable=True, default=None) 60 | 61 | def get_descriptor(self, internal=False): 62 | for desc in self.descriptors: 63 | if desc.active and desc.internal == internal: 64 | return desc 65 | 66 | def get_keypool(self, internal=False): 67 | # TODO 68 | return 1000 69 | 70 | 71 | class Descriptor(SpectrumModel): 72 | """Descriptors tracked by the wallet""" 73 | 74 | id = db.Column(db.Integer, primary_key=True) 75 | wallet_id = db.Column( 76 | db.Integer, 
db.ForeignKey(f"{Wallet.__tablename__}.id"), nullable=False 77 | ) 78 | wallet = db.relationship("Wallet", backref=db.backref("descriptors", lazy=True)) 79 | # if we should use this descriptor for new addresses 80 | active = db.Column(db.Boolean, default=True) 81 | # if we should use it for change or receiving addresses 82 | internal = db.Column(db.Boolean, default=False) 83 | # descriptor itself, 15 cosigners, each xpub is 111 chars, plus derivation 84 | # but 3k should be enough 85 | descriptor = db.Column(db.String(3000), nullable=False) 86 | # original descriptor with private keys (if private keys are enabled) 87 | # potentially encrypted somehow 88 | # reqiured for Specter's hot wallet storage 89 | private_descriptor = db.Column(db.String(3000), nullable=True, default=None) 90 | # address index used by the next getnewaddress() call 91 | next_index = db.Column(db.Integer, default=0) 92 | 93 | def getscriptpubkey(self, index=None): 94 | if index is None: 95 | index = self.next_index 96 | d = EmbitDescriptor.from_string(self.private_descriptor or self.descriptor) 97 | return d.derive(index).script_pubkey() 98 | 99 | def derive(self, index): 100 | d = EmbitDescriptor.from_string(self.private_descriptor or self.descriptor) 101 | d = d.derive(index) 102 | for k in d.keys: 103 | k.key = k.key.get_public_key() 104 | return add_checksum(str(d)) 105 | 106 | def get_descriptor(self, index=None): 107 | """Returns Descriptor class""" 108 | d = EmbitDescriptor.from_string(self.private_descriptor or self.descriptor) 109 | if index is not None: 110 | d = d.derive(index) 111 | return d 112 | 113 | 114 | # We store script pubkeys instead of addresses as database is chain-agnostic 115 | class Script(SpectrumModel): 116 | id = db.Column(db.Integer, primary_key=True) 117 | wallet_id = db.Column( 118 | db.Integer, db.ForeignKey(f"{Wallet.__tablename__}.id"), nullable=False 119 | ) 120 | wallet = db.relationship("Wallet", backref=db.backref("scripts", lazy=True)) 121 | # this must 
be nullable as we may need to label external scripts 122 | descriptor_id = db.Column( 123 | db.Integer, 124 | db.ForeignKey(f"{Descriptor.__tablename__}.id"), 125 | nullable=True, 126 | default=None, 127 | ) 128 | descriptor = db.relationship("Descriptor", backref=db.backref("scripts", lazy=True)) 129 | # derivation index if it's our address 130 | index = db.Column(db.Integer, nullable=True, default=None) 131 | 132 | script = db.Column(db.String(100), nullable=False) 133 | label = db.Column(db.String(500), nullable=True, default=None) 134 | # scripthash for electrum subscribtions, store for lookups 135 | scripthash = db.Column(db.String(64), nullable=True, default=None) 136 | # electrum stuff - hash of all txs on the address 137 | state = db.Column(db.String(64), nullable=True, default=None) 138 | # confirmed balance in sat 139 | confirmed = db.Column(db.BigInteger, default=0) 140 | # unconfirmed balance in sat 141 | unconfirmed = db.Column(db.BigInteger, default=0) 142 | 143 | def address(self, network): 144 | return self.script_pubkey.address(network) 145 | 146 | @property 147 | def script_pubkey(self): 148 | return EmbitScript(bytes.fromhex(self.script)) 149 | 150 | 151 | class UTXO(SpectrumModel): 152 | id = db.Column(db.Integer, primary_key=True) 153 | txid = db.Column(db.String(64)) 154 | vout = db.Column(db.Integer) 155 | height = db.Column(db.Integer, default=None) 156 | # amount in sat 157 | amount = db.Column(db.BigInteger) 158 | # frozen or not 159 | locked = db.Column(db.Boolean, default=False) 160 | # refs 161 | script_id = db.Column(db.Integer, db.ForeignKey(f"{Script.__tablename__}.id")) 162 | script = db.relationship("Script", backref=db.backref("utxos", lazy=True)) 163 | wallet_id = db.Column( 164 | db.Integer, db.ForeignKey(f"{Wallet.__tablename__}.id"), nullable=False 165 | ) 166 | wallet = db.relationship("Wallet", backref=db.backref("utxos", lazy=True)) 167 | 168 | 169 | class Tx(SpectrumModel): 170 | id = db.Column(db.Integer, 
primary_key=True) 171 | txid = db.Column(db.String(64)) 172 | blockhash = db.Column(db.String(64), default=None) 173 | height = db.Column(db.Integer, default=None) 174 | blocktime = db.Column(db.BigInteger, default=None) 175 | replaceable = db.Column(db.Boolean, default=False) 176 | category = db.Column(db.Enum(TxCategory), default=TxCategory.UNKNOWN) 177 | vout = db.Column(db.Integer, default=None) 178 | amount = db.Column(db.BigInteger, default=0) 179 | fee = db.Column(db.BigInteger, default=None) # only for send 180 | # refs 181 | script_id = db.Column(db.Integer, db.ForeignKey(f"{Script.__tablename__}.id")) 182 | script = db.relationship("Script", backref=db.backref("txs", lazy=True)) 183 | wallet_id = db.Column( 184 | db.Integer, db.ForeignKey(f"{Wallet.__tablename__}.id"), nullable=False 185 | ) 186 | wallet = db.relationship("Wallet", backref=db.backref("txs", lazy=True)) 187 | 188 | def to_dict(self, blockheight, network): 189 | confirmed = bool(self.height) 190 | confs = (blockheight - self.height + 1) if self.height else 0 191 | t = self.blocktime if confirmed else int(time.time()) 192 | obj = { 193 | "address": self.script.address(network), 194 | "category": str(self.category), 195 | "amount": sat_to_btc(self.amount), 196 | "label": "", 197 | "vout": self.vout, 198 | "confirmations": confs, 199 | "txid": self.txid, 200 | "time": t, 201 | "timereceived": t, 202 | "walletconflicts": [], 203 | "bip125-replaceable": "yes" if self.replaceable else "no", 204 | "script_id": self.script_id, 205 | } 206 | if self.category == TxCategory.SEND: 207 | obj.update({"fee": -sat_to_btc(self.fee or 0)}) 208 | if confirmed: 209 | obj.update( 210 | { 211 | "blockhash": self.blockhash, 212 | "blockheight": self.height, 213 | "blocktime": t, 214 | } 215 | ) 216 | else: 217 | obj.update({"trusted": False}) 218 | return obj 219 | -------------------------------------------------------------------------------- /src/cryptoadvance/spectrum/elsock.py: 
-------------------------------------------------------------------------------- 1 | import json 2 | import logging 3 | import random 4 | import socket 5 | import socks 6 | import ssl 7 | import sys 8 | import threading 9 | import time 10 | from queue import Queue 11 | 12 | from .spectrum_error import RPCError 13 | from .util import FlaskThread, SpectrumInternalException, handle_exception 14 | 15 | # TODO: normal handling of ctrl+C interrupt 16 | 17 | logger = logging.getLogger(__name__) 18 | 19 | 20 | class ElSockTimeoutException(Exception): 21 | """Called in different contexts where a timeout is relevant""" 22 | 23 | pass 24 | 25 | 26 | class ElectrumSocket: 27 | """An Electrum protocol implementation based on threads 28 | Supports ssl, tor and uses callbacks for notification 29 | and a callback if the socket has been recreated. 30 | 31 | ### Implementation description 32 | 33 | This uses a _monitor_thread which is creating/controlling 4 other more technical threads: 34 | * the write and recv threads are reading from self._requests and writing to self._results 35 | ( and writing to self._notifications for new blocks and new states of scripts ) 36 | * the notify thread is reading from self._notifications and callback for those 37 | * the ping-loop is uses the call-method to ping the electrum server. If it's failing for tries_threshold 38 | it'll exit 39 | 40 | All of those threads are background-threads. 41 | 42 | If any of the above threads exits (probably the ping-thread as some canary in the coalmine) 43 | the monitor-loop will detect that and recreate everything (simply spoken). 
44 | 45 | 46 | """ 47 | 48 | # fmt: off 49 | call_timeout = 10 # the most relevant timeout as it affects business-methods (using the call-method) 50 | sleep_ping_loop = 10 # every x seconds we test the ability to call (ping) 51 | tries_threshold = 3 # how many tries the ping might fail before it's giving up (monitor-loop will reestablish connection then) 52 | wait_on_exit_timeout= 120 # needs to be bigger than the socket_timeout 53 | 54 | sleep_recv_loop = 0.01 # seconds , the shorter the better performance but 0.001 might be much worse 55 | sleep_write_loop = 0.01 # seconds , the shorter the better performance but 0.001 might be much worse 56 | socket_timeout = 10 # seconds for self._socket.recv(2048) (won't show up in the logs) 57 | # fmt: on 58 | 59 | def __init__( 60 | self, 61 | host="127.0.0.1", 62 | port=50001, 63 | use_ssl=False, 64 | callback=None, 65 | socket_recreation_callback=None, 66 | socket_timeout=None, 67 | call_timeout=None, 68 | proxy_url=None, 69 | ): 70 | """ 71 | Initializes a new instance of the ElectrumSocket class. 72 | 73 | Args: 74 | - host (str): The hostname of the Electrum server. Default is "127.0.0.1". 75 | - port (int): The port number of the Electrum server. Default is 50001. 76 | - use_ssl (bool): Specifies whether to use SSL encryption for the socket connection. Default is False. 77 | - callback (function): The callback function to call when receiving notifications from the Electrum server. Default is None. 78 | - timeout (float): The timeout for the socket connection. Default is 10 seconds. 
79 | 80 | Returns: 81 | None 82 | """ 83 | logger.info( 84 | f"Initializing ElectrumSocket with {host}:{port} (ssl: {ssl}) (proxy: {proxy_url})" 85 | ) 86 | self._host = host 87 | self._port = port 88 | self._use_ssl = use_ssl 89 | self.proxy_url = proxy_url 90 | assert type(self._host) == str 91 | assert type(self._port) == int 92 | assert type(self._use_ssl) == bool 93 | self.running = True 94 | self._callback = callback 95 | self._socket_timeout = ( 96 | socket_timeout if socket_timeout else self.__class__.socket_timeout 97 | ) 98 | self._call_timeout = ( 99 | call_timeout if call_timeout else self.__class__.call_timeout 100 | ) 101 | self.wait_on_exit_timeout = ( 102 | self.__class__.call_timeout 103 | if self._socket_timeout * 3 < self.__class__.call_timeout 104 | else self._socket_timeout * 5 105 | ) 106 | 107 | self._results = {} # store results of the calls here 108 | self._requests = [] 109 | self._notifications = [] 110 | self._wanted_status = "ok" # "ok" or "down" 111 | # The monitor-thread will create the other threads 112 | self._monitor_thread = create_and_start_bg_thread(self._monitor_loop) 113 | while not (self.status == "ok" or self.status.startswith("broken_")): 114 | time.sleep(0.2) 115 | # Preventing to execute that callback for the first time 116 | # as the spectrum can't use the connection (we're in the constructor), 117 | # therefore setting it at the very end: 118 | self._on_recreation_callback = socket_recreation_callback 119 | 120 | def shutdown(self): 121 | self._wanted_status = "down" 122 | 123 | def startup(self): 124 | self._wanted_status = "ok" 125 | 126 | @property 127 | def status(self) -> str: 128 | """Check the _monitor_loop for valid stati""" 129 | if hasattr(self, "_status"): 130 | return self._status 131 | return "unknown" 132 | 133 | @status.setter 134 | def status(self, value: str): 135 | """Check the _monitor_loop for valid stati""" 136 | logger.info(f"ElectrumSocket Status changed from {self.status} to {value}") 137 | 
self._status = value 138 | 139 | @property 140 | def uses_tor(self) -> bool: 141 | """Whether the underlying socket is using tor""" 142 | if hasattr(self, "_uses_tor"): 143 | return self._uses_tor 144 | return False 145 | 146 | @uses_tor.setter 147 | def uses_tor(self, value: bool): 148 | self._uses_tor = value 149 | 150 | def _establish_socket(self) -> bool: 151 | """Establishes a new socket connection to the specified host and port. 152 | 153 | If a socket connection already exists, it will be closed before creating a new one. 154 | If SSL encryption is enabled, the socket will be wrapped with SSL. 155 | The socket_timeout is set to 5 seconds before connecting. 156 | Once connected, the socket timeout is set to self._socket_timeout, which means it 157 | is a non blocking socket. 158 | 159 | Returns: 160 | boolean if successfull 161 | """ 162 | 163 | # Just to be sure, maybe close it upfront 164 | try: 165 | # close if open 166 | if hasattr(self, "_socket"): 167 | if not self.is_socket_closed(): 168 | self._socket.close() 169 | 170 | # maybe use tor 171 | if self.proxy_url: 172 | try: 173 | ip, port = parse_proxy_url(self.proxy_url) 174 | socks.set_default_proxy(socks.PROXY_TYPE_SOCKS5, ip, port, True) 175 | self.uses_tor = True 176 | except SpectrumInternalException as e: 177 | logger.error(f"Cannot use proxy_url : {e}") 178 | self.uses_tor = False 179 | self._socket.settimeout(5) 180 | else: 181 | self.uses_tor = False 182 | 183 | self._socket = ( 184 | socks.socksocket(socket.AF_INET, socket.SOCK_STREAM) 185 | if self.uses_tor 186 | else socket.socket(socket.AF_INET, socket.SOCK_STREAM) 187 | ) 188 | self._socket.settimeout(20 if self.uses_tor else 5) 189 | self._call_timeout = ( 190 | self._call_timeout * 4 if self.uses_tor else self._call_timeout 191 | ) 192 | 193 | logger.debug(f"socket created : {self._socket}") 194 | 195 | # maybe use ssl 196 | if self._use_ssl: 197 | self._socket = ssl.wrap_socket(self._socket) 198 | logger.debug(f"socket wrapped : 
{self._socket}") 199 | 200 | try: 201 | logger.info(f"Connecting to {self._host}:{self._port}") 202 | self._socket.connect((self._host, int(self._port))) 203 | except socket.gaierror as e: 204 | logger.error(f"Internet connection might not be up: {e}") 205 | return False 206 | except socks.GeneralProxyError as e: 207 | logger.error(f"Tor issue: {e}") 208 | return False 209 | logger.debug(f"socket connected: {self._socket}") 210 | self._socket.settimeout( 211 | self._socket_timeout 212 | ) # That means it's a NON-BLOCKING socket 213 | logger.info( 214 | f"Successfully created Socket {self._socket} (ssl={self._use_ssl}/tor={self.uses_tor})" 215 | ) 216 | return True 217 | except Exception as e: 218 | logger.exception(e) 219 | return False 220 | 221 | def _create_threads(self) -> bool: 222 | """ 223 | Creates and starts the threads for: 224 | * receiving notifications 225 | * writing requests and reading results 226 | * sending pings 227 | 228 | Returns: 229 | boolean if successfull 230 | """ 231 | try: 232 | self._recv_thread = create_and_start_bg_thread(self.recv_loop) 233 | self._write_thread = create_and_start_bg_thread(self._write_loop) 234 | self._ping_thread = create_and_start_bg_thread(self._ping_loop) 235 | self._notify_thread = create_and_start_bg_thread(self._notify_loop) 236 | return True 237 | except Exception as e: 238 | logger.exception() 239 | return False 240 | 241 | def is_socket_closed(self) -> bool: 242 | """Checks whether the socket connection is closed or not. 243 | 244 | Returns: 245 | True if the socket is closed, False otherwise. 246 | """ 247 | try: 248 | fd = self._socket.fileno() 249 | except ValueError: 250 | return True 251 | else: 252 | return False 253 | 254 | def _monitor_loop(self): 255 | """ 256 | An endless loop function for monitoring the socket connection. 
257 | If the ping thread is not alive, the socket connection and threads will be recreated via walking through 258 | this state-machine: 259 | 260 | [![](https://mermaid.ink/img/pako:eNqdkj9vAyEMxb_KyWOVWzre0KkZM3VrqU4OOAmCwxFn-kdRvnu5I0mVKmEok-H9eDbiHUCzIehgFBR6triNOLQfjyo0eb09vDdt-9ToSCg2bPuRtSMpIrtZW0d2FHpnvZ8I2WXWjAW5rd23rPCGP0OBpuq-xZ_Da_BquFvkaYDacP9oUH13haUv0kmoj1Rkzt3R-zVqV25VgNmAHSxgoDigNfmPD9MtBbKjgRR0uTS0weRFgQrHjKa9ySlYGiscodugH2kBmIRfvoOGTmKiM3SKyoXaY3hl_t3TbLIq4ZozdvwB373ZuA?type=png)](https://mermaid-js.github.io/mermaid-live-editor/edit#pako:eNqdkj9vAyEMxb_KyWOVWzre0KkZM3VrqU4OOAmCwxFn-kdRvnu5I0mVKmEok-H9eDbiHUCzIehgFBR6triNOLQfjyo0eb09vDdt-9ToSCg2bPuRtSMpIrtZW0d2FHpnvZ8I2WXWjAW5rd23rPCGP0OBpuq-xZ_Da_BquFvkaYDacP9oUH13haUv0kmoj1Rkzt3R-zVqV25VgNmAHSxgoDigNfmPD9MtBbKjgRR0uTS0weRFgQrHjKa9ySlYGiscodugH2kBmIRfvoOGTmKiM3SKyoXaY3hl_t3TbLIq4ZozdvwB373ZuA) 261 | 262 | The states are stored in the `ElectrumSocket.state` property. The Constructor of the `ElectrumSocket` is hardly doing more than just setting up the `_monitor_thread` which is an endless loop going through these states: 263 | * `creating_sockets` will create the sockets and pass to `creating_threads` or to `broken_creating_sockets` if that fails 264 | * `broken_creating_sockets` will try to create the socket and sleep for some time if that fails (and endlessly try to do that) 265 | * `creating_threads` will create the write/recv/ping/notify threads and start them 266 | * `execute_recreation_callback` will call that callback after setting the status to `ok` 267 | * the `ok` state will now simply check the other thready and if one of them is no longer alive (probably the ping-thread as he will exit if ping fails for 4 times) it will transition to `broken_killing_threads` 268 | * `broken_killing_threads` will set `self.running` to false and wait for the threads to terminate. Especially the `recv` thread might not terminate until he get internet connection (again). This might take forever. 
If all threads are terminated, it will transition to `creating_socket` 269 | 270 | """ 271 | 272 | self.status = "creating_socket" 273 | while True: # Endless loop 274 | try: 275 | time.sleep(1) 276 | if ( 277 | self.status == "creating_socket" 278 | or self.status == "broken_creating_socket" 279 | ): 280 | logger.info("(re-)creating socket ...") 281 | if not self._establish_socket(): 282 | if self.status == "broken_creating_socket": 283 | time.sleep(10) 284 | else: 285 | # don't sleep in order to speed up the boot time 286 | self.status = "broken_creating_socket" 287 | continue 288 | self.status = "creating_threads" 289 | 290 | if self.status == "creating_threads": 291 | if self.is_socket_closed(): 292 | logger.error("Detected broken socket while creating_threads") 293 | self.status = "creating_socket" 294 | continue 295 | logger.info("(re-)creating threads ...") 296 | if not self._create_threads(): 297 | time.sleep(10) 298 | continue 299 | self.status = "execute_recreation_callback" 300 | 301 | if self.status == "execute_recreation_callback": 302 | # set the new status here before we call the callback 303 | # otherwise the receiving code might be confused why called 304 | self.status = "ok" 305 | if ( 306 | hasattr(self, "_on_recreation_callback") 307 | and self._on_recreation_callback is not None 308 | ): 309 | logger.debug( 310 | f"calling self._on_recreation_callback {self._on_recreation_callback.__name__}" 311 | ) 312 | try: 313 | self._on_recreation_callback() 314 | except Exception as e: 315 | logger.error( 316 | "_on_recreation_callback threw an exception {e}" 317 | ) 318 | logger.exception(e) 319 | else: 320 | logger.debug("No reasonable _on_recreation_callback found") 321 | 322 | if self.status == "ok": 323 | while self.thread_status[ 324 | "all_alive" 325 | ]: # most relevant is the ping_status 326 | if self._wanted_status != "ok": 327 | self.status = "broken_killing_threads" 328 | break 329 | time.sleep(1) 330 | self.status = "broken_killing_threads" 
331 | if self._wanted_status != "down": 332 | logger.info( 333 | f"Issue with Electrum deteted, threads died: {','.join(self.thread_status['not_alive'])}" 334 | ) 335 | else: 336 | logger.info(f"Shutting down ElectrumSocket ...") 337 | 338 | if self.status == "broken_killing_threads": 339 | logger.info("trying to stop all threads ...") 340 | self.running = False 341 | # self._socket.setblocking(False) 342 | counter = 0 343 | log_frequency = 2 344 | start = time.time() 345 | while self.thread_status["any_alive"]: 346 | # Should we have a timeout? What to do then? 347 | wait_time = time.time() - start 348 | if wait_time > self.wait_on_exit_timeout: 349 | logger.error( 350 | f"Timeout waiting for threads: {' '.join(self.thread_status['alive'])}" 351 | ) 352 | break 353 | if counter % log_frequency == 0: 354 | logger.info( 355 | f"Waiting for those threads to exit: {' '.join(self.thread_status['alive'])} ({counter}/{log_frequency}) ({wait_time}) ({self.socket_timeout})" 356 | ) 357 | if counter > 10: 358 | log_frequency += 1 359 | time.sleep(5) 360 | counter += 1 361 | if self._wanted_status == "down": 362 | self.status = "down" 363 | else: 364 | self.status = "creating_socket" 365 | self.running = True 366 | 367 | if self.status == "down": 368 | logger.info( 369 | "ElSock shutdown. Waiting for further wanted_status requests" 370 | ) 371 | while self._wanted_status == "down": 372 | time.sleep(1) 373 | self.status = "creating_socket" 374 | 375 | except Exception as e: 376 | logger.error( 377 | "Monitoring Loop of Electrum-Socket got an Exception. This is critical if it's happening often!" 378 | ) 379 | logger.exception(e) 380 | time.sleep( 381 | 3 382 | ) # to prevent high cpu load if this exception will occur endlessly 383 | 384 | @property 385 | def thread_status(self) -> dict: 386 | """Returning a handy dict containing all informations about the current 387 | thread_status. 
388 | e.g.: 389 | { 390 | 'alive': ['recv', 'write', 'ping', 'notify'], 391 | 'not_alive': [], 392 | 'all_alive': True, 'any_alive': True, 393 | 'not_all_alive': False, 'not_any_alive': False, 394 | 'notify': True, 'ping': True, 'recv': True, 'write': True} 395 | } 396 | """ 397 | status_dict = { 398 | "recv": self._recv_thread.is_alive() 399 | if hasattr(self, "_recv_thread") and self._recv_thread 400 | else False, 401 | "write": self._write_thread.is_alive() 402 | if hasattr(self, "_write_thread") and self._write_thread 403 | else False, 404 | "ping": self._ping_thread.is_alive() 405 | if hasattr(self, "_ping_thread") and self._ping_thread 406 | else False, 407 | "notify": self._notify_thread.is_alive() 408 | if hasattr(self, "_notify_thread") and self._notify_thread 409 | else False, 410 | } 411 | any_alive = any(status_dict.values()) 412 | all_alive = all(status_dict.values()) 413 | alive_list = [key for key, value in status_dict.items() if value] 414 | not_alive_list = [key for key, value in status_dict.items() if not value] 415 | status_dict["alive"] = alive_list 416 | status_dict["not_alive"] = not_alive_list 417 | status_dict["any_alive"] = any_alive 418 | status_dict["not_any_alive"] = not any_alive 419 | status_dict["all_alive"] = all_alive 420 | status_dict["not_all_alive"] = not all_alive 421 | return status_dict 422 | 423 | def _write_loop(self): 424 | """ 425 | The loop function for writing requests to the Electrum server. 
426 | """ 427 | sleep = self.sleep_write_loop 428 | while self.running: 429 | while self._requests: 430 | try: 431 | req = self._requests.pop() 432 | self._socket.sendall(json.dumps(req).encode() + b"\n") 433 | sleep = self.sleep_write_loop 434 | except Exception as e: 435 | logger.error(f"Error in write: {e.__class__}") 436 | # handle_exception(e) 437 | sleep = 3 438 | time.sleep(sleep) 439 | logger.info("Ended write-loop") 440 | 441 | def recv_loop(self): 442 | """ 443 | The loop function for receiving data from the Electrum server. 444 | 445 | If the socket breaks, this thread is probably stuck as the thread 446 | is a blocking thread. So in that case the monitor-loop will simply 447 | recreate the corresponding thread. 448 | 449 | Returns: 450 | None 451 | """ 452 | sleep = self.sleep_recv_loop # This probably heavily impacts the sync-time 453 | read_counter = 0 454 | timeout_counter = 0 455 | while self.running: 456 | try: 457 | data = self._socket.recv(2048) 458 | read_counter += 1 459 | except TimeoutError: 460 | pass 461 | # This might happen quite often as we're using a non-blocking socket here. 462 | # And if no data is there to read from and the timeout is reached, we'll 463 | # get this error. However it's not a real error-condition (imho) 464 | 465 | # As i'm not 100% sure about that stuff, i'll keep that code around to uncomment any time: 466 | # timeout_counter += 1 467 | # logger.error( 468 | # f"Timeout in recv-loop, happens in {timeout_counter}/{read_counter} * 100 = {timeout_counter/read_counter * 100 }% of all reads. 
" 469 | # ) 470 | # logger.error(f"consider to increase socket_timeout which is currently {self._socket_timeout}") 471 | while not data.endswith(b"\n"): # b"\n" is the end of the message 472 | if not self.running: 473 | break 474 | data += self._socket.recv(2048) 475 | # data looks like this: 476 | # b'{"jsonrpc": "2.0", "result": {"hex": "...", "height": 761086}, "id": 2210736436}\n' 477 | arr = [json.loads(d.decode()) for d in data.strip().split(b"\n") if d] 478 | # arr looks like this 479 | # [{'jsonrpc': '2.0', 'result': {'hex': '...', 'height': 761086}, 'id': 2210736436}] 480 | for response in arr: 481 | if "method" in response: # notification 482 | self._notifications.append(response) 483 | if "id" in response: # request 484 | self._results[response["id"]] = response 485 | time.sleep(sleep) 486 | logger.info("Ended recv-loop") 487 | 488 | def _ping_loop(self): 489 | """ 490 | The loop function for sending ping requests to the Electrum server. 491 | 492 | If the ping fails for tries_threshold, it'll return which will end the 493 | thread and cause the monitor thread to recreate all other threads. 494 | 495 | Returns: 496 | None 497 | """ 498 | tries = 0 499 | ts = self.ping() 500 | while self.running: 501 | time.sleep(self.sleep_ping_loop) 502 | try: 503 | self.ping() 504 | tries = 0 505 | except ElSockTimeoutException as e: 506 | tries = tries + 1 507 | logger.error( 508 | f"Timeout in ping-loop ({tries}th time, next try in {self.sleep_ping_loop} seconds if threshold not met" 509 | ) 510 | if tries > self.tries_threshold: 511 | logger.error( 512 | f"More than {self.tries_threshold} Ping failures for {self.tries_threshold * self.sleep_ping_loop} seconds, Giving up!" 
513 | ) 514 | return # will end the thread 515 | 516 | def _notify_loop(self): 517 | while self.running: 518 | while self._notifications: 519 | data = self._notifications.pop() 520 | self.notify(data) 521 | time.sleep(0.02) 522 | logger.info("Ended notify-loop") 523 | 524 | def notify(self, data): 525 | if self._callback: 526 | try: 527 | self._callback(data) 528 | except Exception as e: 529 | logger.error(f"Error in callback: {e}") 530 | handle_exception(e) 531 | else: 532 | logger.debug("Notification:", data) 533 | 534 | def call(self, method, params=[]) -> dict: 535 | """ 536 | Calls a method on the Electrum server and returns the response. 537 | 538 | Args: 539 | - method (str): The name of the method to call on the Electrum server. 540 | - *params: The parameters to pass to the method. 541 | 542 | 543 | Returns: 544 | dict: The response from the Electrum server. 545 | 546 | might raise a ElSockTimeoutException if self._call_timeout is over 547 | 548 | """ 549 | uid = random.randint(0, 1 << 32) 550 | obj = {"jsonrpc": "2.0", "method": method, "params": params, "id": uid} 551 | self._requests.append(obj) 552 | start = time.time() 553 | 554 | while uid not in self._results: # wait for response 555 | # time.sleep(1) 556 | time.sleep(0.01) 557 | if time.time() - start > self._call_timeout: 558 | raise ElSockTimeoutException( 559 | f"Timeout in call ({self._call_timeout} seconds) waiting for {method} on {self._socket}" 560 | ) 561 | res = self._results.pop(uid) 562 | if isinstance(res, dict) and "error" in res: 563 | error = res.get("error", {}) 564 | error_code = error.get("code") 565 | error_message = error.get("message") 566 | if error_code is not None and error_message is not None: 567 | raise RPCError(error_message, error_code) 568 | if "result" in res: 569 | return res["result"] 570 | raise SpectrumInternalException(res) 571 | 572 | def ping(self): 573 | start = time.time() 574 | self.call("server.ping") # result None 575 | return time.time() - start 576 | 577 
| def __del__(self): 578 | logger.info("Closing socket ...") 579 | if hasattr(self, "_socket"): 580 | self._socket.close() 581 | 582 | 583 | def create_and_start_bg_thread(func) -> FlaskThread: 584 | """Creates and starts a new background thread that executes the given function. 585 | 586 | The thread is started as a daemon thread, which means it will automatically terminate 587 | when the main thread exits. The function is executed in the new thread. 588 | 589 | Args: 590 | func: The function to execute in the background thread. 591 | 592 | Returns: 593 | None 594 | """ 595 | thread = FlaskThread(target=func) 596 | thread.daemon = True 597 | thread.start() 598 | logger.info(f"Started bg thread for {func.__name__}") 599 | return thread 600 | 601 | 602 | def parse_proxy_url(proxy_url: str): 603 | """A proxy_url like socks5h://localhost:9050 will get parsed and returned into something like: 604 | [ "localhost", "9050"] 605 | the url HAS to start with socks5h 606 | """ 607 | if not proxy_url.startswith("socks5h://"): 608 | raise SpectrumInternalException(f"Wrong schema for proxy_url: {proxy_url}") 609 | proxy_url = proxy_url.replace("socks5h://", "") 610 | arr = proxy_url.split(":") 611 | if len(arr) != 2: 612 | raise SpectrumInternalException( 613 | f"Wrong uri has more than one ':' : {proxy_url}" 614 | ) 615 | return arr[0], int(arr[1]) 616 | -------------------------------------------------------------------------------- /src/cryptoadvance/spectrum/server.py: -------------------------------------------------------------------------------- 1 | import json 2 | import logging 3 | import os 4 | 5 | from flask import Flask, g, request 6 | 7 | from .db import Script, db 8 | from .spectrum import Spectrum 9 | from .server_endpoints.core_api import core_api 10 | from .server_endpoints.healthz import healthz 11 | 12 | logger = logging.getLogger(__name__) 13 | 14 | 15 | def create_app(config="cryptoadvance.spectrum.config.EmzyElectrumLiteConfig"): 16 | if 
os.environ.get("CONFIG"): 17 | config = os.environ.get("CONFIG") 18 | app = Flask(__name__) 19 | app.config.from_object(config) 20 | logger.info(f"config: {config}") 21 | return app 22 | 23 | 24 | def init_app(app, datadir=None, standalone=True): 25 | # create folder if doesn't exist 26 | if datadir is None: 27 | datadir = app.config["SPECTRUM_DATADIR"] 28 | if not os.path.exists(datadir): 29 | os.makedirs(datadir) 30 | db.init_app(app) 31 | 32 | with app.app_context(): 33 | db.create_all() 34 | app.logger.info("-------------------------CONFIGURATION-OVERVIEW------------") 35 | app.logger.info("Config from " + os.environ.get("CONFIG", "empty")) 36 | for key, value in sorted(app.config.items()): 37 | if key in ["DB_PASSWORD", "SECRET_KEY", "SQLALCHEMY_DATABASE_URI"]: 38 | app.logger.info("{} = {}".format(key, "xxxxxxxxxxxx")) 39 | else: 40 | app.logger.info("{} = {}".format(key, value)) 41 | app.logger.info("-----------------------------------------------------------") 42 | from cryptoadvance.spectrum.server_endpoints.core_api import core_api 43 | from .server_endpoints.healthz import healthz 44 | 45 | app.register_blueprint(core_api) 46 | app.register_blueprint(healthz) 47 | 48 | # if not getattr(g, "electrum", None): 49 | logger.info("Creating Spectrum Object ...") 50 | app.spectrum = Spectrum( 51 | app.config["ELECTRUM_HOST"], 52 | app.config["ELECTRUM_PORT"], 53 | ssl=app.config["ELECTRUM_USES_SSL"], 54 | datadir=app.config["SPECTRUM_DATADIR"], 55 | app=app, 56 | ) 57 | app.spectrum.sync() 58 | 59 | 60 | def main(): 61 | # TODO: debug=True spawns two Electrum servers and this causes duplications in transactions 62 | config = { 63 | "datadir": "data", 64 | "database": os.path.abspath(os.path.join("data", "wallets.sqlite")), 65 | "host": "127.0.0.1", 66 | "port": 8081, 67 | "debug": False, 68 | "electrum": { 69 | "host": "127.0.0.1", 70 | "port": 60401, # 50000, 71 | # "host": "35.201.74.156", 72 | # "port": 143, 73 | }, 74 | } 75 | app = create_app(config) 76 | 
@core_api.route("/", methods=["GET", "POST"])
def index():
    """JSONRPC endpoint without a wallet context."""
    if request.method == "GET":
        return "JSONRPC server handles only POST requests"
    data = request.get_json()
    if isinstance(data, dict):
        return json.dumps(app.spectrum.jsonrpc(data))
    if isinstance(data, list):
        return json.dumps([app.spectrum.jsonrpc(item) for item in data])
    # previously fell through returning None (a Flask 500); reply with a
    # proper JSONRPC parse-error object instead
    return (
        json.dumps(
            {"result": None, "error": {"code": -32700, "message": "Parse error"}, "id": None}
        ),
        400,
    )


# NOTE(review): the original registered the identical "/wallet/" rule twice;
# given the wallet_name parameter the second rule is reconstructed with a
# <wallet_name> converter -- confirm against callers.
@core_api.route("/wallet/", methods=["GET", "POST"])
@core_api.route("/wallet/<wallet_name>", methods=["GET", "POST"])
def walletrpc(wallet_name=""):
    """JSONRPC endpoint scoped to a wallet (by name in the URL)."""
    if request.method == "GET":
        return "JSONRPC server handles only POST requests"
    data = request.get_json()
    if isinstance(data, dict):
        return json.dumps(app.spectrum.jsonrpc(data, wallet_name=wallet_name))
    if isinstance(data, list):
        return json.dumps(
            [app.spectrum.jsonrpc(item, wallet_name=wallet_name) for item in data]
        )
    # same parse-error fallback as index()
    return (
        json.dumps(
            {"result": None, "error": {"code": -32700, "message": "Parse error"}, "id": None}
        ),
        400,
    )
@healthz.route("/healthz/liveness")
def liveness():
    """Liveness probe: 200 as long as the process is serving requests."""
    return {"message": "i am alive"}


@healthz.route("/healthz/readyness")
def readyness():
    """Readiness probe: 200 only while the Electrum connection is up.

    NOTE(review): the route spelling "readyness" is kept as-is because
    existing probes depend on the URL.
    """
    try:
        # Probably improvable:
        logger.info("ready?")
        # explicit check instead of `assert`, which is stripped under
        # `python -O` and would then always report ready
        if not app.spectrum.is_connected():
            logger.info("no!")
            return {"message": "i am not ready"}, 500
    except Exception as e:
        logger.info("no!")
        # log why the check failed instead of discarding the exception
        logger.debug(f"readyness check raised: {e}")
        return {"message": "i am not ready"}, 500
    return {"message": "i am ready"}
# a set of registered rpc calls that do not need a wallet
RPC_METHODS = set()
# wallet-specific rpc calls
WALLETRPC_METHODS = set()


def rpc(f):
    """Register ``f`` as a generic (wallet-less) rpc method.

    The function is returned unchanged: the former functools.wraps
    wrapper forwarded *args/**kwargs verbatim and only added a call
    frame, so it was removed.
    """
    RPC_METHODS.add(f.__name__)
    return f


def walletrpc(f):
    """Register ``f`` as a wallet-specific rpc method (returned unchanged)."""
    WALLETRPC_METHODS.add(f.__name__)
    return f
{host}:{port} (ssl={ssl})") 109 | self.sock = ElectrumSocket( 110 | host=host, 111 | port=port, 112 | callback=self.process_notification, 113 | socket_recreation_callback=self._sync, 114 | use_ssl=ssl, 115 | proxy_url=proxy_url, 116 | ) 117 | 118 | # self.sock = ElectrumSocket(host="35.201.74.156", port=143, callback=self.process_notification) 119 | # 143 - Testnet, 110 - Mainnet, 195 - Liquid 120 | self.t0 = time.time() # for uptime 121 | if self.sock and self.sock.status == "ok": 122 | logger.info(f"Pinged electrum in {self.sock.ping()} ") 123 | logger.info("subscribe to block headers") 124 | res = self.sock.call("blockchain.headers.subscribe") 125 | self.blocks = res["height"] 126 | self.bestblockhash = get_blockhash(res["hex"]) 127 | logger.info("detect chain from header") 128 | rootheader = self.sock.call("blockchain.block.header", [0]) 129 | logger.info(f"Set roothash {self.roothash}") 130 | self.roothash = get_blockhash(rootheader) 131 | self.chain = ROOT_HASHES.get(self.roothash, "regtest") 132 | 133 | def stop(self): 134 | logger.info("Stopping Spectrum") 135 | self.sock.shutdown() 136 | 137 | def is_connected(self) -> bool: 138 | """Returns True if there is a socket connection, False otherwise.""" 139 | return self.sock.status == "ok" 140 | 141 | @property 142 | def uses_tor(self): 143 | """Whether the underlying ElectrumSocket uses Tor""" 144 | return self.sock.uses_tor 145 | 146 | @property 147 | def txdir(self): 148 | return os.path.join(self.datadir, "txs") 149 | 150 | @property 151 | def progress_percent(self): 152 | """This reflects the sync-progress of the _sync-method. 
It'll be returned in the 153 | verificationprogress of getblockchaininfo 154 | """ 155 | if hasattr(self, "_progress_percent"): 156 | return self._progress_percent 157 | else: 158 | return 0 159 | 160 | @progress_percent.setter 161 | def progress_percent(self, value): 162 | """Will be called from the sync-progress only""" 163 | self._progress_percent = int(value) 164 | 165 | def _sync(self): 166 | """This code is checking self.sock for properly working (otherwise offline-mode) 167 | and if it is, it subscribes to all scripts and checks if the state of the 168 | script matches the response from the subscription. If they don't match, 169 | it calls a sync_script function to update the state. It also logs progress 170 | every 100 scripts subscribed to and updates self.progress_percent 171 | """ 172 | try: 173 | if self.sock.status != "ok": 174 | logger.info("Syncprocess not starting, in offline-mode") 175 | return 176 | if hasattr(self, "_sync_in_progress") and self._sync_in_progress: 177 | logger.info("Syncprocess not starting, already running!") 178 | return 179 | self._sync_in_progress = True 180 | 181 | subscription_logging_counter = 0 182 | # subscribe to all scripts 183 | all_scripts = Script.query.all() 184 | all_scripts_len = len(all_scripts) 185 | logger.info( 186 | f"Syncprocess starting ({all_scripts_len} needs subscriptions)..." 
187 | ) 188 | ts = datetime.now() 189 | for sc in all_scripts: 190 | # ignore external scripts (labeled recepients) 191 | if sc.index is None: 192 | continue 193 | subscription_logging_counter += 1 194 | if subscription_logging_counter % 100 == 0: 195 | self.sync_speed = subscription_logging_counter / int( 196 | (datetime.now() - ts).total_seconds() 197 | ) 198 | self.progress_percent = int( 199 | subscription_logging_counter / all_scripts_len * 100 200 | ) 201 | logger.info( 202 | f"Syncprocess now subscribed to {subscription_logging_counter} scripthashes ({self.progress_percent}%, {self.sync_speed} scripts/s)" 203 | ) 204 | 205 | try: 206 | res = self.sock.call( 207 | "blockchain.scripthash.subscribe", [sc.scripthash] 208 | ) 209 | except ElSockTimeoutException: 210 | logger.error( 211 | "Syncprocess got an ElSockTimeoutException. Stop Syncing!" 212 | ) 213 | self.progress_percent = 0 214 | return 215 | if res != sc.state: 216 | self.sync_script(sc, res) 217 | self.progress_percent = 100 218 | ts_diff_s = int((datetime.now() - ts).total_seconds()) 219 | logger.info( 220 | f"Syncprocess finished syncing {all_scripts_len} scripts in {ts_diff_s} with {self.sync_speed} scripts/s)" 221 | ) 222 | except Exception as e: 223 | logger.exception(e) 224 | finally: 225 | self._sync_in_progress = False 226 | 227 | def sync(self, asyncc=True): 228 | if asyncc: 229 | # Using a FlaskThread means also by default that it's a daemon-thread. This has the advantage that 230 | # The thread is killed when the main-thread is killed but it does not do it in a tidy way. 231 | # Potentially harmfull for a LiteConfig but hopefully no problem for a PostgresConfig 232 | t = FlaskThread( 233 | target=self._sync, 234 | ) 235 | t.start() 236 | else: 237 | self._sync() 238 | 239 | # ToDo: subcribe_scripts and sync is very similiar. One does it for all of the scripts in the DB, 240 | # the other one only for a specific descriptor. We should merge them! 
241 | def subcribe_scripts(self, descriptor, asyncc=True): 242 | """Takes a descriptor and syncs all the scripts into the DB 243 | creates a new thread doing that. 244 | """ 245 | if asyncc: 246 | t = FlaskThread( 247 | target=self._subcribe_scripts, 248 | args=[ 249 | descriptor.id, 250 | ], 251 | ) 252 | t.start() 253 | else: 254 | self._subcribe_scripts(descriptor.id) 255 | 256 | def _subcribe_scripts(self, descriptor_id: int) -> None: 257 | descriptor: Descriptor = Descriptor.query.filter( 258 | Descriptor.id == descriptor_id 259 | ).first() 260 | logger.info(f"Starting sync/subscribe for {descriptor.descriptor[:30]}") 261 | # subscribe to all scripts in a thread to speed up creation of the wallet 262 | sc: Script 263 | relevant_scripts_query = Script.query.filter_by(descriptor=descriptor) 264 | relevant_scripts = relevant_scripts_query.all() 265 | relevant_scripts_count = relevant_scripts_query.count() 266 | 267 | count_scripts = 0 268 | count_syned_scripts = 0 269 | ts = datetime.now() 270 | for sc in relevant_scripts: 271 | # subscribing 272 | res = self.sock.call("blockchain.scripthash.subscribe", [sc.scripthash]) 273 | count_scripts += 1 274 | 275 | # syncing 276 | if res != sc.state: 277 | self.sync_script(sc, res) 278 | count_syned_scripts += 1 279 | 280 | # logging and expose progress 281 | if count_scripts % 100 == 0: 282 | logger.info( 283 | f"Now subscribed to {count_syned_scripts} of {relevant_scripts_count} scripthashes ({self.progress_percent}%) (via importdescriptor))" 284 | ) 285 | self.progress_percent = int( 286 | count_syned_scripts / relevant_scripts_count * 100 287 | ) 288 | 289 | self.progress_percent = 100 290 | ts_diff_s = int((datetime.now() - ts).total_seconds()) 291 | logger.info( 292 | f"Finished Subscribing and syncing for descriptor {descriptor.descriptor[:30]} in {ts_diff_s}" 293 | ) 294 | logger.info( 295 | f"A total of {len(relevant_scripts)} scripts got subscribed where {count_syned_scripts} got synced" 296 | ) 297 | 298 | def 
    def sync_script(self, script, state=None):
        """Resync one Script row against electrum: txs, utxos and balances.

        Fetches the script's unspents, balance and history from electrum,
        deletes DB txs/utxos that disappeared, inserts/updates the rest,
        then stores the new subscription `state` on the script and commits.
        Called from _sync/_subcribe_scripts and from subscription
        notifications (process_notification).
        """
        # Normally every script has 1-2 transactions and 0-1 utxos,
        # so even if we delete everything and resync it's ok
        # except donation addresses that may have many txs...
        logger.debug(
            f"Script {script.scripthash[:7]} is not synced {script.state} != {state}"
        )
        if script.state != None:
            logger.info(
                f"Script {script.scripthash[:7]} has an update from state {script.state} to {state}"
            )
        script_pubkey = script.script_pubkey
        internal = script.descriptor.internal
        # get all transactions, utxos and update balances
        # {height,tx_hash,tx_pos,value}
        utxos = self.sock.call("blockchain.scripthash.listunspent", [script.scripthash])
        # {confirmed,unconfirmed}
        balance = self.sock.call(
            "blockchain.scripthash.get_balance", [script.scripthash]
        )
        # {height,tx_hash}
        txs = self.sock.call("blockchain.scripthash.get_history", [script.scripthash])
        # dict with all txs in the database
        db_txs = {tx.txid: tx for tx in script.txs}
        # delete all txs that are not there any more:
        all_txids = {tx["tx_hash"] for tx in txs}
        for txid, tx in db_txs.items():
            if txid not in all_txids:
                db.session.delete(tx)
        for tx in txs:
            # NOTE(review): for mempool txs electrum reports height 0 (or -1);
            # this then fetches the header at that height -- confirm that
            # parse_blockheader yields sensible blockhash/blocktime there.
            blockheader = self.sock.call("blockchain.block.header", [tx.get("height")])
            blockheader = parse_blockheader(blockheader)
            # update existing - set height
            tx_in_db = tx["tx_hash"] in db_txs
            try:
                # verbose fetch (True) for known txs, raw hex (False) otherwise
                tx_magic = self.sock.call(
                    "blockchain.transaction.get", [tx["tx_hash"], tx_in_db]
                )
            except ValueError as e:
                if str(e).startswith(
                    "verbose transactions are currently unsupported"
                ):  # electrs doesn't support it
                    tx_magic = self.sock.call(
                        "blockchain.transaction.get", [tx["tx_hash"], False]
                    )
                else:
                    raise e
            if tx_in_db:
                db_txs[tx["tx_hash"]].height = tx.get("height")
                db_txs[tx["tx_hash"]].blockhash = blockheader.get(
                    "blockhash"
                )  # not existing, how can we fix that?
                db_txs[tx["tx_hash"]].blocktime = blockheader.get(
                    "blocktime"
                )  # not existing, how can we fix that?
            # new tx
            else:
                tx_details = {
                    "tx_hash": tx_magic,
                    "blockhash": blockheader.get("blockhash"),
                    "blocktime": blockheader.get("blocktime"),
                }
                # dump to file (local raw-tx cache read back by _get_tx)
                fname = os.path.join(self.txdir, "%s.raw" % tx["tx_hash"])
                if not os.path.exists(fname):
                    with open(fname, "w") as f:
                        f.write(tx_magic)

                parsedTx = EmbitTransaction.from_string(tx_magic)
                # BIP125: replaceable iff every input signals (sequence < 0xFFFFFFFE)
                replaceable = all([inp.sequence < 0xFFFFFFFE for inp in parsedTx.vin])

                category = TxCategory.RECEIVE
                amount = 0
                vout = 0
                if script_pubkey not in [out.script_pubkey for out in parsedTx.vout]:
                    # none of the outputs pays this script -> outgoing tx
                    category = TxCategory.SEND
                    amount = -sum([out.value for out in parsedTx.vout])
                else:
                    vout = [out.script_pubkey for out in parsedTx.vout].index(
                        script_pubkey
                    )
                    amount = parsedTx.vout[vout].value
                    if internal:  # receive to change is hidden in txlist
                        category = TxCategory.CHANGE

                t = Tx(
                    txid=tx["tx_hash"],
                    blockhash=tx_details.get("blockhash"),
                    height=tx.get("height"),
                    blocktime=tx_details.get("blocktime"),
                    replaceable=replaceable,
                    category=category,
                    vout=vout,
                    amount=amount,
                    fee=tx.get("fee", 0),
                    # refs
                    script=script,
                    wallet=script.wallet,
                )
                db.session.add(t)

        # dicts of all electrum utxos and all db utxos
        all_utxos = {(u["tx_hash"], u["tx_pos"]): u for u in utxos}
        db_utxos = {(u.txid, u.vout): u for u in script.utxos}
        # delete all utxos that are not in electrum utxos
        for k, utxo in db_utxos.items():
            # delete if spent
            if k not in all_utxos:
                db.session.delete(utxo)
        # add all utxos
        for k, utxo in all_utxos.items():
            # update existing
            if k in db_utxos:
                u = db_utxos[k]
                u.height = utxo.get("height")
                u.amount = utxo["value"]
            # add new
            else:
                u = UTXO(
                    txid=utxo["tx_hash"],
                    vout=utxo["tx_pos"],
                    height=utxo.get("height"),
                    amount=utxo["value"],
                    script=script,
                    wallet=script.wallet,
                )
                db.session.add(u)
        # persist the subscription state + balances in one commit
        script.state = state
        script.confirmed = balance["confirmed"]
        script.unconfirmed = balance["unconfirmed"]
        db.session.commit()
    @rpc
    def getmininginfo(self):
        """Minimal stand-in for bitcoin-core's `getmininginfo`.

        Only `blocks` and `chain` carry live data (tracked from electrum
        header notifications); difficulty and networkhashps are stubbed.
        """
        return {
            "blocks": self.blocks,
            "chain": self.chain,
            "difficulty": 0,  # we can potentially get it from the best header
            "networkhashps": 0,
            "warnings": "",
        }
    @rpc
    def uptime(self):
        """Seconds since this Spectrum instance was created (t0 set in __init__)."""
        return int(time.time() - self.t0)
out of range", -8) 609 | logger.info(f"height: {height}") 610 | header = self.sock.call("blockchain.block.header", [height]) 611 | return get_blockhash(header) 612 | 613 | @rpc 614 | def scantxoutset(self, action, scanobjects=[]): 615 | """Dummy call, doing nothing""" 616 | return None 617 | 618 | @rpc 619 | def getblockcount(self): 620 | return self.blocks 621 | 622 | @rpc 623 | def gettxoutsetinfo( 624 | self, hash_type="hash_serialized_2", hash_or_height=None, use_index=True 625 | ): 626 | """Dummy call, doing nothing""" 627 | return { 628 | "height": self.blocks, 629 | "bestblock": self.bestblockhash, 630 | "transactions": 0, 631 | "txouts": 0, 632 | "bogosize": 0, 633 | "hash_serialized_2": "", 634 | "disk_size": 0, 635 | "total_amount": 0, 636 | } 637 | 638 | @rpc 639 | def getblockfilter(self, blockhash, filtertype="basic"): 640 | """Dummy call, doing nothing""" 641 | return {} 642 | 643 | @rpc 644 | def estimatesmartfee(self, conf_target, estimate_mode="conservative"): 645 | if conf_target < 1 or conf_target > 1008: 646 | raise RPCError("Invalid conf_target, must be between 1 and 1008", -8) 647 | fee = self.sock.call("blockchain.estimatefee", [conf_target]) 648 | # returns -1 if failed to estimate fee 649 | if fee < 0: 650 | return { 651 | "errors": ["Insufficient data or no feerate found"], 652 | "blocks": conf_target, 653 | } 654 | return { 655 | "feerate": fee, 656 | "blocks": conf_target, 657 | } 658 | 659 | @rpc 660 | def combinepsbt(self, txs): 661 | if not txs: 662 | raise RPCError("Parameter 'txs' cannot be empty", -8) 663 | psbt = PSBT.from_string(txs[0]) 664 | for tx in txs[1::]: 665 | other = PSBT.from_string(tx) 666 | psbt.xpubs.update(other.xpubs) 667 | psbt.unknown.update(other.unknown) 668 | for i, inp in enumerate(other.inputs): 669 | psbt.inputs[i].update(inp) 670 | for i, out in enumerate(other.outputs): 671 | psbt.outputs[i].update(out) 672 | return str(psbt) 673 | 674 | @rpc 675 | def finalizepsbt(self, psbt, extract=True): 676 | psbt = 
PSBT.from_string(psbt) 677 | tx = None 678 | tx = finalize_psbt(psbt) 679 | if tx: 680 | if extract: 681 | return {"hex": str(tx), "complete": True} 682 | else: 683 | return {"psbt": str(psbt), "complete": True} 684 | return {"psbt": str(psbt), "complete": False} 685 | 686 | @rpc 687 | def testmempoolaccept(self, rawtxs, maxfeerate=0.1): 688 | # TODO: electrum doesn't have this method, we need to verify txs somehow differently 689 | # also missing txid and other stuff here 690 | return [{"allowed": True} for tx in rawtxs] 691 | 692 | @rpc 693 | def getrawtransaction(self, txid, verbose=False): 694 | """ 695 | Get raw transaction data for a given transaction id. 696 | For more information on the Bitcoin RPC call see: https://developer.bitcoin.org/reference/rpc/getrawtransaction.html 697 | 698 | Parameters: 699 | - txid (str): The transaction id of the transaction you want to retrieve. 700 | - verbose (bool): Indicates whether to return detailed information about the transaction. Default is False. 701 | 702 | Returns: 703 | - dict: If verbose is set to True, it returns detailed information about the transaction specified by txid, 704 | otherwise it returns only the transaction data. 
705 | Implementation details: 706 | - This method is using the ElectrumX API call `blockchain.transaction.get` which is documented here: 707 | https://electrumx.readthedocs.io/en/latest/protocol-methods.html#blockchain-transaction-get 708 | """ 709 | if verbose: 710 | return self.sock.call("blockchain.transaction.get", [txid, True]) 711 | else: 712 | return self.sock.call("blockchain.transaction.get", [txid, False]) 713 | 714 | @rpc 715 | def sendrawtransaction(self, hexstring, maxfeerate=0.1): 716 | res = self.sock.call("blockchain.transaction.broadcast", [hexstring]) 717 | if len(res) != 64: 718 | raise RPCError(res) 719 | return res 720 | 721 | # ========== WALLETS RPC CALLS ========== 722 | 723 | @rpc 724 | def listwallets(self): 725 | wallets = [w.name for w in Wallet.query.all()] 726 | logger.debug(f"These are the wallets from listwallets call: {wallets}") 727 | return wallets 728 | 729 | @rpc 730 | def listwalletdir(self): 731 | return [w.name for w in Wallet.query.all()] 732 | 733 | @rpc 734 | def createwallet( 735 | self, 736 | wallet_name, 737 | disable_private_keys=False, 738 | blank=False, 739 | passphrase="", 740 | avoid_reuse=False, 741 | descriptors=True, 742 | load_on_startup=True, 743 | external_signer=False, 744 | ): 745 | """Creates a wallet 746 | By default, it'll get a hotwallet 747 | """ 748 | w = Wallet.query.filter_by(name=wallet_name).first() 749 | if w: 750 | raise RPCError("Wallet already exists", -4) 751 | w = Wallet( 752 | name=wallet_name, 753 | private_keys_enabled=(not disable_private_keys), 754 | seed=None, 755 | ) 756 | db.session.add(w) 757 | db.session.commit() 758 | if not blank and not disable_private_keys: 759 | self.set_seed(w) # random seed is set if nothing is passed as an argument 760 | return {"name": wallet_name, "warning": ""} 761 | 762 | @rpc 763 | def loadwallet(self, filename, load_on_startup=True): 764 | """Dummy call, doing nothing except checking wallet""" 765 | # this will raise if wallet doesn't exist 766 | 
    @walletrpc
    def getwalletinfo(self, wallet):
        """bitcoin-core compatible `getwalletinfo`, built from the local DB.

        Balances come from _get_balance (sats, converted to BTC here);
        txcount counts distinct txids stored for this wallet. Fields like
        walletversion, format and immature_balance are static stand-ins.
        """
        confirmed, unconfirmed = self._get_balance(wallet)
        # distinct() because one txid can appear once per touched script
        txnum = (
            db.session.query(Tx.txid)
            .filter(Tx.wallet_id == wallet.id)
            .distinct()
            .count()
        )
        return {
            "walletname": wallet.name,
            "walletversion": 169900,
            "format": "sqlite",
            "balance": sat_to_btc(confirmed),
            "unconfirmed_balance": sat_to_btc(unconfirmed),
            "immature_balance": 0,
            "txcount": txnum,
            "keypoolsize": wallet.get_keypool(internal=False),
            "keypoolsize_hd_internal": wallet.get_keypool(internal=True),
            "paytxfee": 0,
            "private_keys_enabled": wallet.private_keys_enabled,
            "avoid_reuse": False,
            "scanning": False,
            "descriptors": True,
            "external_signer": False,
        }
    @walletrpc
    def getrawchangeaddress(self, wallet, address_type=None):
        """Return a change address for the wallet.

        NOTE(review): appears to return the same address on repeated calls
        until the keypool handling (TODO below) is implemented -- confirm
        against Descriptor.getscriptpubkey.

        Raises:
            RPCError: when the wallet has no active internal descriptor.
        """
        desc = wallet.get_descriptor(internal=True)
        if not desc:
            raise RPCError("No active descriptors", -500)
        # TODO: refill keypool, subscribe
        return desc.getscriptpubkey().address(self.network)
        if sc:
            sc.label = label
            db.session.commit()

    @walletrpc
    def getaddressesbylabel(self, wallet, label):
        """Maps each address carrying `label` to its purpose: scripts with a
        derivation index are ours ("receive"), the rest are send-to labels."""
        scripts = Script.query.filter_by(wallet=wallet, label=label).all()
        obj = {}
        for sc in scripts:
            obj[sc.address(self.network)] = {
                "purpose": "receive" if sc.index is not None else "send"
            }
        return obj

    def _get_tx(self, txid):
        """Loads a raw transaction previously cached on disk as
        `<txdir>/<txid>.raw`; returns None when it was never stored."""
        fname = os.path.join(self.txdir, "%s.raw" % txid)
        if os.path.exists(fname):
            with open(fname, "r") as f:
                tx = EmbitTransaction.from_string(f.read())
            return tx

    @walletrpc
    def gettransaction(self, wallet, txid, include_watchonly=True, verbose=False):
        """Core-compatible `gettransaction`, assembled from this wallet's Tx
        rows (one row per affected output) plus the cached raw transaction."""
        tx = self._get_tx(txid)
        if not tx:
            raise RPCError("Invalid or non-wallet transaction id", -5)
        txs = Tx.query.filter_by(wallet=wallet, txid=txid).all()
        if not txs:
            raise RPCError("Invalid or non-wallet transaction id", -5)
        tx0 = txs[0]
        confirmed = bool(tx0.height)
        # unconfirmed transactions get "now" as their timestamp
        t = int(time.time()) if not confirmed else tx0.blocktime
        obj = {
            "amount": sat_to_btc(sum([tx.amount for tx in txs])),
            "confirmations": (self.blocks - tx0.height + 1) if tx0.height else 0,
            "txid": txid,
            "walletconflicts": [],
            "time": t,
            "timereceived": t,
            "bip125-replaceable": "yes" if tx0.replaceable else "no",
            "details": [
                {
                    "address": tx.script.address(self.network),
                    "category": str(tx.category),
                    "amount": sat_to_btc(tx.amount),
                    "label": "",
                    "vout": tx.vout,
                }
                for tx in txs
                # change outputs are not reported in details
                if tx.category != TxCategory.CHANGE
            ],
            "hex": str(tx),
        }
        # fee only makes sense for outgoing transactions
        if "send" in [d["category"] for d in obj["details"]]:
            obj.update({"fee": -sat_to_btc(tx0.fee or 0)})
        if confirmed:
            obj.update(
                {
                    "blockhash": tx0.blockhash,
                    "blockheight": tx0.height,
                    "blocktime": tx0.blocktime,
                }
            )
        else:
            obj.update({"trusted": False})
        if verbose:
            pass  # add "decoded"
        return obj

    @walletrpc
    def listtransactions(
        self, wallet, label="*", count=10, skip=0, include_watchonly=True
    ):
        # NOTE(review): `label` and `include_watchonly` are currently ignored.
        txs = (
            db.session.query(Tx)
            .filter(Tx.wallet_id == wallet.id)
            .offset(skip)
            .limit(count)
            .all()
        )
        return [tx.to_dict(self.blocks, self.network) for tx in txs]

    def _get_balance(self, wallet: Wallet):
        """Returns a tuple: (confirmed, unconfirmed) in sats"""
        confirmed, unconfirmed = (
            db.session.query(
                func.sum(Script.confirmed).label("confirmed"),
                func.sum(Script.unconfirmed).label("unconfirmed"),
            )
            .filter(Script.wallet == wallet)
            .first()
        )
        # SUM over zero rows yields NULL -> the wallet has no scripts at all
        if confirmed is None or unconfirmed is None:
            raise SpectrumException(f"No scripts for wallet {wallet.name}")
        return confirmed, unconfirmed

    @walletrpc
    def getbalances(self, wallet):
        """Core-compatible `getbalances` (sats converted to BTC floats)."""
        confirmed, unconfirmed = self._get_balance(wallet)
        b = {
            "trusted": round(confirmed * 1e-8, 8),
            "untrusted_pending": round(unconfirmed * 1e-8, 8),
            "immature": 0.0,  # coinbase maturity is not tracked
        }
        if wallet.private_keys_enabled:
            return {"mine": b}
        else:
            # watch-only wallet: report the same numbers under both keys
            return {
                "mine": b,
                "watchonly": b,
            }

    @walletrpc
    def lockunspent(self, wallet, unlock, transactions=[]):
        """Locks (unlock=False) or unlocks (unlock=True) the given outpoints,
        mirroring Core's `lockunspent` error codes (-8 on bad parameters)."""
        for txobj in transactions:
            txid = txobj["txid"]
            vout = txobj["vout"]
            utxo = UTXO.query.filter_by(wallet=wallet, txid=txid, vout=vout).first()
            if utxo is None:
                raise RPCError("Invalid parameter, unknown transaction", -8)
            if utxo.locked and not unlock:
                raise RPCError("Invalid parameter, output already locked", -8)
            if not utxo.locked and unlock:
                raise RPCError("Invalid parameter, expected locked output", -8)
            utxo.locked = not unlock
            db.session.commit()
        return True
1023 | @walletrpc 1024 | def listlockunspent(self, wallet): 1025 | utxos = UTXO.query.filter_by(wallet=wallet, locked=True).all() 1026 | return [{"txid": utxo.txid, "vout": utxo.vout} for utxo in utxos] 1027 | 1028 | @walletrpc 1029 | def listunspent( 1030 | self, 1031 | wallet, 1032 | minconf=1, 1033 | maxconf=9999999, 1034 | addresses=[], 1035 | include_unsafe=True, 1036 | query_options={}, 1037 | ): 1038 | # TODO: options are currently ignored 1039 | options = { 1040 | "minimumAmount": 0, 1041 | "maximumAmount": 0, 1042 | "maximumCount": 99999999999, 1043 | "minimumSumAmount": 0, 1044 | } 1045 | options.update(query_options) 1046 | utxos = UTXO.query.filter_by(wallet=wallet, locked=False).all() 1047 | return [ 1048 | { 1049 | "txid": utxo.txid, 1050 | "vout": utxo.vout, 1051 | "amount": round(utxo.amount * 1e-8, 8), 1052 | "spendable": True, 1053 | "solvable": True, 1054 | "safe": utxo.height is not None, 1055 | "confirmations": (self.blocks - utxo.height + 1) 1056 | if utxo.height 1057 | else 0 1058 | if utxo.height is not None 1059 | else 0, 1060 | "address": utxo.script.address(self.network), 1061 | "scriptPubKey": utxo.script.script, 1062 | "desc": utxo.script.descriptor.derive(utxo.script.index), 1063 | # "desc": True, # should be descriptor, but we only check if desc is there or not 1064 | } 1065 | for utxo in utxos 1066 | ] 1067 | 1068 | @walletrpc 1069 | def listsinceblock( 1070 | self, 1071 | wallet, 1072 | blockhash=None, 1073 | target_confirmations=1, 1074 | include_watchonly=True, 1075 | include_removed=True, 1076 | ): 1077 | query = db.session.query(Tx).filter( 1078 | Tx.wallet_id == wallet.id, 1079 | Tx.category.in_([TxCategory.SEND, TxCategory.RECEIVE]), 1080 | ) 1081 | # TODO: don't know how to get height from blockhash 1082 | # looks like we need to store all block hashes as well 1083 | if target_confirmations > 0: 1084 | query = query.filter(Tx.height <= self.blocks - target_confirmations + 1) 1085 | txs = query.all() 1086 | txs = [ 1087 | { 
1088 | "address": tx.script.address(self.network), 1089 | "category": str(tx.category), 1090 | "amount": sat_to_btc(tx.amount), 1091 | "label": "", 1092 | "vout": tx.vout, 1093 | "confirmations": (self.blocks - tx.height + 1) if tx.height else 0, 1094 | "blockhash": tx.blockhash, 1095 | "blockheight": tx.height, 1096 | "blocktime": tx.blocktime, 1097 | "txid": tx.txid, 1098 | "time": tx.blocktime, 1099 | "timereceived": tx.blocktime, 1100 | "walletconflicts": [], 1101 | "bip125-replaceable": "yes" if tx.replaceable else "no", 1102 | } 1103 | for tx in txs 1104 | ] 1105 | return { 1106 | "transactions": [], 1107 | "removed": [], 1108 | "lastblock": self.bestblockhash, # not sure about this one 1109 | } 1110 | 1111 | @walletrpc 1112 | def getreceivedbyaddress(self, wallet, address, minconf=1): 1113 | sc = EmbitScript.from_address(address) 1114 | script = Script.query.filter_by(script=sc.data.hex()).first() 1115 | if not script: 1116 | return 0 1117 | # no transactions on this script 1118 | if script.state is None: 1119 | return 0 1120 | (received,) = ( 1121 | db.session.query( 1122 | func.sum(Tx.amount).label("amount"), 1123 | ) 1124 | .filter( 1125 | Tx.script == script, 1126 | Tx.category.in_([TxCategory.CHANGE, TxCategory.RECEIVE]), 1127 | ) 1128 | .first() 1129 | ) 1130 | return sat_to_btc(received) 1131 | 1132 | @rpc 1133 | def converttopsbt(self, hexstring, permitsigdata=False, iswitness=None): 1134 | tx = EmbitTransaction.from_string(hexstring) 1135 | # remove signatures 1136 | if permitsigdata: 1137 | for vin in tx.vin: 1138 | vin.witness = Witness() 1139 | vin.script_sig = EmbitScript() 1140 | for vin in tx.vin: 1141 | if vin.witness or vin.script_sig: 1142 | raise RPCError( 1143 | "Inputs must not have scriptSigs and scriptWitnesses", -22 1144 | ) 1145 | return str(PSBT(tx)) 1146 | 1147 | def _fill_scope(self, scope, script, add_utxo=False): 1148 | if add_utxo: 1149 | tx = self._get_tx(scope.txid.hex()) 1150 | if tx is not None: 1151 | is_segwit = 
tx.is_segwit 1152 | # clear witness 1153 | for vin in tx.vin: 1154 | vin.witness = Witness() 1155 | scope.non_witness_utxo = tx 1156 | vout = tx.vout[scope.vout] 1157 | if is_segwit: 1158 | scope.witness_utxo = vout 1159 | d = script.descriptor.get_descriptor(script.index) 1160 | scope.witness_script = d.witness_script() 1161 | scope.redeem_script = d.redeem_script() 1162 | for k in d.keys: 1163 | scope.bip32_derivations[k.get_public_key()] = DerivationPath( 1164 | k.origin.fingerprint, k.origin.derivation 1165 | ) 1166 | 1167 | @walletrpc 1168 | def walletcreatefundedpsbt( 1169 | self, wallet, inputs=[], outputs=[], locktime=0, options={}, bip32derivs=True 1170 | ): 1171 | # we need to add more inputs if it's in options or if inputs are empty 1172 | add_inputs = options.get("add_inputs", not bool(inputs)) 1173 | include_unsafe = options.get("include_unsafe", False) 1174 | changeAddress = options.get("changeAddress", None) 1175 | if changeAddress is None: 1176 | desc = wallet.get_descriptor(internal=True) 1177 | if not desc: 1178 | raise RPCError("No active descriptors", -500) 1179 | changeAddress = desc.getscriptpubkey().address(self.network) 1180 | changePosition = options.get("changePosition", None) 1181 | lockUnspents = options.get("lockUnspents", False) 1182 | fee_rate = options.get("fee_rate", options.get("feeRate", 0) * 1e5) 1183 | subtractFeeFromOutputs = options.get("subtractFeeFromOutputs", []) 1184 | conf_target = options.get("conf_target", 6) 1185 | replaceable = options.get("replaceable", False) 1186 | if not fee_rate: 1187 | fee_rate = self.sock.call("blockchain.estimatefee", [conf_target]) * 1e5 1188 | if fee_rate < 0: 1189 | fee_rate = 1 1190 | destinations = [] 1191 | for out in outputs: 1192 | for addr, amount in out.items(): 1193 | destinations.append( 1194 | TransactionOutput( 1195 | btc_to_sat(amount), EmbitScript.from_address(addr) 1196 | ) 1197 | ) 1198 | # don't add change out for now, just keep it here 1199 | changeOut = 
TransactionOutput(0, EmbitScript.from_address(changeAddress)) 1200 | # get utxos from inputs 1201 | inputs = [ 1202 | UTXO.query.filter_by( 1203 | wallet=wallet, txid=inp["txid"], vout=inp["vout"] 1204 | ).first() 1205 | for inp in inputs 1206 | ] 1207 | if None in inputs: 1208 | raise RPCError("Insufficient funds", -4) # wrong utxo is provided in inputs 1209 | sum_outs = sum([out.value for out in destinations]) 1210 | sum_ins = sum([inp.amount for inp in inputs]) 1211 | utxos = UTXO.query.filter_by(wallet=wallet, locked=False).order_by( 1212 | UTXO.amount.desc() 1213 | ) 1214 | tx = EmbitTransaction( 1215 | vin=[TransactionInput(bytes.fromhex(inp.txid), inp.vout) for inp in inputs], 1216 | vout=destinations, 1217 | locktime=locktime, 1218 | ) 1219 | sz = len(tx.serialize()) 1220 | # TODO: proper coin selection 1221 | if add_inputs and sum_ins < (sum_outs + sz * fee_rate): 1222 | for utxo in utxos: 1223 | if not include_unsafe and not bool(utxo.height): 1224 | continue 1225 | if utxo not in inputs: 1226 | inputs.append(utxo) 1227 | txin = TransactionInput(bytes.fromhex(utxo.txid), utxo.vout) 1228 | tx.vin.append(txin) 1229 | sz += len(txin.serialize()) 1230 | sum_ins += utxo.amount 1231 | if sum_ins >= (sum_outs + sz * fee_rate): 1232 | break 1233 | if sum_ins < sum_outs: 1234 | raise RPCError(f"Insufficient funds", -4) 1235 | if not subtractFeeFromOutputs and sum_ins < (sum_outs + sz * fee_rate): 1236 | raise RPCError(f"Insufficient funds", -4) 1237 | change_amount = int( 1238 | sum_ins - sum_outs - (sz + len(changeOut.serialize())) * fee_rate 1239 | ) 1240 | # if it makes sense to add change output 1241 | changepos = -1 1242 | if change_amount > 0: 1243 | changeOut.value = sum_ins - sum_outs # we don't subtract fee right now 1244 | tx.vout.insert( 1245 | changePosition or random.randint(0, len(tx.vout) + 1), changeOut 1246 | ) 1247 | changepos = tx.vout.index(changeOut) 1248 | fee = math.ceil(len(tx.serialize()) * fee_rate) 1249 | # subtract fee 1250 | if 
subtractFeeFromOutputs: 1251 | for idx in subtractFeeFromOutputs: 1252 | tx.vout[idx].value -= math.ceil(fee / len(subtractFeeFromOutputs)) 1253 | elif changepos >= 0: 1254 | tx.vout[changepos].value -= fee 1255 | # set rbf if requested 1256 | if replaceable: 1257 | for inp in tx.vin: 1258 | inp.sequence = 0xFFFFFFFD 1259 | psbt = PSBT(tx) 1260 | for i, inp in enumerate(psbt.inputs): 1261 | self._fill_scope(inp, inputs[i].script, add_utxo=True) 1262 | if changepos >= 0: 1263 | sc = Script.query.filter_by( 1264 | wallet=wallet, script=psbt.outputs[changepos].script_pubkey.data.hex() 1265 | ).first() 1266 | if sc: 1267 | self._fill_scope(psbt.outputs[changepos], sc) 1268 | if lockUnspents: 1269 | for inp in inputs: 1270 | inp.locked = True 1271 | db.session.commit() 1272 | return {"psbt": str(psbt), "fee": sat_to_btc(fee), "changepos": changepos} 1273 | 1274 | @walletrpc 1275 | def walletprocesspsbt(self, wallet, psbt, sign=True, sighashtype=None): 1276 | psbt = PSBT.from_string(psbt) 1277 | # fill inputs 1278 | for inp in psbt.inputs: 1279 | tx = self._get_tx(inp.txid.hex()) 1280 | if tx is None: 1281 | continue 1282 | is_segwit = tx.is_segwit 1283 | # clear witness 1284 | for vin in tx.vin: 1285 | vin.witness = Witness() 1286 | inp.non_witness_utxo = tx 1287 | vout = tx.vout[inp.vout] 1288 | if is_segwit: 1289 | inp.witness_utxo = vout 1290 | sc = Script.query.filter( 1291 | Script.wallet == wallet, 1292 | Script.index.isnot(None), 1293 | Script.script == vout.script_pubkey.data.hex(), 1294 | ).first() 1295 | if sc: 1296 | self._fill_scope(inp, sc) 1297 | # fill outputs 1298 | for out in psbt.outputs: 1299 | sc = Script.query.filter( 1300 | Script.wallet == wallet, 1301 | Script.index.isnot(None), 1302 | Script.script == out.script_pubkey.data.hex(), 1303 | ).first() 1304 | if sc: 1305 | self._fill_scope(out, sc) 1306 | complete = False 1307 | if sign and wallet.private_keys_enabled: 1308 | for d in wallet.descriptors: 1309 | psbt.sign_with(d.get_descriptor()) 1310 
        res = str(psbt)
        try:
            # roundtrip through finalize to see if the psbt is fully signed
            if finalize_psbt(PSBT.from_string(res)):
                complete = True
        except:
            # best effort: an unfinalizable psbt is simply reported incomplete
            pass
        return {"psbt": res, "complete": complete}

    # ========== INTERNAL METHODS ==========

    def __repr__(self) -> str:
        # NOTE(review): this f-string is empty — it looks like the original
        # content (likely "<Spectrum ...>") was stripped; confirm upstream.
        return f""

    def set_seed(self, wallet, seed=None):
        """Creates (or restores from `seed`) a hot wallet: derives a wpkh
        receive and a wpkh change descriptor from the seed and imports both
        as the active descriptors."""
        if seed is None:
            seed = os.urandom(32).hex()
        self.seed = seed
        root = bip32.HDKey.from_seed(bytes.fromhex(seed))
        fgp = root.my_fingerprint.hex()
        # TODO: maybe better to use bip84?
        recv_desc = EmbitDescriptor.from_string(f"wpkh([{fgp}]{root}/0h/0/*)")
        change_desc = EmbitDescriptor.from_string(f"wpkh([{fgp}]{root}/0h/1/*)")
        self.importdescriptor(wallet, str(recv_desc), internal=False, active=True)
        self.importdescriptor(wallet, str(change_desc), internal=True, active=True)

    def importdescriptor(
        self,
        wallet: Wallet,
        desc: str,
        internal=False,
        active=False,
        label="",
        timestamp="now",
        next_index=0,
        **kwargs,
    ):
        """Stores a descriptor for `wallet`, pre-derives its scriptpubkeys
        and subscribes them on the electrum socket.

        Returns the created Descriptor DB row.
        NOTE(review): `label` and `timestamp` are currently unused.
        """
        logger.info(f"Importing descriptor {desc}")
        addr_range = kwargs.get("range", 300)  # because range is special keyword
        descriptor = EmbitDescriptor.from_string(desc)
        has_private_keys = any([k.is_private for k in descriptor.keys])
        private_descriptor = None
        if has_private_keys:
            # keep the original (private) string separately; store the
            # re-serialized form as the canonical descriptor string
            private_descriptor = desc
            desc = str(descriptor)
        if active:
            # deactivate other active descriptor
            for old_desc in wallet.descriptors:
                if old_desc.internal == internal and old_desc.active:
                    old_desc.active = False
        d = Descriptor(
            wallet_id=wallet.id,
            active=active,
            internal=internal,
            descriptor=desc,
            private_descriptor=private_descriptor,
            next_index=next_index,
        )
        db.session.add(d)
        db.session.commit()
        # TODO: move to keypoolrefill or something
        # Add scripts
        logger.info(
            f"Creating {next_index + addr_range} scriptpubkeys for wallet {wallet}"
        )
        for i in range(0, next_index + addr_range):
            scriptpubkey = descriptor.derive(i).script_pubkey()
            address = scriptpubkey.address()
            # logger.info(f"  {address}")
            sc = Script(
                wallet=wallet,
                descriptor=d,
                index=i,
                script=scriptpubkey.data.hex(),
                scripthash=scripthash(scriptpubkey),
            )
            db.session.add(sc)
        db.session.commit()
        self.subcribe_scripts(d)
        return d
--------------------------------------------------------------------------------
/src/cryptoadvance/spectrum/spectrum_error.py:
--------------------------------------------------------------------------------
class RPCError(Exception):
    """Should use one of : https://github.com/bitcoin/bitcoin/blob/v22.0/src/rpc/protocol.h#L25-L88"""

    def __init__(self, message, code=-1):  # -1 is RPC_MISC_ERROR
        self.message = message
        self.code = code

    def to_dict(self):
        # shape matches the "error" object of a JSON-RPC response
        return {"code": self.code, "message": self.message}
--------------------------------------------------------------------------------
/src/cryptoadvance/spectrum/util.py:
--------------------------------------------------------------------------------
from binascii import hexlify, unhexlify
import hashlib
import io
import logging
from threading import Thread
from decimal import Decimal

from embit import hashes
from flask import current_app as app

logger = logging.getLogger(__name__)


def get_blockhash(hex_header):
    """Double-sha256 of the 80-byte header hex, reversed to display order."""
    return hashes.double_sha256(bytes.fromhex(hex_header))[::-1].hex()


def scripthash(script):
    """Calculates a scripthash for Electrum from address"""
    # electrum protocol: sha256 of the scriptpubkey, byte-reversed, as hex
    return hashes.sha256(script.data)[::-1].hex()


def sat_to_btc(sat):
    """Core is returning floats which is not good.
    We need to switch over to decimal at some point
    but this is not yet used yet.
    If we do it, also have a look at:
    https://github.com/relativisticelectron/specter-desktop/pull/3
    """
    sat = sat or 0  # if None is passed
    return round(sat * 1e-8, 8)


def btc_to_sat(btc):
    """Converts a BTC float amount to an integer number of satoshis."""
    btc = btc or 0  # if None is passed
    return round(btc * 1e8)


class SpectrumException(Exception):
    # raised for expected error conditions (e.g. a wallet without scripts)
    pass


class SpectrumInternalException(Exception):
    # separate type for internal failures; usage not visible in this chunk
    pass


def handle_exception(exception):
    """prints the exception and most important the stacktrace"""
    logger.error(
        "----START-TRACEBACK-----------------------------------------------------------------"
    )
    logger.exception(exception)  # the exception instance
    logger.error(
        "----END---TRACEBACK-----------------------------------------------------------------"
    )


class FlaskThread(Thread):
    """A FlaskThread passes the applicationcontext to the new thread in order to make stuff working seamlessly in new threadsS
    copied from https://stackoverflow.com/questions/39476889/use-flask-current-app-logger-inside-threading"""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.daemon = True
        try:
            self.app = app._get_current_object()
            self.flask_mode = True
        except RuntimeError as e:
            # NOTE(review): flask_mode is only assigned when the message
            # matches; a RuntimeError with different text would leave the
            # attribute unset and make run() raise AttributeError — consider
            # defaulting flask_mode = False before the try.
            if str(e).startswith("Working outside of application context."):
                self.flask_mode = False

    def run(self):
        # re-enter the Flask app context in the new thread when available
        if self.flask_mode:
            with self.app.app_context():
                logger.debug(f"starting new FlaskThread: {self._target.__name__}")
                super().run()
        else:
            logger.debug(f"starting new Thread: {self._target.__name__}")
            super().run()


# inspired by Jimmy:
# https://github.com/jimmysong/programmingbitcoin/blob/3fba6b992ece443e4256df057595cfbe91edda75/code-ch09/answers.py#L109-L123


def _little_endian_to_int(b):
    """little_endian_to_int takes byte sequence as a little-endian number.
    Returns an integer"""
    return int.from_bytes(b, "little")


def parse_blockheader(s):
    """Parses an 80-byte block header (hex string or bytes) into its fields.

    Returns a dict with version (int), prev_block / merkle_root (bytes,
    byte-reversed to display order), blocktime (int), bits / nonce (raw
    bytes) and blockhash (hex string).
    """
    if isinstance(s, str):
        s = unhexlify(s)
    if isinstance(s, bytes):
        assert len(s) == 80, f"a Blockheader is exactly 80 bytes but this has {len(s)}"
        mybytes = s
        # blockhash = double-sha256 of the header, displayed byte-reversed
        blockhash_bytes = hashlib.sha256(hashlib.sha256(mybytes).digest()).digest()[
            ::-1
        ]
        blockhash_str = hexlify(blockhash_bytes).decode()
    s = io.BytesIO(s)
    version = _little_endian_to_int(s.read(4))
    prev_block = s.read(32)[::-1]  # stored little-endian on the wire
    merkle_root = s.read(32)[::-1]
    timestamp = _little_endian_to_int(s.read(4))
    bits = s.read(4)
    nonce = s.read(4)
    return {
        "version": version,
        "prev_block": prev_block,
        "merkle_root": merkle_root,
        "blocktime": timestamp,
        "bits": bits,
        "nonce": nonce,
        "blockhash": blockhash_str,
    }
--------------------------------------------------------------------------------
/src/cryptoadvance/spectrum/util_specter.py:
--------------------------------------------------------------------------------
""" Contains stuff which is copy and pasted from specter
    in order to avoid dependency issues which are already severe enough.
"""
import logging
import os
from http.client import HTTPConnection
import requests
from urllib3.exceptions import NewConnectionError
from requests.exceptions import ConnectionError
import datetime
import urllib3
import json

logger = logging.getLogger(__name__)


def snake_case2camelcase(word):
    """Converts snake_case to CamelCase, e.g. "foo_bar" -> "FooBar"."""
    return "".join(x.capitalize() or "_" for x in word.split("_"))


def setup_logging(debug=False, tracerpc=False, tracerequests=False):
    """This code sets up logging for a Python application.
    It sets the logging level to DEBUG if the tracerpc
    or tracerequests flags are set, and INFO otherwise. It also sets up the formatter for the log messages,
    which can be customized with an environment variable. Finally, it adds a StreamHandler to the root
    logger and removes any existing handlers.
    """
    ch = logging.StreamHandler()
    ch.setLevel(logging.DEBUG)
    if tracerpc or tracerequests:
        if tracerpc:
            debug = True  # otherwise this won't work
            logging.getLogger("cryptoadvance.specter.rpc").setLevel(logging.DEBUG)
        if tracerequests:
            # from here: https://stackoverflow.com/questions/16337511/log-all-requests-from-the-python-requests-module
            HTTPConnection.debuglevel = 1
            requests_log = logging.getLogger("requests.packages.urllib3")
            requests_log.setLevel(logging.DEBUG)
            requests_log.propagate = True
    else:
        logging.getLogger("cryptoadvance.specter.rpc").setLevel(logging.INFO)

    if debug:
        # No need for timestamps while developing
        formatter = logging.Formatter("[%(levelname)7s] in %(module)15s: %(message)s")
        logging.getLogger("cryptoadvance").setLevel(logging.DEBUG)
        # but not that chatty connectionpool
        logging.getLogger("urllib3.connectionpool").setLevel(logging.INFO)
    else:
        formatter = logging.Formatter(
            # Too early to format that via the flask-config, so let's copy it from there:
            os.getenv(
                "SPECTER_LOGFORMAT",
                "[%(asctime)s] %(levelname)s in %(module)s: %(message)s",
            )
        )
        logging.getLogger("cryptoadvance").setLevel(logging.INFO)
    ch.setFormatter(formatter)
    logging.getLogger().handlers = []
    logging.getLogger().addHandler(ch)


def _get_bool_env_var(varname, default=None):
    """Interprets the environment variable `varname` as a boolean:
    unset (and no default) -> False, the string "false" in any case ->
    False, otherwise the truthiness of the value."""
    value = os.environ.get(varname, default)

    if value is None:
        return False
    elif isinstance(value, str) and value.lower() == "false":
        return False
    elif bool(value) is False:
        return False
    else:
        return bool(value)


class BitcoinRPC:
    """A slim version of the Specter BitcoinRPC"""

    # total number of requests issued by all instances (debug aid)
    counter = 0

    # These are used for tracing the calls without too many duplicates
    last_call_hash = None
    last_call_hash_counter = 0

    # https://docs.python-requests.org/en/master/user/quickstart/#timeouts
    # None means until connection closes. It's specified in seconds
    default_timeout = None  # seconds

    def __init__(
        self,
        user="bitcoin",
        password="secret",
        host="127.0.0.1",
        port=8332,
        protocol="http",
        path="",
        timeout=None,
        session=None,
        proxy_url="socks5h://localhost:9050",
        only_tor=False,
        **kwargs,
    ):
        path = path.replace("//", "/")  # just in case
        self.user = user
        self._password = password
        self.port = port
        self.protocol = protocol
        self.host = host
        self.path = path
        self.timeout = timeout or self.__class__.default_timeout
        self.proxy_url = proxy_url
        self.only_tor = only_tor
        self.r = None
        self.last_call_hash = None
        self.last_call_hash_counter = 0
        # session reuse speeds up requests
        if session is None:
            self._create_session()
        else:
            self.session = session

    def _create_session(self):
        # basic-auth session; recreated whenever the password changes
        session = requests.Session()
        session.auth = (self.user, self.password)
        self.session = session

    def wallet(self, name=""):
        """Return new instance connected to a specific wallet"""
        return type(self)(
            user=self.user,
            password=self.password,
            port=self.port,
            protocol=self.protocol,
            host=self.host,
            path="{}/wallet/{}".format(self.path, name),
            timeout=self.timeout,
            session=self.session,
            proxy_url=self.proxy_url,
            only_tor=self.only_tor,
        )

    @property
    def url(self):
        # e.g. "http://127.0.0.1:8332/wallet/mywallet"
        return "{s.protocol}://{s.host}:{s.port}{s.path}".format(s=self)

    @property
    def password(self):
        return self._password

    @password.setter
    def password(self, value):
        # changing the password invalidates the session's auth
        self._password = value
        self._create_session()

    def test_connection(self):
        """returns a boolean depending on whether getblockchaininfo() succeeds"""
        try:
            self.getblockchaininfo()
            return True
        except:
            return False

    def clone(self):
        """
        Returns a clone of self.
        Useful if you want to mess with the properties
        """
        return BitcoinRPC(
            self.user,
            self.password,
            self.host,
            self.port,
            self.protocol,
            self.path,
            self.timeout,
            self.session,
            self.proxy_url,
            self.only_tor,
        )

    def multi(self, calls: list, **kwargs):
        """Makes batch request to Core"""
        type(self).counter += len(calls)
        # some debug info for optimizations
        # methods = " ".join(list(dict.fromkeys([call[0] for call in calls])))
        # wallet = self.path.split("/")[-1]
        # print(f"{self.counter}: +{len(calls)} {wallet} {methods}")
        headers = {"content-type": "application/json"}
        payload = [
            {
                "method": method,
                "params": args if args != [None] else [],
                "jsonrpc": "2.0",
                "id": i,
            }
            for i, (method, *args) in enumerate(calls)
        ]
        timeout = self.timeout
        if "timeout" in kwargs:
            timeout = kwargs["timeout"]

        if kwargs.get("no_wait"):
            # Zero is treated like None, i.e. infinite wait
            timeout = 0.001

        url = self.url
        if "wallet" in kwargs:
            url = url + "/wallet/{}".format(kwargs["wallet"])
        ts = self.trace_call_before(url, payload)
        try:
            r = self.session.post(
                url, data=json.dumps(payload), headers=headers, timeout=timeout
            )
        except (ConnectionError, NewConnectionError, ConnectionRefusedError) as ce:
            # NOTE(review): re-wrapping loses the original exception type;
            # consider `raise Exception(ce) from ce` to keep the chain.
            raise Exception(ce)

        except (requests.exceptions.Timeout, urllib3.exceptions.ReadTimeoutError) as to:
            # Timeout is effectively one of the two:
            # ConnectTimeout: The request timed out while trying to connect to the remote server
            # ReadTimeout: The server did not send any data in the allotted amount of time.
            # ReadTimeoutError: Raised when a socket timeout occurs while receiving data from a server
            if kwargs.get("no_wait"):
                # Used for rpc calls that don't immediately return (e.g. rescanblockchain) so we don't
                # expect any data back anyway. __getattr__ expects a list of formatted json.
                self.trace_call_after(url, payload, timeout)
                return [{"error": None, "result": None}]

            logger.error(
                "Timeout after {} secs while {} call({: <28}) payload:{} Exception: {}".format(
                    timeout,
                    self.__class__.__name__,
                    "/".join(url.split("/")[3:]),
                    payload,
                    to,
                )
            )
            logger.exception(to)
            # NOTE(review): this format string has 3 placeholders but is
            # given 4 arguments — the trailing `payload` is silently dropped.
            raise Exception(
                "Timeout after {} secs while {} call({: <28}). Check the logs for more details.".format(
                    timeout,
                    self.__class__.__name__,
                    "/".join(url.split("/")[3:]),
                    payload,
                )
            )
        self.trace_call_after(url, payload, ts)
        self.r = r
        if r.status_code != 200:
            logger.debug(f"last call FAILED: {r.text}")
            if r.text.startswith("Work queue depth exceeded"):
                raise Exception(
                    "Your Bitcoind is running hot (Work queue depth exceeded)! Bitcoind gets more requests than it can process. Please refrain from doing anything for some minutes."
                )
            raise Exception(
                "Server responded with error code %d: %s" % (r.status_code, r.text), r
            )
        r = r.json()
        return r

    @classmethod
    def trace_call_before(cls, url, payload):
        """get a timestamp if needed in order to measure how long the call takes"""
        if logger.level == logging.DEBUG:
            return datetime.datetime.now()

    @classmethod
    def trace_call_after(cls, url, payload, timestamp):
        """logs out the call and its payload (if necessary), reduces noise by suppressing repeated calls"""
        if logger.level == logging.DEBUG:
            timediff_ms = int(
                (datetime.datetime.now() - timestamp).total_seconds() * 1000
            )
            current_hash = hash(
                json.dumps({"url": url, "payload": payload}, sort_keys=True)
            )
            # NOTE(review): `== None` — PEP 8 prefers `is None` here.
            if cls.last_call_hash == None:
                cls.last_call_hash = current_hash
                cls.last_call_hash_counter = 0
            elif cls.last_call_hash == current_hash:
                # same call as last time: just count it, don't log again
                cls.last_call_hash_counter = cls.last_call_hash_counter + 1
                return
            else:
                if cls.last_call_hash_counter > 0:
                    # flush the suppressed-repeats counter before switching
                    logger.debug(f"call repeated {cls.last_call_hash_counter} times")
                    cls.last_call_hash_counter = 0
                    cls.last_call_hash = current_hash
                else:
                    cls.last_call_hash = current_hash
            logger.debug(
                "call({: <28})({: >5}ms) payload:{}".format(
                    "/".join(url.split("/")[3:]), timediff_ms, payload
                )
            )

    def __getattr__(self, method):
        # any unknown attribute becomes an RPC call,
        # e.g. rpc.getblockchaininfo() -> multi([("getblockchaininfo",)])
        def fn(*args, **kwargs):
            r = self.multi([(method, *args)], **kwargs)[0]
            if r["error"] is not None:
                raise Exception(
                    f"Request error for method {method}{args}: {r['error']['message']}",
                    r,
                )
            return r["result"]

        return fn

    def __repr__(self) -> str:
        # NOTE(review): empty f-string — the original content (likely
        # "<BitcoinRPC ...>") appears to have been stripped; confirm upstream.
        return f""
--------------------------------------------------------------------------------
/tests/conftest.py:
--------------------------------------------------------------------------------
import code
import json
import logging
import shutil
import signal
import sys
import tempfile
import traceback
from binascii import hexlify
import pytest

from cryptoadvance.spectrum.util_specter import setup_logging
# NOTE(review): this second import shadows the setup_logging imported on the
# line above — confirm which implementation the tests are meant to use.
from cryptoadvance.spectrum.cli import setup_logging
from cryptoadvance.spectrum.config import TestConfig
from cryptoadvance.spectrum.server import create_app, init_app
from embit import script
from embit.bip32 import NETWORKS, HDKey
from embit.bip39 import mnemonic_to_seed
from flask import Flask
from werkzeug.utils import import_string
from werkzeug.utils import ImportStringError

from fix_infrastructure import MockServer

logger = logging.getLogger(__name__)

pytest_plugins = [
    # "fix_infrastructure",
    "fix_keys_and_seeds_embit"
]

# This is from https://stackoverflow.com/questions/132058/showing-the-stack-trace-from-a-running-python-application
# it enables stopping a hanging test via sending the pytest-process a SIGUSR2 (12)
# kill 12 pid-of-pytest
# In the article they claim to open a debug-console which didn't work for me but at least
# you get a stacktrace in the output.
def debug(sig, frame):
    """Interrupt running process, and provide a python prompt for
    interactive debugging."""
    d = {"_frame": frame}  # Allow access to frame object.
    d.update(frame.f_globals)  # Unless shadowed by global
    d.update(frame.f_locals)

    i = code.InteractiveConsole(d)
    message = "Signal received : entering python shell.\nTraceback:\n"
    message += "".join(traceback.format_stack(frame))
    i.interact(message)


def listen():
    # registers the SIGUSR2 handler defined above (see module comment)
    signal.signal(signal.SIGUSR2, debug)  # Register handler


def pytest_addoption(parser):
    """Internally called to add options to pytest
    see pytest_generate_tests(metafunc) on how to check that
    Also used to register the SIGUSR2 (12) as decribed in conftest.py
    """
    parser.addoption("--docker", action="store_true", help="run bitcoind in docker")
    parser.addoption(
        "--bitcoind-version",
        action="store",
        default="v0.20.1",
        help="Version of bitcoind (something which works with git checkout ...)",
    )
    parser.addoption(
        "--bitcoind-log-stdout",
        action="store",
        default=False,
        help="Whether bitcoind should log to stdout (default:False)",
    )
    parser.addoption(
        "--elementsd-version",
        action="store",
        default="master",
        help="Version of elementsd (something which works with git checkout ...)",
    )
    parser.addoption(
        "--config",
        action="store",
        default="cryptoadvance.spectrum.config.TestConfig",
        help="The config-class to use, usually cryptoadvance.spectrum.config.Testconfig ",
    )
    listen()


@pytest.fixture
def empty_data_folder():
    # Make sure that this folder never ever gets a reasonable non-testing use-case
    with tempfile.TemporaryDirectory(prefix="specter_home_tmp_") as data_folder:
        yield data_folder


def spectrum_app_with_config(config={}):
    """helper-function to create SpectrumFlasks"""
    setup_logging(debug=True)
    logger.info("Deleting ./data")
    shutil.rmtree("./data", ignore_errors=True)
    if isinstance(config, dict):
        # build a throwaway subclass of TestConfig carrying the overrides
        tempClass = type("tempClass", (TestConfig,), {})
        for key, value in config.items():
            setattr(tempClass, key, value)
        # service_manager will expect the class to be defined as a direct property of the module:
        if hasattr(sys.modules[__name__], "tempClass"):
            delattr(sys.modules[__name__], "tempClass")
        assert not hasattr(sys.modules[__name__], "tempClass")
        setattr(sys.modules[__name__], "tempClass", tempClass)
        assert hasattr(sys.modules[__name__], "tempClass")
        assert getattr(sys.modules[__name__], "tempClass") == tempClass
        config = tempClass
    app = create_app(config=config)
    try:
        shutil.rmtree(app.config["SPECTRUM_DATADIR"], ignore_errors=False)
    except FileNotFoundError:
        pass
    with app.app_context():
        app.config["TESTING"] = True
        app.testing = True
        init_app(app, standalone=True)
    return app


@pytest.fixture
def config(request):
    # Creates a class out of a fully qualified Class as string
    try:
        mytype = import_string(request.config.getoption("config"))
    except ImportStringError as e:
        raise Exception(
            """
            Module not found.
Try: 132 |             --config cryptoadvance.spectrum.config.TestConfig (default) or 133 |             --config cryptoadvance.spectrum.config.EmzyElectrumLiteConfig 134 |         """ 135 |         ) from e  # chain the original ImportStringError (the old bare `raise e` after this raise was unreachable) 136 | 137 |     return mytype 138 | 139 | 140 | @pytest.fixture 141 | def app() -> Flask: 142 |     """the Flask-App, but uninitialized""" 143 |     return spectrum_app_with_config(config="cryptoadvance.spectrum.config.TestConfig") 144 | 145 | 146 | @pytest.fixture 147 | def app_offline() -> Flask: 148 |     """provoke an offline spectrum by passing a closed port""" 149 |     return spectrum_app_with_config( 150 |         config={ 151 |             "ELECTRUM_HOST": "localhost",  # fixed: was a duplicate "ELECTRUM_PORT" key, silently shadowed by the next entry 152 |             "ELECTRUM_PORT": 30011, 153 |             "ELECTRUM_USES_SSL": False, 154 |         } 155 |     ) 156 | 157 | 158 | @pytest.fixture 159 | def app_nigiri() -> Flask: 160 |     """the Flask-App, but uninitialized""" 161 |     server = MockServer( 162 |         spectrum_app_with_config(config="cryptoadvance.spectrum.config.TestConfig") 163 |     ) 164 |     server.start() 165 |     yield server 166 |     server.shutdown_server() 167 | 168 | 169 | @pytest.fixture 170 | def client(app): 171 |     """a test_client from an initialized Flask-App""" 172 |     return app.test_client() 173 | 174 | 175 | @pytest.fixture 176 | def spectrum_node(): 177 |     """A Spectrum node""" 178 |     node_dict = { 179 |         "python_class": "cryptoadvance.specterext.spectrum.spectrum_node.SpectrumNode", 180 |         "name": "Spectrum Node", 181 |         "alias": "spectrum_node", 182 |         "host": "electrum.emzy.de", 183 |         "port": 5002, 184 |         "ssl": True, 185 |     } 186 | 187 |     # Instantiate via PersistentObject: 188 |     # NOTE(review): PersistentObject and SpectrumNode are not imported in this file — TODO confirm the import 189 |     sn = PersistentObject.from_json(node_dict) 190 |     assert type(sn) == SpectrumNode 191 |     return sn 192 | -------------------------------------------------------------------------------- /tests/fix_infrastructure.py: -------------------------------------------------------------------------------- 1 | import os 2 | from threading import Thread 3 | from uuid import uuid4 4 | 5 | import pytest 6 | import requests 7 | from flask import Flask, jsonify 8 | 9 | 10 | class
MockServer(Thread): 11 | """A Flask-Server which you can spinup in tests. It's running in a thread 12 | 13 | copied from https://gist.github.com/eruvanos/f6f62edb368a20aaa880e12976620db8 14 | """ 15 | 16 | def __init__(self, app, port=8081): 17 | super().__init__() 18 | self.port = port 19 | self.app = app 20 | self.url = "http://localhost:%s" % self.port 21 | 22 | self.app.add_url_rule("/shutdown", view_func=self._shutdown_server) 23 | 24 | def _shutdown_server(self): 25 | from flask import request 26 | 27 | if not "werkzeug.server.shutdown" in request.environ: 28 | raise RuntimeError("Not running the development server") 29 | request.environ["werkzeug.server.shutdown"]() 30 | return "Server shutting down..." 31 | 32 | def shutdown_server(self): 33 | requests.get("http://localhost:%s/shutdown" % self.port) 34 | self.join() 35 | 36 | def add_callback_response(self, url, callback, methods=("GET",)): 37 | callback.__name__ = str( 38 | uuid4() 39 | ) # change name of method to mitigate flask exception 40 | self.app.add_url_rule(url, view_func=callback, methods=methods) 41 | 42 | def add_json_response(self, url, serializable, methods=("GET",)): 43 | def callback(): 44 | return jsonify(serializable) 45 | 46 | self.add_callback_response(url, callback, methods=methods) 47 | 48 | def run(self): 49 | self.app.run(port=self.port) 50 | -------------------------------------------------------------------------------- /tests/fix_keys_and_seeds_embit.py: -------------------------------------------------------------------------------- 1 | from binascii import hexlify 2 | import json 3 | import pytest 4 | 5 | from embit.bip39 import mnemonic_to_seed 6 | from embit.bip32 import HDKey, NETWORKS 7 | from embit import script 8 | 9 | from bdkpython import bdk 10 | import bdkpython as bdk 11 | 12 | mnemonic_ghost_machine = ( 13 | "ghost ghost ghost ghost ghost ghost ghost ghost ghost ghost ghost machine" 14 | ) 15 | 16 | 17 | seed = bdk.Mnemonic.from_string(mnemonic_ghost_machine) 18 | 
19 | 20 | @pytest.fixture 21 | def mnemonic_keen_join(): 22 |     return 11 * "keen " + "join" 23 | 24 | 25 | @pytest.fixture 26 | def seed_keen_join(mnemonic_keen_join): 27 |     # fixed: previously derived from mnemonic_ghost_machine via bdk.Mnemonic (wrong mnemonic, and 28 |     # hexlify() was applied to a Mnemonic object instead of seed bytes); mirror seed_hold_accident 29 |     seed = mnemonic_to_seed(mnemonic_keen_join) 30 |     print(f"Keen Join seed: {hexlify(seed)}") 31 |     return seed 32 | 33 | 34 | @pytest.fixture 35 | def rootkey_keen_join(seed_keen_join): 36 |     rootkey = HDKey.from_seed(seed_keen_join) 37 |     print(f"Keen Join rootkey: {rootkey.to_base58()}") 38 |     # xprv9s21ZrQH143K3LEXAFcSsTmDGYrgbRs62sNyv1GMwFFwxQDVC3hQZK7LRDUBknzKnN8iT6RxRt9zSibY3qLrnrfTRTw1LtmBSdZJwfLAgK1 39 |     print(f"Keen Join rootkey fp: {hexlify(rootkey.my_fingerprint)}")  # dcbf0caf 40 |     return rootkey 41 | 42 | 43 | @pytest.fixture 44 | def acc0xprv_keen_join(rootkey_keen_join: HDKey): 45 |     xprv = rootkey_keen_join.derive("m/84h/1h/0h") 46 |     print(f"Keen Join acc0xprv: {xprv.to_base58(version=NETWORKS['test']['xprv'])}") 47 |     # tprv8fguxcy3Z9ptPeGvnXzExfb1szXtnXkDDfU8oaYp6ynLPkomKWCJ77SikGXt1Gf4zkJaagBJSQFt8UvY2HviJyAKPej7cWn8oD3bkpV2CVQ 48 | 49 |     return xprv 50 | 51 | 52 | @pytest.fixture 53 | def acc0xpub_keen_join(acc0xprv_keen_join: HDKey): 54 |     xpub = acc0xprv_keen_join.to_public() 55 |     print(f"Keen Join acc0xpub: {xpub.to_base58(version=NETWORKS['test']['xpub'])}") 56 |     # tpubDCNx731HhXWZH7JigBeqN5F8T23pwrw7ny4v66b7XFajEF4Xwu1tHc4avQRVofghtzUs5BVNjYwRcqyfiBzftmfRFMKBWVCVdhQTWM1b7wM 57 |     return xpub 58 | 59 | 60 | @pytest.fixture 61 | def acc0key0pubkey_keen_join(acc0xpub_keen_join: HDKey): 62 |     pubkey = acc0xpub_keen_join.derive("m/0/0") 63 |     print(f"Keen Join {pubkey.key}") 64 |     print(f"Keen Join hexlify(pubkey.sec()) : {hexlify(pubkey.sec())}") 65 |     print(hexlify(pubkey.sec())) 66 |     return pubkey 67 | 68 | 69 | @pytest.fixture 70 | def acc0key0addr_keen_join(acc0key0pubkey_keen_join): 71 |     sc = script.p2wpkh(acc0key0pubkey_keen_join) 72 |     address = sc.address(NETWORKS["test"]) 73 |     print(f"Keen Join {address}")  # m/84'/1'/0'/0/0 74 |     return
address 73 | 74 | 75 | @pytest.fixture 76 | def key_keen_join(acc0key0pubkey_keen_join): 77 | sc = script.p2wpkh(acc0key0pubkey_keen_join) 78 | address = sc.address(NETWORKS["test"]) 79 | return address 80 | 81 | 82 | @pytest.fixture 83 | def acc0key_keen_join(acc0xpub_keen_join, rootkey_keen_join: HDKey): 84 | 85 | key: Key = Key( 86 | acc0xpub_keen_join.to_base58( 87 | version=NETWORKS["test"]["xpub"] 88 | ), # original (ToDo: better original) 89 | hexlify(rootkey_keen_join.my_fingerprint).decode("utf-8"), # fingerprint 90 | "m/84h/1h/0h", # derivation 91 | "wpkh", # key_type 92 | "Muuh", # purpose 93 | acc0xpub_keen_join.to_base58(version=NETWORKS["test"]["xpub"]), # xpub 94 | ) 95 | mydict = key.json 96 | print(json.dumps(mydict)) 97 | 98 | return key 99 | 100 | 101 | # hold hold hold hold hold hold hold hold hold hold hold accident 102 | # This is a formal creation of all major bitcoin artifacts from the 103 | # hold accident mnemonic 104 | 105 | 106 | @pytest.fixture 107 | def mnemonic_hold_accident(): 108 | return 11 * "hold " + "accident" 109 | 110 | 111 | @pytest.fixture 112 | def seed_hold_accident(mnemonic_hold_accident): 113 | seed = mnemonic_to_seed(mnemonic_hold_accident) 114 | print(f"Hold Accident seed: {hexlify(seed)}") 115 | return mnemonic_to_seed(mnemonic_hold_accident) 116 | 117 | 118 | @pytest.fixture 119 | def rootkey_hold_accident(seed_hold_accident): 120 | rootkey = HDKey.from_seed(seed_hold_accident) 121 | print(f"Hold Accident rootkey: {rootkey.to_base58()}") 122 | # xprv9s21ZrQH143K45uYUg7zhHku3bik5a2nw8XcanYCUGHn7RE1Bhkr53RWcjAQVFDTmruDceNDAGbc7yYsZCGveKMDrPr18hMsMcvYTGJ4Mae 123 | print(f"Hold Accident rootkey fp: {hexlify(rootkey.my_fingerprint)}") 124 | return rootkey 125 | 126 | 127 | @pytest.fixture 128 | def acc0xprv_hold_accident(rootkey_hold_accident: HDKey): 129 | xprv = rootkey_hold_accident.derive("m/84h/1h/0h") 130 | print(f"Hold Accident acc0xprv: {xprv.to_base58(version=NETWORKS['test']['xprv'])}") 131 | # 
tprv8g6WHqYgjvGrEU6eEdJxXzNUqN8DvLFb3iv3yUVomNRcNqT5JSKpTVNBzBD3qTDmmhRHPLcjE5fxFcGmU3FqU5u9zHm9W6sGX2isPMZAKq2 132 | 133 | return xprv 134 | 135 | 136 | @pytest.fixture 137 | def acc0xpub_hold_accident(acc0xprv_hold_accident: HDKey): 138 | xpub = acc0xprv_hold_accident.to_public() 139 | print(f"Hold Accident acc0xpub: {xpub.to_base58(version=NETWORKS['test']['xpub'])}") 140 | # vpub5YkPJgRQsev79YZM1NRDKJWDjLFcD2xSFAt6LehC5iiMMqQgMHyCFQzwsu16Rx9rBpXZVXPjWAxybuCpsayaw8qCDZtjwH9vifJ7WiQkHwu 141 | return xpub 142 | 143 | 144 | @pytest.fixture 145 | def acc0key0pubkey_hold_accident(acc0xpub_hold_accident: HDKey): 146 | pubkey = acc0xpub_hold_accident.derive("m/0/0") 147 | print("------------") 148 | print(pubkey.key) 149 | # 03584dc8282f626ce5570633018be0760baae68f1ecd6e801192c466ada55f5f31 150 | print(hexlify(pubkey.sec())) 151 | # b'03584dc8282f626ce5570633018be0760baae68f1ecd6e801192c466ada55f5f31' 152 | return pubkey 153 | 154 | 155 | @pytest.fixture 156 | def acc0key0addr_hold_accident(acc0key0pubkey_hold_accident): 157 | sc = script.p2wpkh(acc0key0pubkey_hold_accident) 158 | address = sc.address(NETWORKS["test"]) 159 | print(address) # m/84'/1'/0'/0/0 160 | # tb1qnwc84tkupy5v0tzgt27zkd3uxex3nmyr6vfhdd 161 | return address 162 | 163 | 164 | @pytest.fixture 165 | def key_hold_accident(acc0key0pubkey_hold_accident): 166 | sc = script.p2wpkh(acc0key0pubkey_hold_accident) 167 | address = sc.address(NETWORKS["test"]) 168 | print(address) # m/84'/1'/0'/0/0 169 | # tb1qnwc84tkupy5v0tzgt27zkd3uxex3nmyr6vfhdd 170 | return address 171 | -------------------------------------------------------------------------------- /tests/install_noded.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # fail early 3 | set -o pipefail 4 | 5 | # change to the directory the script is located in 6 | cd "$( dirname "${BASH_SOURCE[0]}" )/." 
7 | 8 | function checkout { 9 | node_impl=$1 # either bitcoin or elements 10 | 11 | # Clone bitcoind if it doesn't exist, or update it if it does 12 | # (copied from HWI) 13 | node_setup_needed=false 14 | if [ ! -d "./${node_impl}/.git" ]; then 15 | echo " --> cloning $node_impl" 16 | if [ "$node_impl" = "elements" ]; then 17 | clone_url=https://github.com/ElementsProject/elements.git 18 | elif [ "$node_impl" = "bitcoin" ]; then 19 | clone_url=https://github.com/bitcoin/bitcoin.git 20 | else 21 | echo "unknown node_impl $node_impl" 22 | exit 1 23 | fi 24 | git clone $clone_url 25 | return 1 26 | fi 27 | return 0 28 | } 29 | 30 | function maybe_update { 31 | node_impl=$1 # either bitcoin or elements 32 | # Determine if we need to pull. From https://stackoverflow.com/a/3278427 33 | UPSTREAM=origin/master 34 | LOCAL=$(git describe --all | sed 's/heads\///' | sed 's/tags\///') # gives either a tag or "master" 35 | if cat ../../pytest.ini | grep "addopts = --${node_impl}d-version" ; then 36 | # in this case, we use the expected version from the test also as the tag to be checked out 37 | # i admit that this is REALLY ugly. Happy for any recommendations to do that more easy 38 | PINNED=$(cat ../../pytest.ini | grep "addopts = " | cut -d'=' -f2 | sed 's/--/+/g' | tr '+' '\n' | grep ${node_impl} | cut -d' ' -f2) 39 | if [ "$node_impl" = "elements" ]; then 40 | # in the case of elements, the tags have a "elements-" prefix 41 | PINNED=$(echo "$PINNED" | sed 's/v//' | sed 's/^/elements-/') 42 | fi 43 | fi 44 | 45 | # the version in pytest.ini is (also) used to check the version via getnetworkinfo()["subversion"] 46 | # However, this might not be a valid git rev. 
So we need another way to specify the git-rev used 47 | # as we want to be able to test against specific commits 48 | if [ -f ../${node_impl}_gitrev_pinned ]; then 49 | PINNED=$(cat ../${node_impl}_gitrev_pinned) 50 | fi 51 | 52 | if [ -z $PINNED ]; then 53 | REMOTE=$(git rev-parse "$UPSTREAM") 54 | BASE=$(git merge-base @ "$UPSTREAM") 55 | if [ "$LOCAL" = "$REMOTE" ]; then 56 | echo "Up-to-date" 57 | elif [ "$LOCAL" = "$BASE" ]; then 58 | git pull 59 | git reset --hard origin/master 60 | return 1 61 | fi 62 | else 63 | if [ "$LOCAL" = "$PINNED" ]; then 64 | echo " --> Pinned: $PINNED! Checkout not needed!" 65 | else 66 | echo " --> Pinned: $PINNED! Checkout needed!" 67 | git fetch 68 | git checkout $PINNED 69 | return 1 70 | fi 71 | fi 72 | if [ -f ./tests/${node_impl}/src/${node_impl}d ]; then 73 | return 0 74 | else 75 | return 1 76 | fi 77 | } 78 | 79 | function calc_pytestinit_nodeimpl_version { 80 | 81 | # returns the version of $node_impl from pytest.ini from a line which looks like: 82 | # addopts = --bitcoind-version v22.0 --elementsd-version v0.20.99 83 | # special treatments for bitcoin and elements necessary, see below 84 | local node_impl=$1 85 | if cat ../pytest.ini | grep -q "${node_impl}d-version" ; then 86 | # in this case, we use the expected version from the test also as the tag to be checked out 87 | # i admit that this is REALLY ugly. 
Happy for any recommendations to do that more easy 88 | PINNED=$(cat ../pytest.ini | grep "addopts = " | cut -d'=' -f2 | sed 's/--/+/g' | tr '+' '\n' | grep ${node_impl} | cut -d' ' -f2) 89 | 90 | if [ "$node_impl" = "elements" ]; then 91 | # in the case of elements, the tags have a "elements-" prefix 92 | PINNED=$(echo "$PINNED" | sed 's/v//' | sed 's/^/elements-/') 93 | fi 94 | if [ "$node_impl" = "bitcoin" ]; then 95 | # in the case of bitcoin, the binary-version-artifacts are missing a ".0" at the end which we remove here 96 | PINNED=$(echo "$PINNED" | sed 's/..$//') 97 | fi 98 | fi 99 | echo $PINNED 100 | } 101 | 102 | function build_node_impl { 103 | node_impl=$1 # either bitcoin or elements 104 | nodeimpl_setup_needed=$2 105 | 106 | if [ "$nodeimpl_setup_needed" = 1 ] ; then 107 | echo " --> Autogen & Configure necessary" 108 | # Build dependencies. This is super slow, but it is cached so it runs fairly quickly. 109 | cd contrib 110 | # This is hopefully fullfilles (via .travis.yml most relevantly) 111 | # sudo apt install make automake cmake curl g++-multilib libtool binutils-gold bsdmainutils pkg-config python3 patch 112 | 113 | if [ $(uname) = "Darwin" ]; then 114 | brew install berkeley-db@4 115 | brew link berkeley-db4 --force || : 116 | fi 117 | echo " --> Building db4" 118 | ./install_db4.sh $(pwd) 119 | echo " --> Finishing db4" 120 | ls -l 121 | cd .. 122 | echo " --> Setup needed. 
Starting autogen" 123 | ./autogen.sh 124 | echo " --> Starting configure" 125 | export BDB_PREFIX="$(pwd)/contrib/db4" 126 | echo " BDB_PREFIX=$BDB_PREFIX" 127 | # This is for reducing mem-footprint as for some reason cirrus fails even though it has 4GB Mem 128 | # CXXFLAGS="--param ggc-min-expand=1 --param ggc-min-heapsize=32768 -O2" 129 | 130 | if [ "$node_impl" = "elements" ]; then 131 | ./configure BDB_LIBS="-L${BDB_PREFIX}/lib -ldb_cxx-4.8" BDB_CFLAGS="-I${BDB_PREFIX}/include" 132 | elif [ "$node_impl" = "bitcoin" ]; then 133 | ./configure BDB_LIBS="-L${BDB_PREFIX}/lib -ldb_cxx-4.8" BDB_CFLAGS="-I${BDB_PREFIX}/include" CXXFLAGS="--param ggc-min-expand=1 --param ggc-min-heapsize=32768 -O2" --with-miniupnpc=no --without-gui --disable-zmq --disable-tests --disable-bench --with-libs=no --with-utils=no 134 | else 135 | echo "unknown node_impl $node_impl" 136 | exit 1 137 | fi 138 | else 139 | echo " --> Skipping Autogen & Configure" 140 | fi 141 | export BDB_PREFIX="$(pwd)/contrib/db4" 142 | BDB_CFLAGS="-I${BDB_PREFIX}/include" 143 | # optimizing for speed would use the maximum threads available: 144 | #make -j$(nproc) 145 | # but we're optimizing for mem-allocation. 1 thread is quite slow, let's try 4 (we have 4GB and need to find the sweet-spot) 146 | make -j2 147 | cd ../.. 
#travis is sourcing this script 148 | echo " --> Finished build $node_impl" 149 | 150 | 151 | } 152 | 153 | function sub_help { 154 | echo "This script will result in having bitcoind or elementsd binaries, either by binary download or via compilation" 155 | echo "Do one of these:" 156 | echo "$ ./install_node.sh --bitcoin binary" 157 | echo "$ ./install_node.sh --bitcoin compile" 158 | echo "$ ./install_node.sh --elements binary" 159 | echo "$ ./install_node.sh --elements compile" 160 | echo "For more context, see https://github.com/cryptoadvance/specter-desktop/blob/master/docs/development.md#how-to-run-the-tests" 161 | } 162 | 163 | function check_compile_prerequisites { 164 | if [ $(uname) = "Darwin" ]; then 165 | echo " --> No binary prerequisites checking for MacOS, GOOD LUCK!" 166 | #brew install automake berkeley-db4 libtool boost miniupnpc pkg-config python qt libevent qrencode sqlite 167 | else 168 | REQUIRED_PKGS="build-essential libtool autotools-dev automake pkg-config bsdmainutils python3 autoconf" 169 | REQUIRED_PKGS="$REQUIRED_PKGS libevent-dev libevent-dev libboost-dev libboost-system-dev libboost-filesystem-dev libboost-test-dev bc nodejs npm libgtk2.0-0 libgtk-3-0 libgbm-dev libnotify-dev libgconf-2-4 libnss3 libxss1 libasound2 libxtst6 xauth xvfb" 170 | REQUIRED_PKGS="$REQUIRED_PKGS wget" 171 | for REQUIRED_PKG in $REQUIRED_PKGS; do 172 | PKG_OK=$(dpkg-query -W --showformat='${Status}\n' $REQUIRED_PKG|grep "install ok installed") 173 | echo Checking for $REQUIRED_PKG: $PKG_OK 174 | if [ "" = "$PKG_OK" ]; then 175 | echo "No $REQUIRED_PKG. Setting up $REQUIRED_PKG." 176 | echo "WARNING: THIS SHOULD NOT BE NECESSARY, PLEASE FIX!" 177 | apt-get --yes install $REQUIRED_PKG 178 | fi 179 | done 180 | fi 181 | } 182 | 183 | function check_binary_prerequisites { 184 | if [ $(uname) = "Darwin" ]; then 185 | echo " --> No binary prerequisites checking for MacOS, GOOD LUCK!" 
186 | else 187 | REQUIRED_PKGS="wget" 188 | for REQUIRED_PKG in $REQUIRED_PKGS; do 189 | PKG_OK=$(dpkg-query -W --showformat='${Status}\n' $REQUIRED_PKG|grep "install ok installed") 190 | echo Checking for $REQUIRED_PKG: $PKG_OK 191 | if [ "" = "$PKG_OK" ]; then 192 | echo "No $REQUIRED_PKG. Setting up $REQUIRED_PKG." 193 | echo "WARNING: THIS SHOULD NOT BE NECESSARY, PLEASE FIX!" 194 | apt-get --yes install $REQUIRED_PKG 195 | fi 196 | done 197 | fi 198 | } 199 | 200 | function sub_compile { 201 | START=$(date +%s.%N) 202 | check_compile_prerequisites 203 | node_impl=$1 204 | echo " --> install_node.sh Start $(date) (compiling for $node_impl)" 205 | echo " checkout ..." 206 | checkout $node_impl 207 | cd $node_impl 208 | maybe_update $node_impl 209 | update=$? 210 | build_node_impl $node_impl $update 211 | echo " --> Listing binaries" 212 | if [ $(uname) = "Darwin" ]; then 213 | find tests/${node_impl}/src -maxdepth 1 -type f -perm +111 -exec ls -ld {} \; 214 | else 215 | find tests/${node_impl}/src -maxdepth 1 -type f -executable -exec ls -ld {} \; 216 | fi 217 | END=$(date +%s.%N) 218 | DIFF=$(echo "$END - $START" | bc) 219 | echo " --> install_node.sh End $(date) took $DIFF" 220 | } 221 | 222 | function sub_binary { 223 | node_impl=$1 224 | echo " --> install_noded.sh Start $(date) (binary) for node_impl $node_impl" 225 | START=$(date +%s) 226 | check_binary_prerequisites 227 | # todo: Parametrize this 228 | version=$(calc_pytestinit_nodeimpl_version $node_impl) 229 | echo " --> install version $version" 230 | # remove the v-prefix 231 | version=$(echo $version | sed -e 's/v//') 232 | if [ $(uname) = "Darwin" ]; then 233 | binary_file=${node_impl}-${version}-osx64.tar.gz 234 | else 235 | binary_file=${node_impl}-${version}-x86_64-linux-gnu.tar.gz 236 | fi 237 | if [[ ! 
-f $binary_file ]]; then 238 | if [ "$node_impl" = "elements" ]; then 239 | wget https://github.com/ElementsProject/elements/releases/download/${version}/${binary_file} 240 | fi 241 | if [ "$node_impl" = "bitcoin" ]; then 242 | wget https://bitcoincore.org/bin/bitcoin-core-${version}/${binary_file} 243 | fi 244 | fi 245 | 246 | tar -xzf ${binary_file} 247 | if [[ -d ./"$node_impl" ]]; then 248 | if [[ -d ./"$node_impl"/src ]]; then 249 | mv ./"$node_impl" ./"$node_impl"-src 250 | else 251 | rm -rf ./"$node_impl" 252 | fi 253 | fi 254 | ln -s ./"$node_impl"-${version} "$node_impl" 255 | echo " --> Listing binaries" 256 | if [ $(uname) = "Darwin" ]; then 257 | find ./"$node_impl"/bin -maxdepth 1 -type f -perm +111 -exec ls -ld {} \; 258 | else 259 | find ./"$node_impl"/bin -maxdepth 1 -type f -executable -exec ls -ld {} \; 260 | fi 261 | echo " --> checking for ${node_impl}d" 262 | test -x ./bitcoin/bin/${node_impl}d || exit 2 263 | echo " --> Finished installing ${node_impl}d binary" 264 | END=$(date +%s) 265 | DIFF=$(echo "$END - $START" | bc) 266 | echo " --> install_noded.sh End $(date) took $DIFF seconds" 267 | } 268 | 269 | 270 | function parse_and_execute() { 271 | if [[ $# = 0 ]]; then 272 | sub_help 273 | exit 0 274 | fi 275 | 276 | while [[ $# -gt 0 ]] 277 | do 278 | arg="$1" 279 | case $arg in 280 | "" | "-h" | "--help") 281 | sub_help 282 | shift 283 | ;; 284 | --debug) 285 | set -x 286 | DEBUG=true 287 | shift 288 | ;; 289 | --bitcoin) 290 | node_impl=bitcoin 291 | shift 292 | ;; 293 | --elements) 294 | node_impl=elements 295 | shift 296 | ;; 297 | help) 298 | sub_help 299 | shift 300 | ;; 301 | compile) 302 | sub_compile $node_impl || exit 2 303 | shift 304 | ;; 305 | binary) 306 | sub_binary $node_impl || exit 2 307 | shift 308 | ;; 309 | *) 310 | shift 311 | sub_${arg} $@ && ret=0 || ret=$? 312 | if [ "$ret" = 127 ]; then 313 | echo "Error: '$arg' is not a known subcommand." >&2 314 | echo " Run '$progname --help' for a list of known subcommands." 
>&2 315 | exit 1 316 | else 317 | exit $ret_value 318 | fi 319 | ;; 320 | esac 321 | done 322 | } 323 | 324 | parse_and_execute $@ 325 | -------------------------------------------------------------------------------- /tests/integration/basics.py: -------------------------------------------------------------------------------- 1 | from distutils import core 2 | import logging 3 | import shutil 4 | import sys 5 | import pytest 6 | from decimal import Decimal, getcontext 7 | from random import random 8 | import time 9 | from unittest.mock import MagicMock 10 | from embit.bip32 import NETWORKS, HDKey 11 | from mock import patch 12 | from embit.descriptor.checksum import add_checksum 13 | from cryptoadvance.spectrum.util_specter import BitcoinRPC 14 | 15 | 16 | logger = logging.getLogger("cryptoadvance") 17 | 18 | number_of_txs = 10 19 | keypoolrefill = number_of_txs 20 | 21 | 22 | def test_getblockchaininfo(caplog): 23 | """Test is using a rpc connecting to nigiri's core""" 24 | caplog.set_level(logging.INFO) 25 | rpc: BitcoinRPC = BitcoinRPC( 26 | user="admin1", password="123", host="localhost", port="18443" 27 | ) 28 | result = rpc.getblockchaininfo() 29 | assert result["blocks"] == 101 30 | assert result["chain"] == "regtest" 31 | assert result["chainwork"].startswith("000000000") 32 | -------------------------------------------------------------------------------- /tests/integration/elsock_test.py: -------------------------------------------------------------------------------- 1 | from distutils import core 2 | import logging 3 | import shutil 4 | import sys 5 | import pytest 6 | from decimal import Decimal, getcontext 7 | from random import random 8 | import time 9 | from unittest.mock import MagicMock 10 | from embit.bip32 import NETWORKS, HDKey 11 | from mock import patch 12 | from embit.descriptor.checksum import add_checksum 13 | from cryptoadvance.spectrum.elsock import ElectrumSocket 14 | from datetime import datetime 15 | 16 | logger = 
logging.getLogger("cryptoadvance") 17 | 18 | number_of_txs = 10 19 | keypoolrefill = number_of_txs 20 | 21 | 22 | def test_elsock(caplog): 23 | 24 | caplog.set_level(logging.DEBUG) 25 | 26 | def callback(something): 27 | print(something) 28 | 29 | # Speed up the test ... 30 | ElectrumSocket.tries_threshold = 1 31 | ElectrumSocket.sleep_ping_loop = 1 32 | logger.info(f"{datetime.now()} Testing ElectrumSocket") 33 | elsock = ElectrumSocket( 34 | host="electrum.emzy.de", 35 | port=50002, 36 | callback=callback, 37 | use_ssl=True, 38 | call_timeout=1, 39 | ) 40 | ts = elsock.ping() 41 | logger.info(f"First working ping in {ts} ms") 42 | logger.info(elsock._socket) 43 | assert ( 44 | caplog.text.count( 45 | "ElectrumSocket Status changed from unknown to creating_socket" 46 | ) 47 | == 1 48 | ) 49 | assert ( 50 | caplog.text.count( 51 | "ElectrumSocket Status changed from creating_socket to creating_threads" 52 | ) 53 | == 1 54 | ) 55 | assert ( 56 | caplog.text.count( 57 | "ElectrumSocket Status changed from creating_threads to execute_recreation_callback" 58 | ) 59 | == 1 60 | ) 61 | assert ( 62 | caplog.text.count( 63 | "ElectrumSocket Status changed from execute_recreation_callback to ok" 64 | ) 65 | == 1 66 | ) 67 | elsock._socket.close() 68 | logger.info( 69 | f"{datetime.now()} =======================NOW the socket was intentionally closed===============================================" 70 | ) 71 | # Should recover within 8 seconds ( 2 seconds buffer) 72 | for i in range(0, 10): 73 | logger.info( 74 | f"...................................... 
timer: {i} seconds passed (elsock.is_socket_closed() returns {elsock.is_socket_closed()})" 75 | ) 76 | time.sleep(1) 77 | logger.info( 78 | f"{datetime.now()}========================The socket connection should now work properly again================================" 79 | ) 80 | logger.info(elsock._socket) 81 | ts = elsock.ping() 82 | logger.info(f"second working ping in {ts} ms") 83 | assert ts < 1 84 | assert caplog.text.count("ElectrumSocket Status changed") == 9 85 | 86 | assert ( 87 | caplog.text.count( 88 | "ElectrumSocket Status changed from ok to broken_killing_threads" 89 | ) 90 | == 1 91 | ) 92 | assert ( 93 | caplog.text.count( 94 | "ElectrumSocket Status changed from broken_killing_threads to creating_socket" 95 | ) 96 | == 1 97 | ) 98 | assert ( 99 | caplog.text.count( 100 | "ElectrumSocket Status changed from creating_socket to creating_threads" 101 | ) 102 | == 2 103 | ) 104 | assert ( 105 | caplog.text.count( 106 | "ElectrumSocket Status changed from creating_threads to execute_recreation_callback" 107 | ) 108 | == 2 109 | ) 110 | assert ( 111 | caplog.text.count( 112 | "ElectrumSocket Status changed from execute_recreation_callback to ok" 113 | ) 114 | == 2 115 | ) 116 | -------------------------------------------------------------------------------- /tests/integration/spectrum_test.py: -------------------------------------------------------------------------------- 1 | from distutils import core 2 | import logging 3 | import shutil 4 | import sys 5 | import pytest 6 | from decimal import Decimal, getcontext 7 | from random import random 8 | import time 9 | from unittest.mock import MagicMock 10 | from embit.bip32 import NETWORKS, HDKey 11 | from mock import patch 12 | from embit.descriptor.checksum import add_checksum 13 | from cryptoadvance.spectrum.spectrum import Spectrum 14 | from cryptoadvance.spectrum.elsock import ElectrumSocket 15 | from datetime import datetime 16 | from cryptoadvance.spectrum.db import Descriptor, Script, Wallet 17 | 
from conftest import spectrum_app_with_config 18 | 19 | logger = logging.getLogger("cryptoadvance") 20 | 21 | number_of_txs = 10 22 | keypoolrefill = number_of_txs 23 | 24 | 25 | def fill_spectrum( 26 | spectrum, 27 | rootkey_hold_accident, 28 | ): 29 | spectrum = spectrum.spectrum 30 | # calculate the descriptor 31 | tpriv = rootkey_hold_accident.to_base58(version=NETWORKS["regtest"]["xprv"]) 32 | desc = add_checksum("wpkh(" + tpriv + "/84'/1'/0'/0/*)") 33 | desc = desc.replace("'", "h") 34 | spectrum.createwallet( 35 | "bob_the_wallet", disable_private_keys=True 36 | ) # not a hotwallet! 37 | wallet: Wallet = Wallet.query.filter_by(name="bob_the_wallet").first() 38 | logger.info("TEST: Import descriptor") 39 | spectrum.importdescriptor(wallet, desc) 40 | descriptor: Descriptor = Descriptor.query.filter_by( 41 | wallet=wallet 42 | ).all() # could use first() but let's assert! 43 | assert len(descriptor) == 1 44 | descriptor = descriptor[0] 45 | 46 | 47 | def test_spectrum_resilience(caplog, empty_data_folder, rootkey_hold_accident): 48 | """ 49 | Should test the behaviour of the system if the socket gets broken. 
50 | We're doing it intentionally here by spectrum_app.spectrum.sock._socket.close() 51 | 52 | In that case the monitor-thread of ElectrumSocket should detect that, recreate 53 | the socket and the threads and call the callback which will cause a new sync on the spectrum side 54 | 55 | Here is how the successfull Logging of that would look like: 56 | 57 | [ INFO] in conftest: Deleting ./data 58 | [ INFO] in server: config: 59 | [ INFO] in server: -------------------------CONFIGURATION-OVERVIEW------------ 60 | [ INFO] in server: Config from empty 61 | [ INFO] in server: APPLICATION_ROOT = / 62 | [ INFO] in server: DATABASE = /home/kim/src/spectrum/data/wallets.sqlite 63 | [ INFO] in server: DEBUG = False 64 | [ INFO] in server: ELECTRUM_HOST = electrum.emzy.de 65 | [ INFO] in server: ELECTRUM_PORT = 50002 66 | [ INFO] in server: ELECTRUM_USES_SSL = True 67 | [...] 68 | [ INFO] in server: USERNAME = admin 69 | [ INFO] in server: USE_X_SENDFILE = False 70 | [ INFO] in server: ----------------------------------------------------------- 71 | [ INFO] in server: Creating Spectrum Object ... 
72 | [ INFO] in spectrum: Creating txdir data/txs 73 | [ INFO] in spectrum: Creating ElectrumSocket electrum.emzy.de:50002 (ssl=True) 74 | [ INFO] in elsock: Initializing ElectrumSocket with electrum.emzy.de:50002 (ssl: ) 75 | [ DEBUG] in elsock: socket created : 76 | [ DEBUG] in elsock: socket wrapped : 77 | [ DEBUG] in elsock: socket connected: 78 | [ DEBUG] in util: starting new FlaskThread: recv_loop 79 | [ INFO] in elsock: Started bg thread for recv_loop 80 | [ DEBUG] in util: starting new FlaskThread: _write_loop 81 | [ INFO] in elsock: Started bg thread for _write_loop 82 | [ DEBUG] in util: starting new FlaskThread: _ping_loop 83 | [ INFO] in elsock: Started bg thread for _ping_loop 84 | [ DEBUG] in util: starting new FlaskThread: _notify_loop 85 | [ INFO] in elsock: Started bg thread for _notify_loop 86 | [ DEBUG] in util: starting new FlaskThread: _monitor_loop 87 | [ INFO] in elsock: Started bg thread for _monitor_loop 88 | [ INFO] in spectrum: Pinged electrum in 0.050364017486572266 89 | [ INFO] in spectrum: subscribe to block headers 90 | [ INFO] in spectrum: detect chain from header 91 | [ INFO] in spectrum: Set roothash 92 | [ DEBUG] in util: starting new FlaskThread: _sync 93 | [ INFO] in spectrum: Syncing ... 94 | [ INFO] in spectrum_test: TEST: Import descriptor 95 | [ INFO] in spectrum: Importing descriptor wpkh(tprv8ZgxMBicQKsPeu959EyVrwNtMj8xK64oGgSjTCxexEnFu1y6B56bannxXuL4Vcbn9JRzcjyyKdBQaq6cgQcsTNcpP34Jo45vGifxtuf9VGZ/84h/1h/0h/0/*)#d9a3mju9 96 | [ INFO] in spectrum: Creating 300 scriptpubkeys for wallet 97 | [ DEBUG] in util: starting new FlaskThread: _subcribe_scripts 98 | [ INFO] in spectrum_test: 2023-02-22 15:52:23.974127 Let's sleep for 20 seconds (that's what it takes to completely sync) 99 | [ INFO] in spectrum: subscribed to 300 scripts for descriptor wpkh(tprv8ZgxMBicQKsPeu959EyVr... 
where 0 got synced 100 | [ INFO] in spectrum_test: 2023-02-22 15:52:43.993239 --------------------------closed----------------------------------------------------------- 101 | [ INFO] in spectrum_test: 2023-02-22 15:52:43.993468 Let's sleep for 5 seconds 102 | [ ERROR] in elsock: Error in write: 103 | [ ERROR] in elsock: Error in ping-loop (1th time, next try in 1 seconds if threshold not met 104 | [ ERROR] in elsock: Error in ping-loop (2th time, next try in 1 seconds if threshold not met 105 | [ ERROR] in elsock: More than {self.tries_threshold} Ping failures for 60 seconds, Giving up! 106 | [ ERROR] in elsock: Error in write: 107 | [ INFO] in elsock: recreating socket and threads 108 | [ DEBUG] in elsock: socket created : 109 | [ DEBUG] in elsock: socket wrapped : 110 | [ DEBUG] in elsock: socket connected: 111 | [ DEBUG] in util: starting new FlaskThread: recv_loop 112 | [ INFO] in elsock: Started bg thread for recv_loop 113 | [ DEBUG] in util: starting new FlaskThread: _write_loop 114 | [ INFO] in elsock: Started bg thread for _write_loop 115 | [ DEBUG] in util: starting new FlaskThread: _ping_loop 116 | [ INFO] in elsock: Started bg thread for _ping_loop 117 | [ DEBUG] in util: starting new FlaskThread: _notify_loop 118 | [ INFO] in elsock: Started bg thread for _notify_loop 119 | [ DEBUG] in elsock: calling self._on_recreation_callback _sync 120 | [ INFO] in spectrum: Syncing ... 121 | [ INFO] in spectrum_test: 2023-02-22 15:52:48.997232 Let's sleep for 5 seconds 122 | [ INFO] in spectrum: Now subscribed to 100 scripthashes (33%) 123 | 124 | 125 | """ 126 | caplog.set_level(logging.DEBUG) 127 | 128 | # Speed up the test ... 
129 | ElectrumSocket.tries_threshold = 1 130 | ElectrumSocket.sleep_ping_loop = 1 131 | ElectrumSocket.timeout = 1 132 | spectrum_app = spectrum_app_with_config( 133 | config={ 134 | "ELECTRUM_HOST": "electrum.emzy.de", 135 | "ELECTRUM_PORT": 50002, 136 | "ELECTRUM_USES_SSL": True, 137 | } 138 | ) 139 | with spectrum_app.app_context(): 140 | 141 | fill_spectrum(spectrum_app, rootkey_hold_accident) 142 | 143 | logger.info( 144 | f"{datetime.now()} Let's sleep for 20 seconds (that's what it takes to completely sync)" 145 | ) 146 | time.sleep(20) 147 | # How mean we are ... 148 | spectrum_app.spectrum.sock._socket.close() 149 | logger.info( 150 | f"{datetime.now()} --------------------------closed-----------------------------------------------------------" 151 | ) 152 | 153 | logger.info(f"{datetime.now()} Let's sleep for 5 seconds") 154 | time.sleep(5) 155 | logger.info(f"{datetime.now()} Let's sleep for 5 seconds") 156 | time.sleep(5) 157 | assert not spectrum_app.spectrum.sock.is_socket_closed() 158 | -------------------------------------------------------------------------------- /tests/integration/wallet_import_rescan.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | """ Tests an import of a wallet with lots of TXs 3 | 4 | This sets up a node and funds a default-wallet w0 5 | This wallet will then create lots of TXs to a wallet w1 which got created 6 | and imported a descriptor. 7 | 8 | After that, a SpecterWallet with the same descriptor get created + rescan. 9 | At the end the txlist should be very similiar with the TXs of w1. 
10 | 11 | 12 | """ 13 | 14 | from distutils import core 15 | import logging 16 | import shutil 17 | import sys 18 | import pytest 19 | from decimal import Decimal, getcontext 20 | from random import random 21 | import time 22 | from unittest.mock import MagicMock 23 | from embit.bip32 import NETWORKS, HDKey 24 | from mock import patch 25 | from embit.descriptor.checksum import add_checksum 26 | from cryptoadvance.spectrum.util_specter import BitcoinRPC 27 | 28 | 29 | logger = logging.getLogger("cryptoadvance") 30 | 31 | number_of_txs = 10 32 | keypoolrefill = number_of_txs 33 | 34 | # Test is green in local dev 35 | def test_import_nigiri_core( 36 | caplog, 37 | empty_data_folder, 38 | # acc0xprv_hold_accident, 39 | # acc0key_hold_accident, 40 | rootkey_hold_accident, 41 | ): 42 | """Test is using a rpc connecting to nigiri's core""" 43 | caplog.set_level(logging.INFO) 44 | rpc: BitcoinRPC = BitcoinRPC( 45 | user="admin1", password="123", host="localhost", port="18443" 46 | ) 47 | # runtest_import_via( 48 | # rpc, 49 | # rpc, 50 | # number_of_txs, 51 | # keypoolrefill, 52 | # caplog, 53 | # empty_data_folder, 54 | # #acc0key_hold_accident, 55 | # rootkey_hold_accident, 56 | # ) 57 | 58 | 59 | # Skipping for now 60 | # It would make more sense to setup a Spectrum and a Spectrum node here and check whether an import of wallet "w1" results in the same balace 61 | # Definitely makes no sense to do to the sending from w0 to w1 twice. 62 | @pytest.mark.skip 63 | def test_import_spectrum_nigiri_electrs_core( 64 | caplog, 65 | app_nigiri, 66 | empty_data_folder, 67 | acc0xprv_keen_join, 68 | # acc0key_keen_join, 69 | # rootkey_keen_join, 70 | ): 71 | """Test is using a rpc connecting to spectrum which is connected via nigiri's electrs to nigiri's core""" 72 | caplog.set_level(logging.INFO) 73 | # Can't be right here! 
74 | spectrum_rpc: BitcoinRPC = BitcoinRPC( 75 | user="", password="", host="localhost", port="8081" 76 | ) 77 | 78 | btc_rpc: BitcoinRPC = BitcoinRPC( 79 | user="admin1", password="123", host="localhost", port="18443" 80 | ) 81 | runtest_import_via( 82 | spectrum_rpc, 83 | btc_rpc, 84 | number_of_txs, 85 | keypoolrefill, 86 | caplog, 87 | empty_data_folder, 88 | acc0key_keen_join, 89 | rootkey_keen_join, 90 | ) 91 | 92 | 93 | def runtest_import_via( 94 | spectrum_rpc, 95 | btc_rpc, 96 | number_of_txs, 97 | keypoolrefill, 98 | caplog, 99 | empty_data_folder, 100 | acc0key, 101 | rootkey, 102 | ): 103 | 104 | # caplog.set_level(logging.DEBUG) 105 | # durations = {} 106 | # for i in range(1,2,1): 107 | # for i in range(7000, 8000, 1000): 108 | # shutil.rmtree(empty_data_folder) 109 | # tg = TrafficGen() 110 | # tg.number_of_txs = i 111 | # tg.keypoolrefill = i 112 | # tg.rootkey = rootkey 113 | # tg.acc0key = acc0key 114 | 115 | # tg.empty_data_folder = empty_data_folder 116 | # durations[i] = tg.main() 117 | 118 | logger.info(f"Setup wallets, planning for {number_of_txs}") 119 | logger.info(f"btc_rpc = {btc_rpc}") 120 | logger.info(f"spectrum_rpc = {spectrum_rpc}") 121 | # w0 is a wallet with coinbase rewards 122 | 123 | w0_walletname = "w0" + str(int(time.time())) 124 | w1_walletname = "w1" + str(int(time.time())) 125 | if w0_walletname not in btc_rpc.listwallets(): 126 | btc_rpc.createwallet(w0_walletname) 127 | w0 = btc_rpc.wallet(w0_walletname) 128 | logger.info( 129 | f"result of getbalances (w0 / mine / trusted ): {w0.getbalances()['mine']['trusted']}" 130 | ) 131 | btc_rpc.generatetoaddress(110, w0.getnewaddress()) 132 | logger.info( 133 | f"result of getbalances (w0 / mine / trusted ): {w0.getbalances()['mine']['trusted']}" 134 | ) 135 | 136 | # w1 contains the private keys acc0xprv 137 | if w1_walletname not in btc_rpc.listwallets(): 138 | btc_rpc.createwallet(w1_walletname, blank=True, descriptors=True) 139 | w1 = btc_rpc.wallet(w1_walletname) 140 | tpriv 
= rootkey.to_base58(version=NETWORKS["regtest"]["xprv"]) 141 | 142 | result = w1.importdescriptors( 143 | [ 144 | { 145 | "desc": add_checksum("wpkh(" + tpriv + "/84'/1'/0'/0/*)"), 146 | "timestamp": "now", 147 | "range": [0, 100], 148 | "active": True, 149 | }, 150 | { 151 | "desc": add_checksum("wpkh(" + tpriv + "/84'/1'/1'/1/*)"), 152 | "timestamp": "now", 153 | "range": [0, 100], 154 | "active": True, 155 | "internal": True, 156 | }, 157 | ] 158 | ) 159 | 160 | logger.info(f"result of importdescriptors: {result}") 161 | zero_address = btc_rpc.deriveaddresses( 162 | add_checksum("wpkh(" + tpriv + "/84'/1'/0'/0/*)"), [0, 0] 163 | )[0] 164 | # zero_address = w1.getnewaddress() 165 | print(f"muh: {zero_address}") 166 | logger.info(f"result of addressinfo(w1)) {w1.getaddressinfo(zero_address)}") 167 | w1.keypoolrefill(199) 168 | 169 | # Create some TXs towards w1 170 | logger.info(f"blockheight: {btc_rpc.getblockchaininfo()['blocks']} ") 171 | logger.info(f"result of getbalances (before): {w1.getbalances()}") 172 | for i in range(0, number_of_txs): 173 | w0.sendtoaddress(w1.getnewaddress(), round(0.001 + random() / 100, 8)) 174 | if i % 10 and random() > 0.8: 175 | btc_rpc.generatetoaddress(1, w0.getnewaddress()) 176 | 177 | # be sure that all the TXs are in the chain 178 | btc_rpc.generatetoaddress(1, w0.getnewaddress()) 179 | logger.info(f"blockheight: {btc_rpc.getblockchaininfo()['blocks']} ") 180 | logger.info(f"result of getbalances (after): {w1.getbalances()}") 181 | 182 | # Create the specter-wallet 183 | wm = WalletManager( 184 | empty_data_folder, 185 | spectrum_rpc, 186 | "regtest", 187 | None, 188 | allow_threading_for_testing=False, 189 | ) 190 | wallet: Wallet = wm.create_wallet( 191 | "hold_accident", 1, "wpkh", [acc0key], MagicMock() 192 | ) 193 | hold_accident = spectrum_rpc.wallet("specter/hold_accident") 194 | ha_zero_address = wallet.get_address(0) # the defaultwallet is already used 195 | # logger.info(f"result of addressinfo(hold_accident)) 
{hold_accident.getaddressinfo(ha_zero_address)}") 196 | 197 | # Be sure that the addresses of w1 and the specter-wallet matches 198 | assert ha_zero_address == zero_address 199 | 200 | if spectrum_rpc == btc_rpc: 201 | # There is no keypoolrefill in spectrum 202 | hold_accident.keypoolrefill(number_of_txs + 10) 203 | wallet.update() 204 | 205 | # Do a rescan 206 | delete_file(wallet._transactions.path) 207 | # wallet.fetch_transactions() 208 | # This rpc call does not seem to return a result; use no_wait to ignore timeout errors 209 | result = wallet.rpc.rescanblockchain(0) 210 | print(wallet.rpc.getwalletinfo()) 211 | logger.info(f"Result of rescanblockchain: {result}") 212 | time.sleep(15) 213 | # both balances are the same 214 | assert ( 215 | wallet.rpc.getbalances()["mine"]["trusted"] 216 | == w1.getbalances()["mine"]["trusted"] 217 | ) 218 | 219 | # Check the number of TXs 220 | txlist = wallet.txlist(validate_merkle_proofs=False) 221 | print(f"result of hold_accident.getbalances: {hold_accident.getbalances()}") 222 | if keypoolrefill < number_of_txs: 223 | assert len(txlist) == keypoolrefill 224 | else: 225 | assert len(txlist) == number_of_txs 226 | -------------------------------------------------------------------------------- /tests/test_bdk.py: -------------------------------------------------------------------------------- 1 | from binascii import hexlify 2 | import json 3 | import pytest 4 | 5 | from embit.bip39 import mnemonic_to_seed 6 | from embit.bip32 import HDKey, NETWORKS 7 | from embit import script 8 | 9 | from bdkpython import bdk 10 | 11 | # Checkout: 12 | # https://github.com/thunderbiscuit/bitcoindevkit-scripts/tree/aa601c93dfbe92f1812179f4441c002e091f2953/python 13 | # In detail: 14 | # 15 | 16 | 17 | def test_bdk(): 18 | 19 | mnemonic = 11 * "keen " + "join" 20 | 21 | mnemonic = bdk.Mnemonic.from_string(mnemonic) 22 | print(f"Keen Join seed: {mnemonic}") 23 | 24 | rootkey = bdk.DescriptorSecretKey(bdk.Network.REGTEST, mnemonic, "") 25 | 
print(f"Keen Join rootkey: {rootkey.as_string()}") 26 | 27 | acc0xprv_keen_join = rootkey.derive(bdk.DerivationPath("m/84h/1h/0h")) 28 | print(f"Keen Join acc0xprv: {acc0xprv_keen_join.as_string()}") 29 | 30 | xpub = acc0xprv_keen_join.as_public() 31 | print(f"Keen Join acc0xpub: {xpub.as_string()}") 32 | # tpubDCNx731HhXWZH7JigBeqN5F8T23pwrw7ny4v66b7XFajEF4Xwu1tHc4avQRVofghtzUs5BVNjYwRcqyfiBzftmfRFMKBWVCVdhQTWM1b7wM 33 | -------------------------------------------------------------------------------- /tests/test_config.py: -------------------------------------------------------------------------------- 1 | from cryptoadvance.spectrum.config import _get_bool_env_var 2 | import os 3 | 4 | 5 | def test_get_bool_env_var(): 6 | os.environ["bla"] = "false" 7 | assert not _get_bool_env_var("bla") 8 | os.environ["bla"] = "FaLse" 9 | assert not _get_bool_env_var("bla") 10 | os.environ["bla"] = "True" 11 | assert _get_bool_env_var("bla") 12 | os.environ["bla"] = "true" 13 | assert _get_bool_env_var("bla") 14 | os.environ["bla"] = "TrUe" 15 | assert _get_bool_env_var("bla") 16 | os.environ["bla"] = "yes" 17 | assert _get_bool_env_var("bla") 18 | os.environ["bla"] = "yEs" 19 | assert _get_bool_env_var("bla") 20 | -------------------------------------------------------------------------------- /tests/test_elsock.py: -------------------------------------------------------------------------------- 1 | from binascii import hexlify, unhexlify 2 | import io 3 | import time 4 | 5 | import mock 6 | import pytest 7 | from cryptoadvance.spectrum.elsock import ElectrumSocket, ElSockTimeoutException 8 | import hashlib 9 | import struct 10 | 11 | from cryptoadvance.spectrum.util import SpectrumException 12 | 13 | 14 | def test_elsock(config): 15 | with mock.patch("cryptoadvance.spectrum.elsock.socket.socket"): 16 | print(time.time()) 17 | es = ElectrumSocket( 18 | host=config.ELECTRUM_HOST, 19 | port=config.ELECTRUM_PORT, 20 | socket_timeout=1, 21 | call_timeout=1, 22 | ) 23 | with 
pytest.raises(ElSockTimeoutException): 24 | res = es.ping() 25 | 26 | 27 | def test_elsock_thread_status(): 28 | es = ElectrumSocket(host="notExisting", port=123) 29 | es.running = False 30 | write_mock = mock.MagicMock() 31 | write_mock.is_alive.return_value = False 32 | recv_mock = mock.MagicMock() 33 | recv_mock.is_alive.return_value = False 34 | ping_mock = mock.MagicMock() 35 | ping_mock.is_alive.return_value = False 36 | notify_mock = mock.MagicMock() 37 | notify_mock.is_alive.return_value = False 38 | es._write_thread = write_mock 39 | es._recv_thread = recv_mock 40 | es._ping_thread = ping_mock 41 | es._notify_thread = notify_mock 42 | es.thread_status 43 | assert es.thread_status["write"] == False 44 | assert es.thread_status["recv"] == False 45 | assert es.thread_status["ping"] == False 46 | assert es.thread_status["notify"] == False 47 | assert es.thread_status["any_alive"] == False 48 | assert es.thread_status["not_any_alive"] == True 49 | assert es.thread_status["all_alive"] == False 50 | assert es.thread_status["not_all_alive"] == True 51 | assert es.thread_status["alive"] == [] 52 | assert es.thread_status["not_alive"] == ["recv", "write", "ping", "notify"] 53 | ping_mock.reset_mock() 54 | ping_mock.is_alive.return_value = True 55 | assert es.thread_status["recv"] == False 56 | assert es.thread_status["ping"] == True 57 | assert es.thread_status["any_alive"] == True 58 | assert es.thread_status["not_any_alive"] == False 59 | assert es.thread_status["all_alive"] == False 60 | assert es.thread_status["not_all_alive"] == True 61 | assert es.thread_status["alive"] == ["ping"] 62 | assert es.thread_status["not_alive"] == ["recv", "write", "notify"] 63 | write_mock.reset_mock() 64 | write_mock.is_alive.return_value = True 65 | recv_mock.reset_mock() 66 | recv_mock.is_alive.return_value = True 67 | notify_mock.reset_mock() 68 | notify_mock.is_alive.return_value = True 69 | assert es.thread_status["any_alive"] == True 70 | assert 
es.thread_status["not_any_alive"] == False 71 | assert es.thread_status["all_alive"] == True 72 | assert es.thread_status["not_all_alive"] == False 73 | assert es.thread_status["alive"] == ["recv", "write", "ping", "notify"] 74 | assert es.thread_status["not_alive"] == [] 75 | -------------------------------------------------------------------------------- /tests/test_se_healthz.py: -------------------------------------------------------------------------------- 1 | import logging 2 | 3 | def test_readyness(caplog, client): 4 | """The root of the app""" 5 | caplog.set_level(logging.DEBUG) 6 | caplog.set_level(logging.DEBUG, logger="cryptoadvance.specter") 7 | result = client.get("/healthz/liveness") 8 | assert result.status_code == 200 9 | result = client.get("/welcome/about") 10 | 11 | result = client.get("/healthz/readyness") 12 | assert result.status_code == 200 13 | result = client.get("/welcome/about") 14 | 15 | def test_readyness(caplog, app_offline): 16 | """The root of the app""" 17 | client = app_offline.test_client() 18 | caplog.set_level(logging.DEBUG) 19 | caplog.set_level(logging.DEBUG, logger="cryptoadvance.specter") 20 | result = client.get("/healthz/liveness") 21 | assert result.status_code == 200 22 | 23 | result = client.get("/healthz/readyness") 24 | assert result.status_code == 500 25 | -------------------------------------------------------------------------------- /tests/test_spectrum.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import time 3 | from unittest.mock import MagicMock 4 | 5 | from flask import Flask 6 | from cryptoadvance.spectrum.db import Descriptor, Script, Wallet 7 | from cryptoadvance.spectrum.spectrum import Spectrum 8 | from embit.descriptor.checksum import add_checksum 9 | from embit.bip32 import NETWORKS 10 | 11 | logger = logging.getLogger("cryptoadvance") 12 | 13 | def test_importdescriptor(app: Flask, rootkey_hold_accident, acc0key0addr_hold_accident): 14 | 
''' THis does: 15 | * Creating a wallet 16 | * importing a descriptor 17 | * load the script with index 0 18 | * compare the address with the expected one 19 | ''' 20 | spectrum: Spectrum = app.spectrum 21 | # calculate the descriptor 22 | tpriv = rootkey_hold_accident.to_base58( 23 | version=NETWORKS["regtest"]["xprv"] 24 | ) 25 | desc = add_checksum("wpkh(" + tpriv + "/84'/1'/0'/0/*)") 26 | desc = desc.replace("'","h") 27 | logger.info(f"TEST: created desc: {desc}") 28 | logger.info(f"TEST: expecting address: {acc0key0addr_hold_accident}") 29 | # Now let's derive the first address from this. 30 | 31 | 32 | with app.test_request_context(): 33 | # Create a wallet 34 | spectrum.createwallet("bob_the_wallet", disable_private_keys=True) # not a hotwallet! 35 | wallet: Wallet = Wallet.query.filter_by(name="bob_the_wallet").first() 36 | logger.info("TEST: Import descriptor") 37 | spectrum.importdescriptor(wallet, desc) 38 | descriptor: Descriptor = Descriptor.query.filter_by(wallet=wallet).all() # could use first() but let's assert! 39 | assert len(descriptor) == 1 40 | descriptor = descriptor[0] 41 | logger.info(f"TEST: descriptor {descriptor}") 42 | assert spectrum.getbalances(wallet) == {'mine': {'immature': 0.0, 'trusted': 0.0, 'untrusted_pending': 0.0}, 'watchonly': {'immature': 0.0, 'trusted': 0.0, 'untrusted_pending': 0.0}} 43 | # Load the script with index 0 44 | script: Script = Script.query.filter_by(wallet=wallet, index=0).all() # could use first() but let's assert! 
45 | assert len(script) == 1 46 | script = script[0] 47 | logger.info(f"TEST: scripthash {script.scripthash} ") 48 | logger.info(f"TEST: script address {script.address(network=NETWORKS['test'])}") 49 | # compare the address with the expected one 50 | assert acc0key0addr_hold_accident == script.address(network=NETWORKS['test']) 51 | # Depending on the state of electrs, it might take 5 seconds for the sync-thread to finish 52 | # It does not change anything on the result of the test, though 53 | 54 | spectrum.stop() 55 | del spectrum 56 | 57 | -------------------------------------------------------------------------------- /tests/test_spectrum_rpc.py: -------------------------------------------------------------------------------- 1 | import json 2 | import time 3 | import logging 4 | from cryptoadvance.spectrum.spectrum import Spectrum 5 | 6 | logger = logging.getLogger(__name__) 7 | 8 | 9 | def test_root(caplog, client): 10 | caplog.set_level(logging.INFO) 11 | caplog.set_level(logging.DEBUG, logger="cryptoadvance.spectrum") 12 | result = client.get("/") 13 | assert result.status_code == 200 14 | assert result.data == b"JSONRPC server handles only POST requests" 15 | result = client.post("/", json={}) 16 | assert result.status_code == 200 17 | assert json.loads(result.data)["error"]["message"] == "Method not found (None)" 18 | 19 | 20 | def test_unknownmethod(caplog, client): 21 | caplog.set_level(logging.INFO) 22 | caplog.set_level(logging.DEBUG, logger="cryptoadvance.spectrum") 23 | result = client.post("/", json={"method": "unknownmethod"}) 24 | assert result.status_code == 200 25 | 26 | 27 | def test_suppress_logging(caplog, client, app): 28 | caplog.set_level(logging.INFO) 29 | caplog.set_level(logging.DEBUG, logger="cryptoadvance.spectrum") 30 | result = client.post("/", json={"method": "getmininginfo"}) 31 | assert result.status_code == 200 32 | assert "RPC called getmininginfo" in caplog.text 33 | app.config["SUPPRESS_JSONRPC_LOGGING"] = True 34 | result = 
client.post("/", json={"method": "getmininginfo"}) 35 | assert result.status_code == 200 36 | assert caplog.text.count("RPC called getmininginfo") == 1 37 | 38 | 39 | def test_getmininginfo(caplog, client): 40 | caplog.set_level(logging.INFO) 41 | caplog.set_level(logging.DEBUG, logger="cryptoadvance.spectrum") 42 | result = client.post("/", json={"method": "getmininginfo"}) 43 | assert result.status_code == 200 44 | assert json.loads(result.data)["result"]["blocks"] >= 0 45 | 46 | 47 | def test_getblockchaininfo(caplog, client): 48 | caplog.set_level(logging.INFO) 49 | caplog.set_level(logging.DEBUG, logger="cryptoadvance.spectrum") 50 | result = client.post("/", json={"method": "getblockchaininfo"}) 51 | assert result.status_code == 200 52 | print(json.loads(result.data)) 53 | assert json.loads(result.data)["result"]["blocks"] >= 0 54 | 55 | 56 | def test_getnetworkinfo(caplog, client): 57 | caplog.set_level(logging.INFO) 58 | caplog.set_level(logging.DEBUG, logger="cryptoadvance.spectrum") 59 | result = client.post("/", json={"method": "getnetworkinfo"}) 60 | assert result.status_code == 200 61 | print(json.loads(result.data)) 62 | assert json.loads(result.data)["result"]["version"] == 230000 63 | 64 | 65 | def test_getmempoolinfo(caplog, client): 66 | caplog.set_level(logging.INFO) 67 | caplog.set_level(logging.DEBUG, logger="cryptoadvance.spectrum") 68 | result = client.post("/", json={"method": "getmempoolinfo"}) 69 | assert result.status_code == 200 70 | print(json.loads(result.data)) 71 | assert json.loads(result.data)["result"]["loaded"] 72 | 73 | 74 | def test_uptime(caplog, client): 75 | caplog.set_level(logging.INFO) 76 | caplog.set_level(logging.DEBUG, logger="cryptoadvance.spectrum") 77 | result = client.post("/", json={"method": "uptime"}) 78 | assert result.status_code == 200 79 | print(json.loads(result.data)) 80 | assert json.loads(result.data)["result"] >= 0 81 | 82 | 83 | def test_getblockhash(caplog, client): 84 | 
caplog.set_level(logging.DEBUG) 85 | caplog.set_level(logging.DEBUG, logger="cryptoadvance.spectrum") 86 | 87 | result = client.post( 88 | "/", 89 | json={ 90 | "method": "getblockhash", 91 | "params": [0], 92 | "jsonrpc": "2.0", 93 | "id": 0, 94 | }, 95 | ) 96 | 97 | assert result.status_code == 200 98 | print(json.loads(result.data)) 99 | 100 | # assert json.loads(result.data)["result"] == '0f9188f13cb7b2c71f2a335e3a4fc328bf5beb436012afca590b1a11466e2206' # Hash of hard coded regtest genesis block 101 | 102 | 103 | def test_rescanblockchain(caplog, client): 104 | caplog.set_level(logging.DEBUG) 105 | caplog.set_level(logging.DEBUG, logger="cryptoadvance.spectrum") 106 | 107 | result = client.post( 108 | "/", 109 | json={ 110 | "method": "createwallet", 111 | "params": ["name_of_wallet"], 112 | "jsonrpc": "2.0", 113 | "id": 0, 114 | }, 115 | ) 116 | assert result.status == "200 OK" 117 | 118 | result = client.post( 119 | "/wallet/name_of_wallet", 120 | json={ 121 | "method": "rescanblockchain", 122 | "params": [0], 123 | "jsonrpc": "2.0", 124 | "id": 0, 125 | }, 126 | ) 127 | 128 | assert result.status_code == 200 129 | print(json.loads(result.data)) 130 | -------------------------------------------------------------------------------- /tests/test_util.py: -------------------------------------------------------------------------------- 1 | from binascii import hexlify, unhexlify 2 | import io 3 | from cryptoadvance.spectrum.util import parse_blockheader 4 | 5 | 6 | def test_blockchain_block_header(config): 7 | 8 | # We'll skip here the part where the blockheader is obtained via electrum: 9 | # This will only work in mainnet, run like this: 10 | # pytest tests/test_elsock.py::test_blockchain_block_header --config cryptoadvance.spectrum.config.EmzyElectrumLiteConfig 11 | #es = ElectrumSocket(host=config.ELECTRUM_HOST, port=config.ELECTRUM_PORT, use_ssl=config.ELECTRUM_USES_SSL) 12 | # height=744133 13 | #block_header = es.call("blockchain.block.header", [height]) 14 
| # print(f"\nblockchain.block.header (height {height}) :") 15 | block_header = "04004020a59f49990cdd728a8e84d23719eae32287ace2bd5bef05000000000000000000aa5fa0d2d87a22f5251a3b4420b14dc6eeaabef6db78971863172dbead9213b7e108c862afa709173d8ad115" 16 | print(block_header) 17 | block_header = unhexlify(block_header) 18 | assert len(block_header) == 80 # A block_header is 80 byte long 19 | p_block_header = parse_blockheader(block_header) 20 | assert p_block_header["version"] == 541065220 # i don't understand this value, does not look like a proper version 21 | assert p_block_header["blocktime"] == 1657276641 22 | 23 | assert p_block_header["blockhash"] == "0000000000000000000993b3cdc6c0f66c0f2ab210d2ac250db90874e826b646" 24 | assert isinstance(p_block_header["blockhash"], str) -------------------------------------------------------------------------------- /utils/compile_and_run_electrs.py: -------------------------------------------------------------------------------- 1 | #%% 2 | import requests, os, json 3 | import tarfile 4 | 5 | 6 | def install_dep(): 7 | os.system('sudo apt install cargo clang cmake build-essential ') 8 | 9 | 10 | def download_electrs(): 11 | url = 'https://github.com/romanz/electrs/archive/refs/tags/v0.9.9.tar.gz' 12 | filename = 'electrs.tar.gz' 13 | 14 | if not os.path.exists(filename): 15 | response = requests.get(url, stream = True) 16 | with open(filename, 'wb') as file: 17 | for chunk in response.iter_content(chunk_size = 1024): 18 | if chunk: 19 | file.write(chunk) 20 | 21 | return filename 22 | 23 | 24 | 25 | def extract(filename): 26 | with tarfile.open(filename) as tar: 27 | tar.extractall() 28 | return list(tar.getmembers())[0].name 29 | 30 | 31 | 32 | 33 | def compile(electrs_folder): 34 | org_folder = os.path.abspath('.') 35 | os.chdir(electrs_folder) 36 | os.system('cargo build --locked --release') 37 | os.chdir(org_folder) 38 | 39 | 40 | def specter_node_config(node_config_file='~/.specter_dev/nodes/default.json'): 41 | import json 
42 | node_config_file = os.path.expanduser(node_config_file) 43 | with open(node_config_file, "r") as file: 44 | node_config = json.load(file) 45 | 46 | return node_config 47 | 48 | 49 | def create_config(node_config): 50 | network = "regtest" 51 | electrs_config = f""" 52 | # File where bitcoind stores the cookie, usually file .cookie in its datadir 53 | cookie_file = "{node_config['datadir']}/{network if network!= 'mainnet' else ''}/.cookie" 54 | 55 | # The listening RPC address of bitcoind, port is usually 8332 56 | daemon_rpc_addr = "{node_config['host']}:{int(node_config['port'])}" 57 | 58 | # The listening P2P address of bitcoind, port is usually 8333 59 | daemon_p2p_addr = "{node_config['host']}:{int(node_config['port'])+1}" 60 | 61 | # Directory where the index should be stored. It should have at least 70GB of free space. 62 | db_dir = "./db" 63 | 64 | # bitcoin means mainnet. Don't set to anything else unless you're a developer. 65 | network = "{network}" 66 | 67 | # The address on which electrs should listen. Warning: 0.0.0.0 is probably a bad idea! 68 | # Tunneling is the recommended way to access electrs remotely. 69 | electrum_rpc_addr = "127.0.0.1:50000" 70 | 71 | # How much information about internal workings should electrs print. Increase before reporting a bug. 
72 | log_filters = "INFO" 73 | """ 74 | with open('electrs.toml', "w") as file: 75 | file.write(electrs_config) 76 | 77 | 78 | 79 | 80 | def run_electrs(electrs_folder, node_config): 81 | network = "regtest" 82 | cmd = f"{electrs_folder}/target/release/electrs --log-filters=INFO --db-dir ./db --daemon-dir {node_config['datadir']}/{network if network!= 'mainnet' else ''} --network {network}" 83 | os.system(cmd) 84 | 85 | 86 | 87 | 88 | # %% 89 | filename = download_electrs() 90 | # %% 91 | electrs_folder = extract(filename) 92 | # %% 93 | compile(electrs_folder) 94 | # %% 95 | node_config = specter_node_config() 96 | # %% 97 | 98 | create_config(node_config) 99 | # %% 100 | run_electrs(electrs_folder, node_config) 101 | 102 | 103 | -------------------------------------------------------------------------------- /utils/release.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Replacing MacOS utilities with GNU core utilities to make script more robust 4 | # See: https://apple.stackexchange.com/questions/69223/how-to-replace-mac-os-x-utilities-with-gnu-core-utilities 5 | if [[ "$OSTYPE" == "darwin"* ]]; then 6 | brew ls --versions coreutils > /dev/null; 7 | exitCode=$? 8 | if [[ $exitCode == 0 ]]; then 9 | echo "Using Gnu Coreutils ..." 10 | export PATH="/usr/local/opt/coreutils/libexec/gnubin:$PATH" 11 | else 12 | echo "GNU core utilities not installed. 
Run brew install coreutils"
    fi
fi

# Ask a yes/no question on stdin; returns 0 for yes, 1 for no.
ask_yn() {
    while true; do
        read -p "Is this correct [y/n]" yn
        case $yn in
            [Yy]* ) return 0 ;;
            [Nn]* ) return 1;;
            * ) echo "Please answer yes or no.";;
        esac
    done
}

# Usage text (placeholder). Defined BEFORE the argument loop so that
# "--help" can actually call it — it used to be defined after the loop,
# which made "--help" fail with "command not found".
function help() {
    # echo HERE_DOC
    # ...
    echo "not yet implemented"
}

# Default sub-command. Fix: this assignment used to live INSIDE the loop
# below, so every later flag (e.g. "release.sh tag --debug") reset an
# already-parsed sub-command back to "main".
command="main"

while [[ $# -gt 0 ]]
do
    key="$1"
    case $key in
        --help)
            help
            shift
            exit 0
            ;;
        --release-notes)
            RELEASE_NOTES="yes"
            shift # past value
            ;;
        --dev)
            DEV="yes"
            shift
            ;;
        --new-version)
            new_version=$2
            shift
            shift
            ;;
        tag)
            command="tag"
            shift
            ;;
        build)
            command="build"
            shift
            ;;
        publish)
            command="publish"
            shift
            ;;
        release)
            command="release"
            shift
            ;;
        wait_on_master)
            command="wait_on_master"
            shift
            ;;
        --debug)
            set -x
            shift # past argument
            ;;
        *) # unknown option
            POSITIONAL="$1" # save it for later
            shift # past argument
            ;;
    esac
done

# Verify the git remotes needed for releasing are configured.
function check_basic_git_release_infra() {
    # Sed is used as there can be whitespaces
    if ! [ "$(git remote -v | grep upstream | grep 'git@github.com:cryptoadvance/spectrum.git' | wc -l | sed -e 's/\s*//')" = "2" ]; then
        echo " --> You don't have the correct upstream-remote. You need this to release. Please do this:"
        echo "git remote add upstream git@github.com:cryptoadvance/spectrum.git "
        exit 2
    fi

    if ! [ "$(git remote -v | grep origin | grep 'git@github.com:' | wc -l)" = "2" ]; then
        echo " --> You don't have a reasonable origin-remote. You need this to release (especially with --dev). Please add one!"
        exit 2
    fi
}

# Abort unless we are on an up-to-date master branch.
function check_on_master_branch() {
    current_branch=$(git rev-parse --abbrev-ref HEAD)
    if [ "$current_branch" != "master" ]; then
        echo "You're currently not on the master-branch, exiting"
        exit 2
    fi
    echo " --> Fetching all tags ..."
    git fetch upstream --tags
    echo " --> git pull upstream master"
    git pull upstream master
}

# Prompt for $new_version if not given via --new-version and validate it.
function ask_new_version_if_needed() {
    if [[ -z "$new_version" ]]; then
        echo "What should be the new version? Type in please (e.g. v0.9.3 ):"
        read new_version
    fi
    if ! [[ $new_version =~ ^v([0-9]+)\.([0-9]+)\.([0-9]+)(-([0-9A-Za-z-]+))?$ ]]; then
        echo "version $new_version Does not match the pattern!"
        exit 1;
    fi
}

# Generate docs/release-notes.md for $new_version and push a PR branch.
function release_notes() {
    if [ -z "$GH_TOKEN" ]; then
        echo "Your github-token is missing. Please export them like:"
        echo "export GH_TOKEN="
        exit 2
    fi

    latest_version=$(git tag -l "v*" | grep -v 'pre' | grep -v 'dev' | sort -V | tail -1)

    echo " --> The latest version is $latest_version. "
    if ! ask_yn ; then
        echo "Ok, then you type in the latest_version:"
        read latest_version
        # Fix: this used to validate $new_version instead of the value just read.
        if ! [[ $latest_version =~ ^v([0-9]+)\.([0-9]+)\.([0-9]+)(-([0-9A-Za-z-]+))?$ ]]; then
            echo "Does not match the pattern!"
            exit 1;
        fi
    fi

    echo "Here are the release-notes:"
    echo "--------------------------------------------------"
    echo "# Release Notes" > docs/new_release_notes.md
    echo "" >> docs/new_release_notes.md
    echo "## ${new_version} $(date +'%B %d, %Y')" >> docs/new_release_notes.md
    docker run registry.gitlab.com/cryptoadvance/specter-desktop/github-changelog:latest --github-token $GH_TOKEN --branch master cryptoadvance spectrum $latest_version | sort >> docs/new_release_notes.md
    echo "" >> docs/new_release_notes.md


    cat docs/new_release_notes.md
    echo "--------------------------------------------------"

    cp docs/release-notes.md docs/release-notes.md.orig
    sed -i -e '1,2d' docs/release-notes.md.orig # Assuming the release-Notes start with # Release Notes\n
    cat docs/new_release_notes.md docs/release-notes.md.orig > docs/release-notes.md
    rm docs/release-notes.md.orig docs/new_release_notes.md

    echo "Please check your new File and modify as you find appropriate!"
    echo "We're waiting here ..."
    echo " --> Should we create a PR-branch now? "

    if ! ask_yn ; then
        echo "break"
        #git checkout docs/release-notes.md
        exit 2
    fi

    echo " --> Creating branch ${new_version}_release_notes "
    git checkout -b ${new_version}_release_notes
    git add docs/release-notes.md
    git commit -m "adding release_notes for $new_version"
    git push --set-upstream origin ${new_version}_release_notes

    echo "Now go ahead and make your PR:"
    echo "https://github.com/cryptoadvance/spectrum/pulls"
    exit 0


}

# Create the git tag $new_version and push it (origin with --dev, upstream otherwise).
function tag() {
    check_basic_git_release_infra
    check_on_master_branch
    ask_new_version_if_needed
    echo " --> Should i now create the tag and push the version $new_version ?"
    if [ -n "$DEV" ]; then
        echo " --> This will push to your origin-remote!"
    else
        echo " --> THIS WILL PUSH TO THE UPSTREAM-REMOTE!"
    fi

    if ! ask_yn ; then
        echo "break"
        exit 2
    fi

    git tag $new_version
    if [ -n "$DEV" ]; then
        git push origin $new_version
    else
        git push upstream $new_version
    fi
}

# Build the python package; requires HEAD to be at the $new_version tag.
function build() {
    ask_new_version_if_needed
    echo " --> Building the package for version $new_version"
    current_branch=$(git rev-parse --abbrev-ref HEAD)
    current_tag=$(git describe --tags)
    if [ "$current_tag" != "$new_version" ]; then
        echo "You're currently not on the tag $new_version but on $current_branch"
        echo "Maybe you haven't created the tag, yet. Consider:"
        echo "./utils/release.sh --new-version $new_version tag"
        exit 2
    fi
    rm -rf dist
    python3 -m pip install --upgrade build
    python3 -m build
    echo " --> Finished building. _version.py:"
    cat src/cryptoadvance/spectrum/_version.py
    echo " --> Done"

}

# Upload the built dist/ artifacts to pypi (testpypi with --dev).
function publish() {
    python3 -m pip install --upgrade twine > /dev/null
    echo " --> Publishing the package for version $new_version"
    # Fix: no spaces around "=" in a shell assignment; the old
    # 'new_version_pypi = $(...)' tried to RUN new_version_pypi as a
    # command and left the variable empty.
    new_version_pypi=$(echo "$new_version" | sed -e 's/v//')

    if [ -z "$DEV" ]; then
        python3 -m twine upload dist/cryptoadvance.spectrum-${new_version_pypi}*
    else
        python3 -m twine upload --repository testpypi dist/cryptoadvance.spectrum-${new_version_pypi}*
    fi
}

# Convenience command: tag + build + publish in one go.
function release() {
    echo " --> We'll do now the tag"
    tag
    echo " --> git checkout $new_version"
    # This is not necessary!
    # The build-script will check whether the commit is the correct one
    # git checkout $new_version
    echo " --> Creating the build"
    build
    echo " --> Publish to pypi"
    publish
}




# Poll the GitHub check-runs of master; exit 0 when all are green.
function wait_on_master() {
    echo "# check status of masterbranch ..."
    # First, wait on the check-runs to be completed:
    for i in {1..5} ; do
        current_state=$(curl -s https://api.github.com/repos/cryptoadvance/spectrum/commits/master/check-runs)
        different_states=$(echo $current_state | jq -r '.check_runs[] | select(.status == "completed") | .status' | uniq | wc -l)
        status=$(echo $current_state | jq -r '.check_runs[] | select(.status == "completed") | .status' | uniq)

        if [[ "$different_states" == 1 ]] && [[ "$status" == "completed" ]] ; then
            break
        fi
        echo "# Builds still running. Will check again in 5 seconds."
        sleep 5
    done

    # Now check all the runs and make sure there are all green:
    current_state=$(curl -s https://api.github.com/repos/cryptoadvance/spectrum/commits/master/check-runs)
    # NOTE(review): the jq filter already selects only conclusion=="success",
    # so different_conclusions can never exceed 1 and the guard below is
    # vacuous. Catching a red master would require inspecting ALL
    # conclusions — confirm intent before changing this behavior.
    different_conclusions=$(echo $current_state | jq -r '.check_runs[] | select(.conclusion == "success") | .conclusion' | uniq | wc -l)
    conclusion=$(echo $current_state | jq -r '.check_runs[] | select(.conclusion == "success") | .conclusion' | uniq)

    # We only have one conclusion over all runs:
    if [ $different_conclusions -gt 1 ] ; then
        echo "# different_conclusions: $different_conclusions"
        echo "# Seems that master is not green. Exiting 1"
        exit 1
    fi
    # ... and that conclusion is "success"
    if [[ "$conclusion" == "success" ]]; then
        echo "# Great, conclusion is success! Exiting 0"
        exit 0
    fi

    echo "# ERROR: I'm confused. This should not have happened, exiting 99"
    echo "# conclusion = $conclusion"
    #echo $current_state
    exit 99
}

$command
--------------------------------------------------------------------------------