├── .editorconfig ├── .gitignore ├── .travis.yml ├── Dockerfile ├── LICENSE ├── MANIFEST.in ├── Makefile ├── README.md ├── docker ├── README └── dev │ └── hydrachain │ ├── docker-compose.yml │ ├── node │ ├── Dockerfile │ ├── app.json │ ├── mk_enode.py │ ├── settle_file.py │ └── start.sh │ └── statsmon │ └── Dockerfile ├── examples ├── hc_consensus_explained.md ├── hydrachain ├── __init__.py ├── app.py ├── consensus │ ├── __init__.py │ ├── base.py │ ├── contract.py │ ├── manager.py │ ├── protocol.py │ ├── simulation.py │ ├── synchronizer.py │ └── utils.py ├── examples │ ├── __init__.py │ └── native │ │ ├── __init__.py │ │ └── fungible │ │ ├── __init__.py │ │ ├── fungible_contract.py │ │ ├── test_fungible_contract.py │ │ └── test_iou_contract.py ├── hdc_service.py ├── native_contracts.py ├── nc_utils.py ├── tests │ ├── conftest.py │ ├── test_app.py │ ├── test_base.py │ ├── test_docker_integration.py │ ├── test_hdc_protocol.py │ ├── test_hdc_service.py │ ├── test_native_contracts.py │ ├── test_sim_basics.py │ ├── test_sim_failures.py │ ├── test_sim_joins.py │ ├── test_sim_syncing.py │ ├── test_working_app.py │ └── txperf.py └── utils.py ├── requirements.txt ├── setup.cfg ├── setup.py └── tox.ini /.editorconfig: -------------------------------------------------------------------------------- 1 | # http://editorconfig.org 2 | 3 | root = true 4 | 5 | [*] 6 | indent_style = space 7 | indent_size = 4 8 | trim_trailing_whitespace = true 9 | insert_final_newline = true 10 | charset = utf-8 11 | end_of_line = lf 12 | 13 | [LICENSE] 14 | insert_final_newline = false 15 | 16 | [Makefile] 17 | indent_style = tab 18 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | 5 | # C extensions 6 | *.so 7 | 8 | # Distribution / packaging 9 | .Python 10 | env/ 11 | build/ 12 | 
develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | *.egg-info/ 23 | .installed.cfg 24 | *.egg 25 | 26 | # PyInstaller 27 | # Usually these files are written by a python script from a template 28 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 29 | *.manifest 30 | *.spec 31 | 32 | # Installer logs 33 | pip-log.txt 34 | pip-delete-this-directory.txt 35 | 36 | # Unit test / coverage reports 37 | htmlcov/ 38 | .tox/ 39 | .coverage 40 | .coverage.* 41 | .cache 42 | nosetests.xml 43 | coverage.xml 44 | *,cover 45 | 46 | # Translations 47 | *.mo 48 | *.pot 49 | 50 | # Django stuff: 51 | *.log 52 | 53 | # Sphinx documentation 54 | docs/_build/ 55 | 56 | # PyBuilder 57 | target/ 58 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | language: python 2 | sudo: required 3 | services: 4 | - docker 5 | env: 6 | global: 7 | - DOCKER_EMAIL=docker@hydrachain.com 8 | - DOCKER_USERNAME=hydrachain 9 | - secure: Rr+meswIU5zxFWscvMTfumD8Fj23lAaWIQuf2REo9NefTpOUvS3CFaZY/sTFZmUybV/IGSvRXZsQoYxYlrNCsTFqwQh5RXft+A9XjrzK8EPQPxugVROoRaKNrgrCKvfy/4mup2bvCz6xp0EmFILZteMTMoveI8FOJYdEmypHpwc9/HrgB6iGFcH8EB1Ny1kXKjHmubF3jnNtsTmxciFc2+ZuC4yVa7aAZU6DMWK72/tWN6qlR5sTY11nkcuWBI/ot4niAuSkWp4RYhXHPoVeewj+0gWNBWejzID6NWK6+tLUu917+xrmjKt30Whjva3+jsNiKVdBsNuDC9M7AXViBedmfzFaRUThotM9dgqVovG6RKW3Ybdqk4cQZldK3DYOF7uVuZ9CujOlf9zYIuGv6sUihd2j6jxLEgDqpC9yumZUBqAHUqegOUizybINNhpIhcQU83ynceaUARXsPdc6JFZziY9RsjNOFkRwRKo+O7o2OfvnA9XSkIWPF8hnt4hrXHdzuzLKqgbR6spiUczuEhosjf2tNk6KlSFk0+K1vx/lx0jd9GoCKdsO/MGnAmh5M0g2OFW1vaj1/td5tQh06qFiIFULlDfSSdDtH0rNUMTRVbiGDF6NzuECYHZ23s2zIMZAOpVWHlItWlG1faBbkKz9UV0P3lqv2bp5CAuAXqk= 10 | matrix: 11 | - TOXENV=py27 12 | - TOXENV=pypy 13 | addons: 14 | apt: 15 | packages: 16 | - libgmp3-dev 17 | install: 18 | - pip install tox 19 | - pip install 
coveralls 20 | script: tox -- --ignore hydrachain/tests/test_sim_failures.py --ignore hydrachain/tests/test_sim_syncing.py 21 | after_success: 22 | - coveralls 23 | - '[ "${TRAVIS_PULL_REQUEST}" = "false" ] && docker login -e="$DOCKER_EMAIL" -u="$DOCKER_USERNAME" 24 | -p="$DOCKER_PASSWORD" || true' 25 | - '[ "${TRAVIS_PULL_REQUEST}" = "false" ] && docker build -t hydrachain/hydrachain:${TRAVIS_TAG:-develop} 26 | . || true' 27 | - '[ "${TRAVIS_PULL_REQUEST}" = "false" ] && docker push hydrachain/hydrachain:${TRAVIS_TAG:-develop} 28 | || true' 29 | deploy: 30 | provider: pypi 31 | user: ethereum_pypi_automated 32 | password: 33 | secure: vO6kAkkpdqQz2+Le7aowcIkYHyvQzuNaL35c8ciHzmT2VdeGeCsDBGyJ8RYqITQruwfVzUsKsNjfCXBTcYan3e1vPpG9KKrkxB/Zy6KcNJmd+SL9n2tB6TVFr1ojxnRJgIzFBxbA1FnyY0aodwnMW2bJBlKxFIKJiKVjK6GGDl11wrMsudFFnvs3LkrPO9tr1plmN1xvVTNLzy0UmHjx6lmtjtZ/HMbx635WnFj5NnzdeMK07XD59HEuOYVvQ7BQua2cuCevFbXmSAxHJsDPZuhzYHkEfj+jPhKbS1MyTqD5W72ex2oPRmPlvvnOn01750IkSFSyO0lnZIv1wJuZ3+Y+49Q5XrFBAsuC4bqk1uOVL5hU37b2oDBIunJLQYEP1tn7TibVbv51iOolzx193tRR7KcbyU+pIqdP4a6rWDXT+PRada2JemX/Bx4G5hn9dXc4SmLP0+t7lcYpe/fTLQCGYsLNBbJv7iJdTuOeySX8Dlq9GYBMGreilDwIp5T4eqT4mhc25ZaXRel2HdqQSzie8SM88Sempn1V3uQkvQSUZ/PcwDwrG8wLK37BubaW0qVAZ5Zfkv1Q+pnVd7OkTEU+GgdOST6H3WGKVSO0fs/vLFYhQN0273IqM3Uuh3+s5h/sbGWfXqPMS8lu6febpglBCAxvkgKjwQ0in2S3Fo8= 34 | on: 35 | tags: true 36 | repo: HydraChain/hydrachain 37 | branch: develop 38 | distributions: sdist bdist_wheel 39 | after_deploy: 40 | - docker tag hydrachain/hydrachain:$TRAVIS_TAG hydrachain/hydrachain:latest 41 | - docker push hydrachain/hydrachain:latest 42 | notifications: 43 | slack: 44 | secure: 
qJyYhNa9Ld427maaO5s2aVtiKYpQfzow0oGP56LGoJVkxOlLXbLzvPtGh/EuCSUsSYAcdMyo64LyNfjjt6UdgCroMnmGYF9i5d2CkJait61TqrL8wB6B8FFNUDfd1bpilArKuY19D6TmHYj1xRqxIq37zePS4GyAq2EanBRTtSiA4++66sUEw+8u79VEFfxDT82k0qZTNys1Ph9X/Ss6MSE7UljTkytzwhcNLWlR0z68O4jgFfhdwB9pJTFL1/80JMUlpvJpE5QhMIMgYEKhQDEAA9u+m8npmVothZVrQFJwcVMTvV3Rh8JPcfQ8IqMkzpo3zkXfL2zMX2cUkGpoFxsmF+jWtaZX+NooRC+ptcvnZJXz3mRpwK3VfwL7trPl0Y0xGVy8CtfxsdLDBYv8YMItBCiX+CrdHLIbQw1WFDODNGmRhrGZEjAy2ti5RiR4+98QdMmezf2rhHta5ttN9Ig1kRnrAsDT2/AyU4sWfLZdYFbrXwR41yP9yi9Q4R+jKk31Okr/mkFLNLRuz3UtDOlfwjVFw+D8tI0zioOybv9TYkk7TCg9Ply6sIZPG9T7x8iAos+B4iufqGoFiUfeJW1oeRb5pIeytMc27LSG1Jedlznu98ct0TjjMZwSdzx9tMXX4eXu6kVP7vkR+5G2O6HL9AqhEjA9Lm4W+pqX0LI= 45 | webhooks: 46 | - secure: "koqn6tf8V20CzyFC1VokqiIrWihkDJ4LvChtZ0ZpmSdrOUUW/ZoXr0lANcpvKjztPhy2EDexHNOh6JFtUP6pbbmMCjUsFwYIFSfXbpElN2BUrxIVeUZ4rNB6fGNMZMY5lJqOHi38Ht2e4BlAsghkZIs8HzP9xVyrJi9npbOmIg1ns2JfqCLisT3zBEqQzOc+6vKkucbQr7eYMljN1ROv+V846NLHKIR86Egfo+4DVvbk06F6P4TBot8BgX0JzldIEpOCE6xFSX1ec1UHwP9c/Jm2thLgSUWPi1Uf28GObaETzgRwEQYlD9RgakAe4hvRj9F3nG4MCGRVnDEo2Axe0+4aJ6qOSH4HPkfkiLMQsud/I137kkDtuLDOBGEpn74Jry3wEKsMS6CcIf9PuXqFlvzCm8biY7kV2tzcmAZFBvp6fBnpN+C0697qRZbZw53GdvuXfO9d2Vllzhx7G42d/byLXuLRmapv09Ud2Ksj1GXjDOE8jp10B7dk3+kL0X8Jy0KhIQbq2+pGLUmtYGpm9B3rU6+PlxZKixE59I0xOr7TCkAr0gfv4lXwWxVfQFFRR4qWxgt88bBHsFCRFpvi4PP6oShww50DQ2bekZCIMY3g8fiBQExRChr9fGGylP1nGadc4muL0W9DeoZP/kBVf72+zzkkPyMW2mktT6APrrg=" 47 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM python:2.7.10 2 | 3 | RUN apt-get update &&\ 4 | apt-get install -y curl git-core 5 | 6 | RUN apt-get update &&\ 7 | apt-get install -y build-essential libgmp-dev rsync 8 | 9 | WORKDIR / 10 | ADD . 
hydrachain 11 | 12 | RUN pip install -U setuptools 13 | # Pre-install hydrachain dependency 14 | RUN pip install secp256k1==0.12.1 15 | 16 | WORKDIR /hydrachain 17 | # Reset potentially dirty directory and remove after install 18 | RUN git reset --hard && pip install . && cd .. && rm -rf /hydrachain 19 | WORKDIR / 20 | 21 | ENTRYPOINT ["/usr/local/bin/hydrachain"] 22 | CMD ["run"] 23 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Copyright (c) 2015 Heiko Hees 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in 13 | all copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 21 | THE SOFTWARE. 
22 | -------------------------------------------------------------------------------- /MANIFEST.in: -------------------------------------------------------------------------------- 1 | include README.md 2 | include requirements.txt 3 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | .PHONY: clean-pyc clean-build docs clean 2 | 3 | help: 4 | @echo "clean - remove all build, test, coverage and Python artifacts" 5 | @echo "clean-build - remove build artifacts" 6 | @echo "clean-pyc - remove Python file artifacts" 7 | @echo "clean-test - remove test and coverage artifacts" 8 | @echo "lint - check style with flake8" 9 | @echo "test - run tests quickly with the default Python" 10 | @echo "test-all - run tests on every Python version with tox" 11 | @echo "coverage - check code coverage quickly with the default Python" 12 | @echo "docs - generate Sphinx HTML documentation, including API docs" 13 | @echo "release - package and upload a release" 14 | @echo "dist - package" 15 | @echo "install - install the package to the active Python's site-packages" 16 | 17 | clean: clean-build clean-pyc clean-test 18 | 19 | clean-build: 20 | rm -fr build/ 21 | rm -fr dist/ 22 | rm -fr .eggs/ 23 | find . -name '*.egg-info' -exec rm -fr {} + 24 | find . -name '*.egg' -exec rm -f {} + 25 | 26 | clean-pyc: 27 | find . -name '*.pyc' -exec rm -f {} + 28 | find . -name '*.pyo' -exec rm -f {} + 29 | find . -name '*~' -exec rm -f {} + 30 | find . 
-name '__pycache__' -exec rm -fr {} + 31 | 32 | clean-test: 33 | rm -fr .tox/ 34 | rm -f .coverage 35 | rm -fr htmlcov/ 36 | 37 | lint: 38 | flake8 hydrachain tests 39 | 40 | test: 41 | python setup.py test 42 | 43 | test-all: 44 | tox 45 | 46 | coverage: 47 | coverage run --source hydrachain setup.py test 48 | coverage report -m 49 | coverage html 50 | open htmlcov/index.html 51 | 52 | docs: 53 | rm -f docs/hydrachain.rst 54 | rm -f docs/modules.rst 55 | sphinx-apidoc -o docs/ hydrachain 56 | $(MAKE) -C docs clean 57 | $(MAKE) -C docs html 58 | open docs/_build/html/index.html 59 | 60 | release: clean 61 | python setup.py sdist upload 62 | python setup.py bdist_wheel upload 63 | 64 | dist: clean 65 | python setup.py sdist 66 | python setup.py bdist_wheel 67 | ls -l dist 68 | 69 | install: clean 70 | python setup.py install 71 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | HydraChain 2 | ========== 3 | 4 | [![Join the chat at https://gitter.im/HydraChain/hydrachain](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/HydraChain/hydrachain?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) 5 | [![Build Status](https://travis-ci.org/HydraChain/hydrachain.svg?branch=master)](https://travis-ci.org/HydraChain/hydrachain) 6 | 7 | HydraChain is an extension of the [Ethereum](https://ethereum.org/) platform which adds support for creating [*Permissioned Distributed Ledgers*](http://www.ofnumbers.com/2015/04/06/consensus-as-a-service-a-brief-report-on-the-emergence-of-permissioned-distributed-ledger-systems/). Its primary domain of application are [*private chain* or *consortium chain*](https://blog.ethereum.org/2015/08/07/on-public-and-private-blockchains/) setups. 
8 | 9 | Features 10 | -------- 11 | 12 | **Full Compatibility to the Ethereum Protocol** 13 | 14 | HydraChain is 100% compatible on an API and contract level. Existing tool chains to develop and deploy *Smart Contracts* and *ÐApps* can easily be reused. 15 | 16 | **Accountable Validators** 17 | 18 | The main difference is the byzantine fault tolerant consensus protocol ([*detailed here*](https://github.com/HydraChain/hydrachain/blob/master/hc_consensus_explained.md)) which does not depend on proof-of-work. Instead it relies on a registered and accountable set of validators which propose and validate the order of transactions. 19 | 20 | **Instant Finality** 21 | 22 | New blocks are negotiated by the validators. A quorum by the validators which signs the block is required, before it is added to the chain. Thus there will be *no forks or reverts.* Once a block is committed, the state is final. 23 | 24 | The protocol allows for *sub second block times*. New blocks are only created in the presence of pending transactions though. 25 | 26 | 27 | **Native Contracts** 28 | 29 | HydraChain provides an infrastructure to develop smart contracts in the Python high level language. Benefits are significantly reduced development times and better debugging capabilities. As the Ethereum Virtual Machine is bypassed, native contract execution is also way faster. 30 | Native Contracts support the ABI and are inter-operable with EVM based contracts written in the Solidity or Serpent languages and can co-exist on the same chain. The constraint, that all validators need to run a node configured with the same set of native contracts is well manageable in private chain settings. 31 | 32 | **Customizability** 33 | 34 | Many aspects of the system can be freely configured to fit custom needs. For example transaction fees, gas limits, genesis allocation, block time etc. can easily be adjusted. 
35 | 36 | **Easy Deployment** 37 | 38 | Setting up a test network can be done with almost zero configuration. [Dockerfile templates](https://github.com/HydraChain/hydrachain/tree/master/docker) are available. 39 | 40 | **Open Source** 41 | 42 | The core software is open source and available under the permissive [MIT license](https://en.wikipedia.org/wiki/MIT_License). 43 | 44 | **Commercial Support** 45 | 46 | Consulting, support plans and custom development are offered by [brainbot technologies](http://www.brainbot.com) and a network of partners. 47 | 48 | Upcoming Features 49 | ----------------- 50 | *Note: We are happy to align our roadmap with the priorities of our users. If you have a certain requirement or prioritization, feel free to [file an issue](https://github.com/HydraChain/hydrachain/issues) or directly [contact us](mailto:heiko.hees@brainbot.com).* 51 | 52 | **Documentation** 53 | 54 | We are working on a comprehensive set of documentation which covers various deployment scenarios. This will be accompanied by a range of example contracts with a focus on use cases of the financial industry. 55 | 56 | **Proof of Identity - KYC/AML** 57 | 58 | An extension to ensure that all transactions in the system are between registered participants only. The goal is full audibility while preserving as much privacy as possible. 59 | 60 | **Selective State Sharing** 61 | 62 | Non-validating users of the system which must not know complete state (e.g. all transactions), are still able to verify the results of transaction and the state of contracts they interact with. 63 | 64 | **Chain Inter-Operability** 65 | 66 | Multi-chain setups can solve scalability and privacy requirements. 67 | As the term *Hydra* in the name already hints, that the software will support to run a node which concurrently participates in multiple chains. Next to other applications, this allows to support cross chain asset transfers as a native feature. 
68 | 69 | 70 | Setup & Invocation 71 | ------ 72 | 73 | **Install dependencies on Ubuntu** 74 | 75 | > sudo apt-get install libssl-dev libffi-dev libtool python-dev autoconf automake 76 | 77 | **Installation** 78 | 79 | > git clone https://github.com/HydraChain/hydrachain 80 | > cd hydrachain 81 | > python setup.py develop 82 | 83 | 84 | **Multiple nodes in a single process** 85 | 86 | > hydrachain -d datadir runmultiple --num_validators=3 --seed=42 87 | 88 | 89 | **Multiple nodes in multiple processes on the same machine** 90 | 91 | > hydrachain -d rundummy --num_validators=3 --seed=42 --node_num=0 92 | 93 | Example how to run three nodes in three terminals: 94 | 95 | > hydrachain -d datadir0 rundummy --node_num=0 96 | > hydrachain -d datadir1 rundummy --node_num=1 97 | > hydrachain -d datadir2 rundummy --node_num=2 98 | 99 | The `rundummy` command automatically configures a setup for `num_validator` nodes (instances of the application) which are running on the same machine. The node id of each instance can be specified by `--node_num=` and `--seed=` can be used to configure a different set of keys for all nodes. 100 | 101 | **Multiple nodes in a single docker daemon** 102 | 103 | See the [Dockerfile templates](https://github.com/HydraChain/hydrachain/tree/master/docker) 104 | 105 | 106 | 107 | Status: Work in Progress 108 | ------------------------ 109 | - 08.03.2016 - v0.3.0 - Fix versioning scheme. 110 | - 08.03.2016 - v0.2.0 - Add support for min\_block\_time/multiple tx/block. 111 | - 16.01.2016 - v0.1.10 - Automate docker image creation 112 | - 27.11.2015 - v0.1.0 - Automate PyPI release process 113 | - 26.11.2015 - v0.0.7 - Various fixes; first PyPI release 114 | - 21.11.2015 - v0.0.6 - Various fixes 115 | - 21.10.2015 - v0.0.4 - Run multiple node instances in the same process 116 | - 16.10.2015 - v0.0.3 - Update docker configuration 117 | - 18.09.2015 - Added zero config docker compose files 118 | - 09.09.2015 - Initial release, work in progress. 
119 | -------------------------------------------------------------------------------- /docker/README: -------------------------------------------------------------------------------- 1 | Running a local network with docker 2 | =================================== 3 | 4 | Prerequisites 5 | ------------- 6 | 7 | - docker (<= 1.8) 8 | - docker-compose 9 | 10 | 11 | Important: *Due to a change in how docker 1.9 manages container networking 12 | this suetup is currently only supported using docker 1.8.* 13 | 14 | Note: The following commands all have to be run from within the 15 | docker/dev/hydrachain directory 16 | 17 | 18 | Building the images 19 | ------------------- 20 | 21 | $ docker-compose build 22 | 23 | The first time you run this command will take quite a while since it downloads 24 | and compiles all dependencies. 25 | 26 | Subsequent runs will be significantly faster due to docker caching the build 27 | steps. 28 | 29 | Sometimes it may be necessary to temporarily disable this cache (e.g. when 30 | you want to forcefully update the dependencies) this can be done with the 31 | following command: 32 | 33 | $ docker-compose build --no-cache 34 | 35 | 36 | Running the network 37 | ------------------- 38 | 39 | $ docker-compose scale statsmon=1 bootstrap=1 node=9 40 | 41 | This will start one bootstrap and 9 "regular" nodes as well as one status 42 | monitor container. 43 | 44 | The status monitor's web interface will be accessible on port 3000. 45 | The bootstrap node exposes the following ports: 46 | - 30303/upd: P2P discovery protocol 47 | - 30303/tcp: RLPX protocol 48 | - 4000/tcp: JSONRPC interface 49 | 50 | 51 | Stopping & removing the containers 52 | ---------------------------------- 53 | 54 | $ docker-compose stop && docker-compose rm 55 | 56 | 57 | To restart with new (hydrachain) code simply stop, rm and restart there is no need to 58 | rebuild the images. 59 | 60 | To update the dependencies you will however need to rebuild the image (see above). 
61 | 62 | 63 | Connecting external nodes 64 | ------------------------- 65 | 66 | To connect an external node (i.e. not running inside a docker container) to 67 | the network you will first need to stop one of the docker nodes to "make room": 68 | 69 | $ docker stop hydrachain_node_1 70 | 71 | Next you need to find the arguments that were used to start that particular 72 | node: 73 | 74 | $ docker logs hydrachain_node_1 2>&1 | grep "/usr/local/bin/hydrachain" 75 | 76 | This should result in something similar to: 77 | 78 | /usr/local/bin/hydrachain \ 79 | --bootstrap_node enode://067fbe6672cbedbd7c023d88db1c0914145e6112c790574e5d64f5c128d38246d5ca7d1a0ccc85aff8fa7a4f0f02953e0ca52568751d4f2c8237898a6f89ea81@hydrachain_node_bootstrap:30303 \ 80 | -l:debug,jsonrpc:info runlocal \ 81 | --num_validators 3 \ 82 | --node_num 1 \ 83 | --seed 23 84 | 85 | Note: Be sure to use the output from your own system rather than the example 86 | above sine otherwise the bootstrap node key won't match. 87 | 88 | You can use this command almost verbatim to run the external node. The only 89 | things that need to be changed are: 90 | - The hostname in the `--bootstrap_node` argument. 91 | 92 | The string "hydrachain_node_bootstrap" needs to be replaced by the 93 | hostname / IP on which the exposed docker ports are opened. 94 | 95 | If you are running docker natively on linux this will usually just be 96 | "localhost". 97 | 98 | If you are using boot2docker or docker-machine (including tools like 99 | Kitematic) you can usually find the IP with one of the following commands: 100 | 101 | $ docker-machine ip dev 102 | $ boot2docker ip 103 | 104 | - Specify the correct path to the executable. 105 | 106 | If you intend to run a node from a local git checkout you will most likely 107 | only need to remove the path "/usr/local/bin". 
108 | 109 | 110 | Development 111 | ----------- 112 | 113 | The start script in the node image copies the hydrachain source code into the 114 | container on each start. That means that changes to the hydrachain source 115 | doesn't necessitate rebuilding the image. 116 | -------------------------------------------------------------------------------- /docker/dev/hydrachain/docker-compose.yml: -------------------------------------------------------------------------------- 1 | bootstrap: 2 | build: node 3 | container_name: hydrachain_node_bootstrap 4 | links: 5 | - statsmon 6 | volumes: 7 | - ../../../../hydrachain:/hydrachain.src:ro 8 | - ../../../../pyethapp:/pyethapp.src:ro 9 | ports: 10 | - "127.0.0.1:4000:4000" 11 | - "127.0.0.1:30303:30303" 12 | - "127.0.0.1:30303:30303/udp" 13 | environment: 14 | - HYDRACHAIN_HOST_PREFIX 15 | 16 | node: 17 | build: node 18 | links: 19 | - statsmon 20 | volumes: 21 | - ../../../../hydrachain:/hydrachain.src:ro 22 | - ../../../../pyethapp:/pyethapp.src:ro 23 | environment: 24 | - HYDRACHAIN_HOST_PREFIX 25 | 26 | statsmon: 27 | build: statsmon 28 | container_name: hydrachain_statsmon 29 | environment: 30 | - WS_SECRET=eth-net-stats-has-a-secret 31 | ports: 32 | - "3000:3000" 33 | -------------------------------------------------------------------------------- /docker/dev/hydrachain/node/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM python:2.7.9 2 | 3 | RUN apt-get update &&\ 4 | apt-get install -y curl git-core &&\ 5 | curl -sL https://deb.nodesource.com/setup | bash - &&\ 6 | apt-get update &&\ 7 | apt-get install -y nodejs 8 | 9 | 10 | RUN apt-get update &&\ 11 | apt-get install -y build-essential libgmp-dev rsync 12 | 13 | RUN cd /root &&\ 14 | git clone https://github.com/cubedro/eth-net-intelligence-api &&\ 15 | cd eth-net-intelligence-api &&\ 16 | npm install &&\ 17 | npm install -g pm2 18 | 19 | RUN pip install -U setuptools 20 | RUN pip install -U pip 21 | 22 | 
WORKDIR / 23 | RUN git clone https://github.com/HydraChain/hydrachain 24 | WORKDIR /hydrachain 25 | 26 | RUN python setup.py install 27 | 28 | WORKDIR / 29 | 30 | ADD start.sh /root/start.sh 31 | ADD app.json /root/eth-net-intelligence-api/app.json 32 | ADD mk_enode.py /root/mk_enode.py 33 | ADD settle_file.py /root/settle_file.py 34 | 35 | RUN chmod +x /root/start.sh 36 | RUN chmod +x /root/mk_enode.py 37 | RUN chmod +x /root/settle_file.py 38 | 39 | ENTRYPOINT /root/start.sh 40 | -------------------------------------------------------------------------------- /docker/dev/hydrachain/node/app.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "name" : "node-app", 4 | "script" : "app.js", 5 | "log_date_format" : "YYYY-MM-DD HH:mm Z", 6 | "merge_logs" : false, 7 | "watch" : false, 8 | "max_restarts" : 10, 9 | "exec_interpreter" : "node", 10 | "exec_mode" : "fork_mode", 11 | "env": 12 | { 13 | "NODE_ENV" : "production", 14 | "RPC_HOST" : "localhost", 15 | "RPC_PORT" : "4000", 16 | "LISTENING_PORT" : "30303", 17 | "INSTANCE_NAME" : "XXX", 18 | "CONTACT_DETAILS" : "", 19 | "WS_SERVER" : "ws://statsmon:3000", 20 | "WS_SECRET" : "eth-net-stats-has-a-secret", 21 | "VERBOSITY" : 2 22 | } 23 | } 24 | ] 25 | -------------------------------------------------------------------------------- /docker/dev/hydrachain/node/mk_enode.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | import click 4 | from pyethapp.accounts import mk_privkey 5 | from devp2p.crypto import privtopub as privtopub_raw 6 | from devp2p.utils import host_port_pubkey_to_uri 7 | 8 | 9 | @click.command() 10 | @click.option('-h', '--host', default="localhost") 11 | @click.option('-p', '--port', default=30303) 12 | @click.argument('seed', type=int) 13 | @click.argument('node-num', type=int) 14 | def mk_enode(seed, node_num, host, port): 15 | print( 16 | host_port_pubkey_to_uri( 17 | host, 18 | 
port, 19 | privtopub_raw( 20 | mk_privkey( 21 | '%d:udp:%d' % (seed, node_num))))) 22 | 23 | if __name__ == '__main__': 24 | mk_enode() 25 | -------------------------------------------------------------------------------- /docker/dev/hydrachain/node/settle_file.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | import hashlib 3 | 4 | import time 5 | 6 | import click 7 | import sys 8 | 9 | 10 | @click.command() 11 | @click.option("-s", "--settle-duration", default=5.0, show_default=True, 12 | help="Duration without change after which file is considered settled.") 13 | @click.option("-t", "--timeout", default=60, show_default=True, 14 | help="Total timeout after which to abort waiting for file to settle") 15 | @click.argument("filename", type=click.Path(exists=True, dir_okay=False)) 16 | def main(settle_duration, timeout, filename): 17 | window = start = time.time() 18 | last_sum = "" 19 | while time.time() - window < settle_duration: 20 | with open(filename) as file_: 21 | file_sum = hashlib.sha1(file_.read()).hexdigest() 22 | if file_sum != last_sum: 23 | window = time.time() 24 | last_sum = file_sum 25 | time.sleep(.2) 26 | 27 | if time.time() - start > timeout: 28 | print("File '{}' hasn't settled after {} s. Aborting.".format(filename, timeout)) 29 | sys.exit(1) 30 | 31 | 32 | if __name__ == "__main__": 33 | main() 34 | -------------------------------------------------------------------------------- /docker/dev/hydrachain/node/start.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | # Wait for all nodes to come up 5 | /root/settle_file.py /etc/hosts 6 | 7 | cat /etc/hosts 8 | 9 | SEED=${SEED:-23} 10 | HOST_BASE_NAME=${HYDRACHAIN_HOST_PREFIX:-hydrachain}_node_ 11 | BOOTSTRAP_NODE_NAME=${HOST_BASE_NAME}bootstrap 12 | 13 | if ! grep -q $BOOTSTRAP_NODE_NAME /etc/hosts ; then 14 | echo "No bootstrap node found. Aborting." 
15 | exit 1 16 | fi 17 | 18 | OWN_IP=$(ip -o -4 addr show | awk -F '[ /]+' '/global/ {print $4}') 19 | OWN_INDEX=$(egrep "$OWN_IP\s+${HOST_BASE_NAME}" /etc/hosts | grep -v bridge | sed -r "s/^.*${HOST_BASE_NAME}//") 20 | OWN_NAME=${HOST_BASE_NAME}${OWN_INDEX} 21 | NODE_COUNT=$(grep ${HOST_BASE_NAME} /etc/hosts | grep -v bridge | sed -r "s/^.*${HOST_BASE_NAME}//" | sort -n | tail -n1) 22 | # Increment node count to account for bootstrap node 23 | NODE_COUNT=$((NODE_COUNT+1)) 24 | BOOTSTRAP_NODE=$(/root/mk_enode.py --host ${BOOTSTRAP_NODE_NAME} ${SEED} 0) 25 | 26 | if [ ${OWN_NAME} == ${BOOTSTRAP_NODE_NAME} ]; then 27 | OWN_INDEX=0 28 | fi 29 | 30 | (set -o posix; set) 31 | 32 | cd /root/eth-net-intelligence-api 33 | perl -pi -e "s/XXX/${OWN_NAME}/g" app.json 34 | /usr/bin/pm2 start ./app.json 35 | 36 | if [ -f /pyethapp.src/setup.py ]; then 37 | pip uninstall -y pyethapp 38 | 39 | rsync -a --delete /pyethapp.src/* /pyethapp/ 40 | cd /pyethapp 41 | pip install -e . 42 | fi 43 | 44 | rsync -a --delete /hydrachain.src/* /hydrachain/ 45 | cd /hydrachain 46 | pip install -e . 
47 | 48 | echo /usr/local/bin/hydrachain --bootstrap_node "$BOOTSTRAP_NODE" -l:debug -c jsonrpc.listen_host=0.0.0.0 "$@" runlocal --num_validators ${NODE_COUNT} --node_num ${OWN_INDEX} --seed ${SEED} 49 | /usr/local/bin/hydrachain --bootstrap_node "$BOOTSTRAP_NODE" -l:debug -c jsonrpc.listen_host=0.0.0.0 "$@" runlocal --num_validators ${NODE_COUNT} --node_num ${OWN_INDEX} --seed ${SEED} 50 | -------------------------------------------------------------------------------- /docker/dev/hydrachain/statsmon/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM iojs 2 | 3 | RUN git clone https://github.com/cubedro/eth-netstats 4 | RUN cd /eth-netstats && npm install 5 | RUN cd /eth-netstats && npm install -g grunt-cli 6 | RUN cd /eth-netstats && grunt 7 | 8 | WORKDIR /eth-netstats 9 | 10 | CMD ["npm", "start"] 11 | -------------------------------------------------------------------------------- /examples: -------------------------------------------------------------------------------- 1 | hydrachain/examples -------------------------------------------------------------------------------- /hc_consensus_explained.md: -------------------------------------------------------------------------------- 1 | HC Consensus 2 | ------------ 3 | 4 | HC Consensus is a byzantine fault tolerant protocol to coordinate consensus on the order of transactions in blockchain systems. 5 | 6 | **Features:** 7 | - Finality, no state re-organisation 8 | - Low overhead in normal operation 9 | 10 | The protocol relies on a set of validators of which no more than 1/3 must be byzantine. 11 | 12 | At each block height one or more rounds are used to agree on a proposed block for the height. The proposer of a block for each height and round is deterministically round robin chosen from the set of validators. New rounds can only be started once +2/3 nodes voted on the last round, which keeps the distributed system in sync. 
13 | 14 | Normal operation comes with low overhead, as proposed blocks come with the quorum of signatures on the block of the last height. 15 | 16 | propose H -> commit H-1 -> vote H 17 | 18 | 19 | **Notation** 20 | 21 | - `H,R`: block height, round 22 | - `B`: block 23 | - `+1/n`: more than 1/nth (votes) 24 | 25 | **Round:** 26 | 27 | The sequence `propose H -> vote H` is called a round `R`. There may be more than one round required to commit a block at a given height `H`. A node moves to the next round if it either has received a proposal or received about +2/3 votes in the current round. If nodes don't receive a proposal within a timeout, they send a vote which either repeats their last vote or indicates a timeout. 28 | 29 | **Votes/Locks:** 30 | 31 | Votes are signed messages sent by validators, which sign: (H, R, [B]) 32 | 33 | There are two kind of Votes: 34 | 35 | - Lock(H, R, B) 36 | represents the vote of a validator for a block at H,R who is locked on that block in H,R 37 | - NotLocked(H, R) 38 | represents the vote that a validator is not locked on a block in H and promised to not lock in H,R 39 | 40 | Validators send their vote every round, either in response to a message from the proposer (Lock) or if they timeout (NotLocked or Lock). 41 | 42 | Validators must exactly send one vote per round. 43 | 44 | **LockSet(R):** 45 | 46 | A valid LockSet(R) is a collection of at least +2/3 of the eligible votes in round R 47 | 48 | Each validators collects their own LockSet based on received votes. A valid LockSet must contain +2/3 of eligible votes. Votes can either be Locked on a block or NotLocked (in case of a timeout). Valid locksets allows nodes to move to the next round. 49 | 50 | Proposals at R>R0 contain a LockSet, which proves that +2/3 of nodes voted in the last round. Proposed blocks B(H, R0) must contain a Quorum on a B(H-1, R). This LockSet allows to commit B(H-1, R) and also proves that +2/3 voted in H-1,R. 
51 | 52 | There are three kinds of LockSets: 53 | 54 | - Quorum: has +2/3 of all eligible votes voting for the same block 55 | - QuorumPossible: has +1/3 of all eligible votes voting for the same block. 56 | - NoQuorum: has at most -1/3 of all eligible votes on a block (which all could be byzantine) 57 | 58 | **VoteInstruction(R, B):** 59 | 60 | A proposal in R>R0 which includes a QuorumPossible(R-1) and instructs nodes to vote for the block which already has at least one non-byzantine vote. 61 | 62 | 63 | Consensus Protocol: 64 | ------------------- 65 | In order to agree on a block for the next height of the blockchain a round-based protocol is run to determine the next block. Each round is composed of at least two steps (Propose and Vote). 66 | 67 | A round is successful if there is a Quorum for a proposed block. 68 | 69 | In normal operation, the order of steps of a validator at height H is: 70 | 71 | 1. receive proposal B(H,R0) which includes Quorum on B(H-1,R) 72 | 2. commit B(H-1, R) 73 | 3. vote B(H,R0) 74 | 75 | Note: Commits for H-1 are implicit, since proposals in H must contain a Quorum for a block on H-1. 76 | 77 | As nodes also maintain their own LockSet, they can commit as soon as they've seen a Quorum. This is usually before they receive a proposal for a new block height. Thus most of the time the order looks like: 78 | 79 | 1. receive proposal B(H,R0) 80 | 2. vote B(H,R0) 81 | 3. intercept Quorum B(H, R0) 82 | 4. commit B(H,R0) 83 | 84 | This async committing is safe, because if there ever was a Quorum-LockSet, the proposer will at least have learned (and must include) a QuorumPossible which will lead to a consensus for that block. 
85 | 86 | In order to avoid votes for conflicting proposals, eligible voters in each round lock by sending a vote, which is either: 87 | 88 | - Locked(H, R, B) if they voted for a block 89 | - NotLocked(H, R) if they did not yet vote on a block 90 | 91 | As long as a node is locked it must not vote for another block on H. A node is only allowed to unlock and vote for a different block if either 92 | 93 | - it is NotLocked (i.e. it had not seen a valid proposal and timed out in the previous round) or 94 | - it received a VoteInstruction (which proves that +1/3 of the nodes are already locked on a different block) 95 | 96 | 97 | Nodes must vote in every round (e.g. repeating older votes but signed for the new round) 98 | 99 | If the designated proposer has a LockSet(R-1) which is not a Quorum, it must act 100 | as follows: 101 | 102 | - NoQuorum: propose a new block 103 | - QuorumPossible: broadcast a VoteInstruction message referencing the block 104 | 105 | 106 | In both cases the message contains the LockSet(R-1), which allows the 107 | nodes to eventually unlock and vote again (either on a block proposed in R or proposed in R-n and instructed to vote in R). 108 | 109 | 110 | **NewRound(H, R):** 111 | 112 | A node moves to the next round R+1 if it either has received and voted for a valid proposal in R or received +2/3 votes in R. 113 | A timeout is scheduled at the beginning of each round and triggered if no proposal was received within timeout. Higher rounds have a higher timeout. timeout(R) = t_base * t_inc ^ R 114 | 115 | **Propose(H, R):** 116 | 117 | The proposer of the block at H,R is selected round robin from the set of validators. Only one validator must propose exactly one block at H,R. 118 | 119 | The proposer must have collected at least +2/3 votes in R-1 in order to have a valid LockSet(R-1). 120 | 121 | If there is a QuorumPossible the proposer broadcasts a VoteInstruction(R, B). 
122 | 123 | Otherwise it broadcasts a new proposal, which is for a new block height if it knows a Quorum. 124 | 125 | Valid new proposals for H, R0 are blocks that 126 | 127 | - contain a Quorum-LockSet on a block H-1 128 | - describe a valid state change from H-1 > H 129 | 130 | Valid new proposals for H, R > R0 are blocks that additionally 131 | 132 | - include a NoQuorum(R-1) 133 | 134 | 135 | A proposal is broadcasted as soon, as there is either a LockSet from the last round which can be one of: 136 | 137 | - Quorum: consensus 138 | -> broadcast new block B(H, R0) 139 | - NoQuorum: at most -1/3 of all votes locked on the same block 140 | -> broadcast new block B(H, R) 141 | - QuorumPossible: +1/3 of all votes locked on a block B(H, Rn < R) 142 | -> broadcast VoteInstruction B(H, Rn < R) 143 | 144 | 145 | **Commit(H, R):** 146 | 147 | Is entered by a node whenever it learns about a Quorum(H,R) for the first time. This can be by receiving a LockSet within a proposal or by having collected enough votes in its local LockSet. 148 | 149 | If the parent of block is unknown, the node goes into sync mode and request the missing block. 150 | 151 | If the parent block is available: 152 | 153 | - Commit the block referenced by the Quorum 154 | - unlock 155 | - implicitly move to new height H+1 156 | 157 | **On Timeout(H, R):** 158 | 159 | - If validator is locked on a block B from a previous round, it 160 | broadcasts Lock(H, R, B) 161 | - If validator is not locked, it broadcasts NotLocked(H, R) 162 | - timeout for the current height is increased 163 | 164 | **Vote(H, R):** 165 | 166 | Case: Validator receives a valid proposal B for H, R. 
167 | 168 | - If locked on a block: unlock 169 | - lock on the new proposal 170 | - broadcasts a new Lock(H, R, B) 171 | 172 | Case: Validator receives an invalid proposal for H, R 173 | 174 | - If locked, broadcast Locked(H, R, B) 175 | - If not locked, broadcast NotLocked(H, R) 176 | 177 | Case: Validator receives a valid VoteInstruction(H, R, B) 178 | 179 | - If locked on a block it unlocks 180 | - lock on B from the VoteInstruction 181 | - broadcast a Lock(H, R, B) 182 | 183 | Valid Proposals/VoteInstructions must be signed by the designated proposer of H,R. They must come with a valid LockSet(R-1) for R>0. 184 | 185 | The state transitions of proposed blocks are validated before a vote is given. 186 | 187 | Invalid proposals are ignored. 188 | 189 | 190 | 191 | Proof of Safety 192 | --------------- 193 | 194 | Assumptions: 195 | 196 | - there are at most -1/3 byzantine nodes 197 | - no node does vote twice on a height 198 | 199 | If a validator commits block B(H, R), it's because it saw a Quorum with +2/3 of votes for it at round R. This implies, that no proposer can propose a new valid block B(H, R+1) since it had to include a NoQuorum LockSet from R in order to prove, that no more than -1/3 nodes were locked on the same block in R. This is not possible. 200 | Therefore on H the at least +1/3 honest nodes will never vote for a different block and so no other block can reach a Quorum. 201 | 202 | Proof of Liveness 203 | ----------------- 204 | 205 | If +1/3 honest validators are locked on two different blocks from different rounds, a proposers' NoQuorum- or QuorumPossible-LockSet will eventually cause nodes locked from the earlier round to unlock. 206 | 207 | As timeout length increases with every new round, while the size of blocks and the LockSet are capped, the network will eventually be able to transport the whole proposal in time. 
# -*- coding: utf-8 -*-
# ############# version ##################
from pkg_resources import get_distribution, DistributionNotFound
import os.path
import subprocess
import re


# Parses `git describe --tags --dirty` output, e.g. "v0.1.0-12-gabc1234-dirty".
# The named groups are required: the formatting code below accesses
# match.group("version") and match.group("git").
GIT_DESCRIBE_RE = re.compile(
    r'^(?P<version>v\d+\.\d+\.\d+)-(?P<git>\d+-g[a-fA-F0-9]+(?:-dirty)?)$')


__version__ = None
try:
    _dist = get_distribution('hydrachain')
    # Normalize case for Windows systems
    dist_loc = os.path.normcase(_dist.location)
    here = os.path.normcase(__file__)
    if not here.startswith(os.path.join(dist_loc, 'hydrachain')):
        # not installed, but there is another version that *is*
        raise DistributionNotFound
    __version__ = _dist.version
except DistributionNotFound:
    pass

if not __version__:
    # not installed as a distribution: fall back to git metadata
    try:
        rev = subprocess.check_output(['git', 'describe', '--tags', '--dirty'],
                                      stderr=subprocess.STDOUT)
        # check_output returns bytes on Python 3 and the output carries a
        # trailing newline; normalize before matching
        if isinstance(rev, bytes):
            rev = rev.decode('ascii', 'replace')
        match = GIT_DESCRIBE_RE.match(rev.strip())
        if match:
            __version__ = "{}+git-{}".format(match.group("version"), match.group("git"))
    except Exception:
        # narrow from bare `except:` so SystemExit/KeyboardInterrupt
        # are not swallowed; git may be absent or we may not be in a repo
        pass

if not __version__:
    __version__ = 'undefined'

# ########### endversion ##################
import gevent
import pyethapp.app as pyethapp_app
import pyethapp.config as konfig
from click.exceptions import BadParameter
from click.types import IntRange
from devp2p.app import BaseApp
from devp2p.crypto import privtopub as privtopub_raw
from devp2p.discovery import NodeDiscovery
from devp2p.peermanager import PeerManager
from devp2p.service import BaseService
from devp2p.utils import host_port_pubkey_to_uri
from ethereum.keys import privtoaddr, PBKDF2_CONSTANTS
from ethereum.utils import denoms
from gevent.event import Event
from pyethapp.accounts import AccountsService, Account
from pyethapp.accounts import mk_privkey
from pyethapp.console_service import Console
from pyethapp.db_service import DBService
from pyethapp.jsonrpc import JSONRPCServer

from hydrachain import __version__
from hydrachain.hdc_service import ChainService


log = slogging.get_logger('app')


# Services registered with every node, in this order; ChainService replaces
# pyethapp's PoW chain with the HydraChain BFT consensus service.
services = [DBService,
            AccountsService,
            NodeDiscovery,
            PeerManager,
            ChainService,
            JSONRPCServer,
            Console]

# make the pyethapp CLI start our service set instead of its own
pyethapp_app.services = services


class HPCApp(pyethapp_app.EthApp):
    """pyethapp application subclass carrying HydraChain branding and
    HydraChain-specific default config entries."""
    client_name = 'HydraChain'
    client_version = '%s/%s/%s' % (__version__, sys.platform,
                                   'py%d.%d.%d' % sys.version_info[:3])
    client_version_string = '%s/v%s' % (client_name, client_version)
    default_config = dict(BaseApp.default_config)
    default_config['client_version_string'] = client_version_string
    # callables invoked with the app once start_app() has started it
    default_config['post_app_start_callbacks'] = []

    # option to easily specify some unlocked and funded accounts
    default_config['test_privkeys'] = []
    default_config['test_privkeys_endowment'] = 1024 * denoms.ether


# make pyethapp's CLI construct our app class
pyethapp_app.EthApp = HPCApp
pyethapp_app.app.help = b'Welcome to %s' % HPCApp.client_version_string


# set morden profile
for p in pyethapp_app.app.params:
    if p.name == 'profile':
        p.default = 'testnet'


@pyethapp_app.app.command(help='run in a zero config default configuration')
@click.option('num_validators', '--num_validators', '-v', multiple=False,
              type=int, default=3, help='number of validators')
@click.option('node_num', '--node_num', '-n', multiple=False,
              type=int, default=0, help='the node_num')
@click.option('seed', '--seed', '-s', multiple=False,
              type=int, default=42, help='the seed')
@click.pass_context
def rundummy(ctx, num_validators, node_num, seed):
    """Run one node of a deterministic local test network; node 0
    (derived from the same seed on every host) acts as bootstrap node."""
    base_port = 29870

    # reduce key derivation iterations (dev only -- weakens the keystore)
    PBKDF2_CONSTANTS['c'] = 100

    config = ctx.obj['config']

    config['discovery']['bootstrap_nodes'] = [get_bootstrap_node(seed, base_port)]

    config, account = _configure_node_network(config, num_validators, node_num, seed)

    # set ports based on node
    config['discovery']['listen_port'] = base_port + node_num
    config['p2p']['listen_port'] = base_port + node_num
    config['p2p']['min_peers'] = 2
    config['jsonrpc']['listen_port'] += node_num

    app = start_app(config, [account])
    serve_until_stopped(app)


@pyethapp_app.app.command(help='run multiple nodes in a zero config default configuration')
@click.option('num_validators', '--num_validators', '-v', multiple=False,
              type=int, default=3, help='number of validators')
@click.option('seed', '--seed', '-s', multiple=False,
              type=int, default=42, help='the seed')
@click.pass_context
def runmultiple(ctx, num_validators, seed):
    """Run all validators inside one process (one app per node)."""
    # crash the process on any unhandled greenlet error
    gevent.get_hub().SYSTEM_ERROR = BaseException
    base_port = 29870

    # reduce key derivation iterations
    PBKDF2_CONSTANTS['c'] = 100

    config = ctx.obj['config']
    config['discovery']['bootstrap_nodes'] = [get_bootstrap_node(seed, base_port)]

    apps = []
    for node_num in range(num_validators):
        # each node gets its own deep copy so per-node mutations don't leak
        n_config = copy.deepcopy(config)
        n_config, account = _configure_node_network(n_config, num_validators, node_num, seed)
        # set ports based on node
        n_config['discovery']['listen_port'] = base_port + node_num
        n_config['p2p']['listen_port'] = base_port + node_num
        n_config['p2p']['min_peers'] = min(10, num_validators - 1)
        n_config['p2p']['max_peers'] = num_validators * 2
        n_config['jsonrpc']['listen_port'] += node_num
        n_config['client_version_string'] = 'NODE{}'.format(node_num)

        # have multiple datadirs
        n_config['data_dir'] = os.path.join(n_config['data_dir'], str(node_num))
        konfig.setup_data_dir(n_config['data_dir'])

        # activate ipython console for the first validator
        if node_num != 0:
            n_config['deactivated_services'].append(Console.name)
        # n_config['deactivated_services'].append(ChainService.name)
        app = start_app(n_config, [account])
        apps.append(app)
        # hack to enable access to all apps in the console
        app.apps = apps
    serve_until_stopped(*apps)


@pyethapp_app.app.command(help='run in a zero config default configuration')
@click.option('num_validators', '--num_validators', '-v', multiple=False,
              type=IntRange(min=4), default=4, show_default=True,
              help='number of validators; min. 4')
@click.option('node_num', '--node_num', '-n', multiple=False,
              type=int, default=0, help='the node_num')
@click.option('seed', '--seed', '-s', multiple=False,
              type=int, default=42, help='the seed')
@click.option('--nodial/--dial', default=False, help='do not dial nodes')
@click.pass_context
def runlocal(ctx, num_validators, node_num, seed, nodial):
    """Run one node of a local multi-process network; the bootstrap node
    is expected to be configured externally (see docker start.sh)."""
    if not 0 <= node_num < num_validators:
        raise BadParameter("Node number must be between 0 and number of validators - 1")

    # reduce key derivation iterations
    config = ctx.obj['config']
    config, account = _configure_node_network(config, num_validators, node_num, seed)

    config['p2p']['min_peers'] = 2

    if nodial:
        # isolated node: no bootstrap and no outgoing connections
        config['discovery']['bootstrap_nodes'] = []
        config['p2p']['min_peers'] = 0

    app = start_app(config, [account])
    serve_until_stopped(app)


def _configure_node_network(config, num_validators, node_num, seed):
    """Derive this node's p2p key, the validator address list and the node's
    own account from *seed*. Every node derives the identical validator
    list, so the network agrees on the validator set without exchanging it.
    Returns (config, account)."""
    assert node_num < num_validators

    # reduce key derivation iterations
    PBKDF2_CONSTANTS['c'] = 100

    # create this node priv_key
    config['node']['privkey_hex'] = mk_privkey('%d:udp:%d' % (seed, node_num)).encode('hex')

    # create validator addresses
    validators = [privtoaddr(mk_privkey('%d:account:%d' % (seed, i)))
                  for i in range(num_validators)]
    config['hdc']['validators'] = validators

    # create this node account
    account = Account.new(password='', key=mk_privkey('%d:account:%d' % (seed, node_num)))
    assert account.address in validators
    return config, account


def start_app(config, accounts):
    """Create an HPCApp, fund/unlock *accounts*, register all services and
    start the app. Returns the running app after invoking any
    `post_app_start_callbacks` from the config."""

    # create app
    app = HPCApp(config)

    # development mode
    if False:  # NOTE: dead switch; flip manually to crash on greenlet errors
        gevent.get_hub().SYSTEM_ERROR = BaseException

    if config['test_privkeys']:
        # init accounts first, as we need (and set by copy) the coinbase early FIXME
        genesis_config = dict(alloc=dict())
        for privkey in config['test_privkeys']:
            assert len(privkey) == 32
            address = privtoaddr(privkey)
            account = Account.new(password='', key=privkey)
            accounts.append(account)
            # add to genesis alloc
            genesis_config['alloc'][address] = {'wei': config['test_privkeys_endowment']}

        # a custom genesis alloc invalidates any pinned genesis hash
        if config['test_privkeys'] and config['eth'].get('genesis_hash'):
            del config['eth']['genesis_hash']

        konfig.update_config_from_genesis_json(config, genesis_config)

    # dump config
    pyethapp_app.dump_config(config)

    # AccountsService must be up before other services query accounts
    if AccountsService in services:
        AccountsService.register_with_app(app)

    # add account
    for account in accounts:
        app.services.accounts.add_account(account, store=False)

    if config['hdc']['validators']:
        # this node's coinbase must be one of the validators
        assert app.services.accounts.coinbase in config['hdc']['validators']

    # register services
    for service in services:
        assert issubclass(service, BaseService)
        if service.name not in app.config['deactivated_services'] + [AccountsService.name]:
            assert service.name not in app.services
            service.register_with_app(app)
            assert hasattr(app.services, service.name)

    # start app
    log.info('starting')
    app.start()
    for cb in config['post_app_start_callbacks']:
        cb(app)
    return app


def serve_until_stopped(*apps):
    """Block until SIGQUIT/SIGTERM, then stop every app."""
    # wait for interrupt
    evt = Event()
    gevent.signal(signal.SIGQUIT, evt.set)
    gevent.signal(signal.SIGTERM, evt.set)
    evt.wait()
    # finally stop
    for app in apps:
        app.stop()


def get_bootstrap_node(seed, base_port=29870, host=b'0.0.0.0'):
    """Return the enode URI of node 0, deterministically derived from *seed*."""
    # create bootstrap node priv_key and enode
    bootstrap_node_privkey = mk_privkey('%d:udp:%d' % (seed, 0))
    bootstrap_node_pubkey = privtopub_raw(bootstrap_node_privkey)
    return host_port_pubkey_to_uri(host, base_port, bootstrap_node_pubkey)
def ishash(h):
    """Return True iff *h* is a 32-byte (keccak256-sized) hash value."""
    if not isinstance(h, bytes):
        return False
    return len(h) == 32


def isaddress(a):
    """Return True iff *a* is a 20-byte account address."""
    if not isinstance(a, bytes):
        return False
    return len(a) == 20
class MissingSignatureError(Exception):
    # raised when .hash is requested for an object that was never signed
    pass


class Signed(RLPHashable):

    """Base class for RLP-serializable objects that carry an ECDSA
    signature. The signature lives in the (v, r, s) fields appended to
    the subclass' own fields; `sender` is recovered lazily from it.
    """

    fields = [
        ('v', big_endian_int),
        ('r', big_endian_int),
        ('s', big_endian_int),
    ]

    # cached 20-byte address recovered from the signature, or None
    _sender = None

    def __init__(self, *args, **kargs):
        super(Signed, self).__init__(*args, **kargs)

    def sign(self, privkey):
        """Sign this with a private key"""
        if self.v:
            raise InvalidSignature("already signed")

        if privkey in (0, '', '\x00' * 32):
            raise InvalidSignature("Zero privkey cannot sign")
        # hash of the RLP encoding *without* the (v, r, s) signature fields
        rawhash = sha3(rlp.encode(self, self.__class__.exclude(['v', 'r', 's'])))

        if len(privkey) == 64:
            # hex-encoded key: convert to the 32-byte binary form
            privkey = encode_privkey(privkey, 'bin')

        pk = PrivateKey(privkey, raw=True)
        signature = pk.ecdsa_recoverable_serialize(pk.ecdsa_sign_recoverable(rawhash, raw=True))

        # 64-byte (r||s) followed by the 1-byte recovery id (Python 2 str ops)
        signature = signature[0] + chr(signature[1])

        self.v = ord(signature[64]) + 27  # Ethereum convention: recid + 27
        self.r = big_endian_to_int(signature[0:32])
        self.s = big_endian_to_int(signature[32:64])

        self._sender = None  # drop any previously cached sender
        return self

    @property
    def sender(self):
        # recover once and cache; reset only by sign()
        if not self._sender:
            self._sender = self.recover_sender()
        return self._sender

    def recover_sender(self):
        """Recover the signer's 160-bit address from (v, r, s).

        Returns None while unsigned (v == 0); raises InvalidSignature for
        out-of-range or malformed signatures.
        """
        if self.v:
            # NOTE(review): s is bounded by P (field prime) while r is
            # bounded by N (curve order); most implementations check both
            # against N -- confirm this asymmetry is intentional.
            if self.r >= N or self.s >= P or self.v < 27 or self.v > 28 \
               or self.r == 0 or self.s == 0:
                raise InvalidSignature()
            rlpdata = rlp.encode(self, self.__class__.exclude(['v', 'r', 's']))
            rawhash = sha3(rlpdata)
            pk = PublicKey(flags=ALL_FLAGS)
            try:
                pk.public_key = pk.ecdsa_recover(
                    rawhash,
                    pk.ecdsa_recoverable_deserialize(
                        zpad(
                            "".join(chr(c) for c in int_to_32bytearray(self.r)),
                            32
                        ) + zpad(
                            "".join(chr(c) for c in int_to_32bytearray(self.s)),
                            32
                        ),
                        self.v - 27
                    ),
                    raw=True
                )
                pub = pk.serialize(compressed=False)
            except Exception:
                raise InvalidSignature()
            if pub[1:] == "\x00" * 32:
                raise InvalidSignature()
            pub = encode_pubkey(pub, 'bin')
            # address := last 20 bytes of keccak(pubkey without 0x04 prefix)
            return sha3(pub[1:])[-20:]

    @property
    def hash(self):
        "signatures are non deterministic"
        # hash over payload + recovered sender instead of (v, r, s): two
        # different signatures by the same signer over the same payload
        # then yield the same hash
        if self.sender is None:
            raise MissingSignatureError()

        class HashSerializable(rlp.Serializable):
            fields = [(field, sedes) for field, sedes in self.fields
                      if field not in ('v', 'r', 's')] + [('_sender', binary)]
            _sedes = None
        return sha3(rlp.encode(self, HashSerializable))

# Votes


class Vote(Signed):

    """A signed Vote"""

    fields = [
        ('height', big_endian_int),
        ('round', big_endian_int),
        ('blockhash', binary),
    ] + Signed.fields

    def __init__(self, height, round, blockhash='', v=0, r=0, s=0):
        super(Vote, self).__init__(height, round, blockhash, v=v, r=r, s=s)

        # restore class when deserialized: a non-empty blockhash means a
        # vote *for* that block (VoteBlock), an empty one is a timeout
        # vote (VoteNil)
        if blockhash:
            assert ishash(blockhash)
            self.blockhash = blockhash
            self.__class__ = VoteBlock
        else:
            self.__class__ = VoteNil

    def __repr__(self):
        return '<%s(S:%s BH:%s)>' % (self.__class__.__name__,
                                     phx(self.sender), phx(self.blockhash))

    @property
    def hr(self):
        # the (height, round) this vote belongs to
        return self.height, self.round


class VoteBlock(Vote):
    # a "Lock": vote for a specific block
    pass


class VoteNil(Vote):
    # a "NotLocked" vote, cast on timeout without endorsing a block
    pass


# LockSets

class InvalidVoteError(Exception):
    pass


class DoubleVotingError(InvalidVoteError):
    # the same sender cast two different votes for one (height, round)
    pass
class LockSet(RLPHashable):  # careful, is mutable!

    """A collection of Votes for a single (height, round).

    Becomes valid once it holds more than 2/3 of the eligible votes; a
    valid set is in exactly one of the states Quorum, QuorumPossible or
    NoQuorum (see `state` and `check`).
    """

    fields = [
        ('num_eligible_votes', big_endian_int),
        ('votes', CountableList(Vote))
    ]

    processed = False

    def __init__(self, num_eligible_votes, votes=None):
        self.num_eligible_votes = num_eligible_votes
        self.votes = []
        for v in votes or []:
            self.add(v)

    # @property
    # def size(self):
    #     return len(self.votes) * 67 + 5

    def copy(self):
        return LockSet(self.num_eligible_votes, self.votes)

    @property
    def state(self):
        """One-letter state tag plus vote count, e.g. 'Q:7'."""
        if not self.is_valid:
            s = 'I'
        elif self.has_quorum:
            s = 'Q'
        elif self.has_quorum_possible:
            s = 'P'
        elif self.has_noquorum:
            s = 'N'
        else:
            raise Exception('no valid state')
        return '%s:%d' % (s, len(self))

    def __repr__(self):
        # FIX: the format strings had been lost (an empty literal formatted
        # with a three-tuple raises TypeError); reconstructed here.
        if self.votes:
            return '<LockSet(%s H:%d R:%d)>' % (self.state, self.height, self.round)
        return '<LockSet(empty)>'

    def add(self, vote, force_replace=False):
        """Add a signed vote; return True if the set changed.

        Raises InvalidVoteError for unsigned or (height, round)-inconsistent
        votes, DoubleVotingError when the sender already voted differently
        (unless *force_replace* is set).
        """
        assert isinstance(vote, Vote)
        if not vote.sender:
            raise InvalidVoteError('no signature')
        if vote not in self.votes:
            if len(self) and self.hr != vote.hr:
                raise InvalidVoteError('inconsistent height, round')
            signee = self.signee
            if vote.sender in signee:
                if not force_replace:
                    raise DoubleVotingError(vote.sender)  # different votes on the same H,R
                self.votes.remove(self.votes[signee.index(vote.sender)])
            self.votes.append(vote)
            return True

    def __len__(self):
        return len(self.votes)

    def __iter__(self):
        return iter(self.votes)

    @property
    def signee(self):
        # addresses of all voters currently in the set
        return [v.sender for v in self.votes]

    def blockhashes(self):
        """(blockhash, count) pairs, most voted first, ties broken by hash.

        NOTE: uses sorted(cmp=...)/cmp, which exist on Python 2 only.
        """
        assert self.is_valid
        c = Counter(v.blockhash for v in self.votes if isinstance(v, VoteBlock))
        # deterministic sort necessary
        return sorted(c.most_common(), cmp=lambda a, b: cmp((b[1], b[0]), (a[1], a[0])))

    @property
    def hr(self):
        """compute (height,round)
        We might have multiple rounds before we see consensus for a certain height.
        If everything is good, round should always be 0.
        """
        assert len(self), 'no votes, can not determine height'
        h = set([(v.height, v.round) for v in self.votes])
        assert len(h) == 1, len(h)  # add() enforces consistency
        return h.pop()

    height = property(lambda self: self.hr[0])
    round = property(lambda self: self.hr[1])

    @property
    def is_valid(self):
        # valid once more than 2/3 of the eligible votes are collected
        # (hr additionally asserts all votes agree on height/round)
        return len(self) > 2 / 3. * self.num_eligible_votes and self.hr

    @property
    def has_quorum(self):
        """
        we've seen +2/3 of all eligible votes voting for one block.
        there is a quorum.
        """
        assert self.is_valid
        bhs = self.blockhashes()
        if bhs and bhs[0][1] > 2 / 3. * self.num_eligible_votes:
            return bhs[0][0]

    @property
    def has_noquorum(self):
        """
        no more than 1/3 of the eligible votes are on the same block
        """
        assert self.is_valid
        bhs = self.blockhashes()
        if not bhs or bhs[0][1] <= 1 / 3. * self.num_eligible_votes:
            assert not self.has_quorum_possible
            return True

    @property
    def has_quorum_possible(self):
        """
        we've seen +1/3 of all eligible votes voting for one block.
        at least one vote was from an honest node.
        we can assume that this block is agreeable.
        """
        if self.has_quorum:
            return
        assert self.is_valid  # we could tell that earlier
        bhs = self.blockhashes()
        if bhs and bhs[0][1] > 1 / 3. * self.num_eligible_votes:
            return bhs[0][0]

    def check(self):
        "either invalid or one of quorum, noquorum, quorumpossible"
        if not self.is_valid:
            return True
        test = (self.has_quorum, self.has_quorum_possible, self.has_noquorum)
        assert 1 == len([x for x in test if x is not None])
        return True

############


def genesis_signing_lockset(genesis, privkey):
    """
    in order to avoid a complicated bootstrapping, we define
    the genesis_signing_lockset as a lockset with one vote by any validator.
    """
    v = VoteBlock(0, 0, genesis.hash)
    v.sign(privkey)
    ls = LockSet(num_eligible_votes=1)
    ls.add(v)
    assert ls.has_quorum
    return ls


########

class Ready(Signed):

    """
    Used to sync during the startup sequence
    """
    fields = [
        ('nonce', big_endian_int),
        ('current_lockset', LockSet)
    ] + Signed.fields

    def __init__(self, nonce, current_lockset, v=0, r=0, s=0):
        super(Ready, self).__init__(nonce, current_lockset, v, r, s)

    def __repr__(self):
        # FIX: the format string had been lost (an empty literal silently
        # discarded the nonce); reconstructed here.
        return '<Ready(nonce={})>'.format(self.nonce)


class InvalidProposalError(Exception):
    pass


class HDCBlockHeader(BlockHeader):

    def check_pow(self, nonce=None):
        # PoW is irrelevant under BFT consensus; always accept
        return True


class HDCBlock(Block):
    pass
class TransientBlock(rlp.Serializable):

    """A partially decoded, unvalidated block."""

    fields = [
        ('header', HDCBlockHeader),
        ('transaction_list', rlp.sedes.CountableList(Transaction)),
        ('uncles', rlp.sedes.CountableList(BlockHeader))
    ]

    def __init__(self, header, transaction_list, uncles):
        self.header = header
        self.transaction_list = transaction_list
        self.uncles = uncles

    def to_block(self, env, parent=None):
        """Convert the transient block to a :class:`ethereum.blocks.Block`"""
        return Block(self.header, self.transaction_list, self.uncles, env=env, parent=parent)

    @property
    def hash(self):
        """The binary block hash
        This is equivalent to ``header.hash``.
        """
        return sha3(rlp.encode(self.header))

    @property
    def number(self):
        return self.header.number

    @property
    def prevhash(self):
        return self.header.prevhash


class Proposal(Signed):
    # common base class for BlockProposal and VotingInstruction
    pass


class BlockProposal(Proposal):

    """Signed proposal of a new block for (height, round).

    Carries two locksets:
    - signing_lockset: a Quorum for height-1, proving the parent is final
      and that this proposal is eligible
    - round_lockset: for round > 0 only, a NoQuorum for round-1, proving
      that no block can have been committed in the previous round
    """

    fields = [
        ('height', big_endian_int),
        ('round', big_endian_int),
        ('block', TransientBlock),
        ('signing_lockset', LockSet),
        ('round_lockset', LockSet)
    ] + Signed.fields

    def __init__(self, height, round, block, signing_lockset,
                 round_lockset=None, v=0, r=0, s=0):
        """
        if round == 0 the signing_lockset also proves,
        that proposal is eligible and we need not round_lockset
        """
        assert isinstance(block, (Block, TransientBlock))
        assert isinstance(signing_lockset, LockSet)
        assert round_lockset is None or isinstance(round_lockset, LockSet)
        assert round >= 0
        assert height > 0
        if round > 0 and not round_lockset:
            raise InvalidProposalError('R>0 needs a round lockset')
        if round == 0 and round_lockset:
            raise InvalidProposalError('R0 must not have a round lockset')
        self.height = height
        self.round = round
        self.block = block
        self.signing_lockset = signing_lockset
        # empty LockSet placeholder keeps the RLP field list uniform
        self.round_lockset = round_lockset or LockSet(0)

        super(BlockProposal, self).__init__(height, round, block, signing_lockset,
                                            self.round_lockset, v, r, s)

        # consistency checks between block, height/round and the locksets
        if block.header.number != self.height:
            raise InvalidProposalError('lockset.height / block.number mismatch')
        if self.round_lockset and height != self.round_lockset.height:
            raise InvalidProposalError('height mismatch')
        if not (round > 0 or self.lockset.has_quorum):
            raise InvalidProposalError('R0 lockset == signing lockset needs quorum')
        if not (round > 0 or self.lockset.height == block.header.number - 1):
            raise InvalidProposalError('R0 round lockset must be from previous height')
        if not (round == 0 or round == self.lockset.round + 1):
            raise InvalidProposalError('Rn round lockset must be from previous round')
        if not self.signing_lockset.has_quorum:
            raise InvalidProposalError('signing lockset needs quorum')
        if not (self.signing_lockset.height == self.height - 1):
            raise InvalidProposalError('signing lockset height mismatch')
        if self.round_lockset and not round_lockset.has_noquorum:
            raise InvalidProposalError('at R>0 can only propose if there is a NoQuorum for R-1')

        # cache the unsigned-payload hash; sender recovery re-checks it below
        self.rawhash = sha3(rlp.encode(self, self.__class__.exclude(['v', 'r', 's'])))
        if self.v:  # validate sender == block.coinbase
            assert self.sender

    @property
    def lockset(self):
        # the round_lockset for R>0, else the signing_lockset (R0)
        return self.round_lockset or self.signing_lockset

    @property
    def sender(self):
        # double check unmutable
        s = super(BlockProposal, self).sender
        if not s:
            raise InvalidProposalError('signature missing')
        assert self.rawhash
        assert self.v
        _rawhash = sha3(rlp.encode(self, self.__class__.exclude(['v', 'r', 's'])))
        assert self.rawhash == _rawhash
        assert len(s) == 20
        assert len(self.block.header.coinbase) == 20
        # only the block's coinbase (the designated proposer) may sign
        if s != self.block.header.coinbase:
            raise InvalidProposalError('signature does not match coinbase')
        return s

    def sign(self, privkey):
        super(BlockProposal, self).sign(privkey)
        if self.sender != self.block.header.coinbase:
            raise InvalidProposalError('signature does not match coinbase')

    def validate_votes(self, validators_H, validators_prevH):
        "set of validators may change between heights"
        assert self.sender

        def check(lockset, validators):
            # every vote in the lockset must come from the validator set
            # that was eligible at the lockset's height
            if not lockset.num_eligible_votes == len(validators):
                raise InvalidProposalError('lockset num_eligible_votes mismatch')
            for v in lockset:
                if v.sender not in validators:
                    raise InvalidProposalError('invalid signer')
        if self.round_lockset:
            check(self.round_lockset, validators_H)
        check(self.signing_lockset, validators_prevH)

        return True

    def __repr__(self):
        return "<%s S:%r H:%d R:%d BH:%s>" % (self.__class__.__name__, phx(self._sender),
                                              self.height, self.round, phx(self.blockhash))

    @property
    def blockhash(self):
        return self.block.hash
class ConsensusContract(object):

    """Static validator registry plus deterministic proposer rotation.

    Stands in for an on-chain contract: the validator set is fixed at
    construction time.
    """

    def __init__(self, validators):
        for validator in validators:
            assert isaddress(validator)
        self.validators = validators

    def proposer(self, height, round_):
        """Pick the proposer address for (height, round_).

        NOTE(review): relies on Python's built-in hash() of a repr string,
        which is interpreter/implementation dependent — verify that all
        nodes compute the same schedule.
        """
        seed = abs(hash(repr((height, round_))))
        return self.validators[seed % len(self.validators)]

    def isvalidator(self, address, height=0):
        """True if `address` belongs to the validator set (height is ignored)."""
        assert isaddress(address)
        assert len(self.validators)
        return address in self.validators

    def isproposer(self, p):
        """True if proposal `p` was sent by the scheduled proposer."""
        assert isinstance(p, Proposal)
        return p.sender == self.proposer(p.height, p.round)

    def num_eligible_votes(self, height):
        """Vote count used for quorum math; zero at genesis height."""
        if height == 0:
            return 0
        return len(self.validators)
class ManagerDict(object):

    """Lazily-populated mapping from key -> dklass(parent, key).

    Iteration yields keys sorted highest first (newest height/round first),
    which callers rely on when scanning rounds top-down.
    """

    def __init__(self, dklass, parent):
        # dklass: factory class, instantiated as dklass(parent, key) on
        # first access (like defaultdict, but the factory needs the key)
        self.d = dict()
        self.dklass = dklass
        self.parent = parent

    def __getitem__(self, k):
        if k not in self.d:
            self.d[k] = self.dklass(self.parent, k)
        return self.d[k]

    def __iter__(self):
        return iter(sorted(self.d, reverse=True))

    def pop(self, k):
        # fix: return the removed value (was silently discarded), matching
        # dict.pop semantics; raises KeyError for a missing key as before
        return self.d.pop(k)
increase per round 87 | transaction_timeout = 0.5 # delay when waiting for new transaction 88 | 89 | def __init__(self, chainservice, consensus_contract, privkey): 90 | self.chainservice = chainservice 91 | self.chain = chainservice.chain 92 | self.contract = consensus_contract 93 | self.privkey = privkey 94 | 95 | self.synchronizer = Synchronizer(self) 96 | self.heights = ManagerDict(HeightManager, self) 97 | self.block_candidates = dict() # blockhash : BlockProposal 98 | 99 | self.tracked_protocol_failures = list() 100 | 101 | # wait for enough validators in order to start 102 | self.ready_validators = set() # addresses 103 | self.ready_nonce = 0 104 | 105 | assert self.contract.isvalidator(self.coinbase) 106 | self.initialize_locksets() 107 | 108 | self.ready_validators = set([self.coinbase]) # old votes dont count 109 | 110 | def initialize_locksets(self): 111 | log.debug('initializing locksets') 112 | # sign genesis 113 | v = self.sign(VoteBlock(0, 0, self.chainservice.chain.genesis.hash)) 114 | self.add_vote(v) 115 | 116 | # add initial lockset 117 | head_proposal = self.load_proposal(self.head.hash) 118 | # assert head_proposal 119 | if head_proposal: 120 | assert head_proposal.blockhash == self.head.hash 121 | for v in head_proposal.signing_lockset: 122 | self.add_vote(v) # head - 1 , height -2 123 | assert self.heights[self.head.header.number - 1].has_quorum 124 | last_committing_lockset = self.load_last_committing_lockset() 125 | if last_committing_lockset: 126 | assert last_committing_lockset.has_quorum == self.head.hash 127 | for v in last_committing_lockset.votes: 128 | self.add_vote(v) # head , height - 1 129 | assert self.heights[self.head.header.number].has_quorum 130 | else: 131 | assert self.head.header.number == 0 132 | assert self.highest_committing_lockset 133 | assert self.last_committing_lockset 134 | assert self.last_valid_lockset 135 | 136 | # persist proposals and last committing lockset 137 | 138 | def store_last_committing_lockset(self, 
ls): 139 | assert isinstance(ls, LockSet) 140 | assert ls.has_quorum 141 | self.chainservice.db.put('last_committing_lockset', rlp.encode(ls)) 142 | 143 | def load_last_committing_lockset(self): 144 | try: 145 | data = self.chainservice.db.get('last_committing_lockset') 146 | except KeyError: 147 | self.log('no last_committing_lockset could be loaded') 148 | return 149 | return rlp.decode(data, sedes=LockSet) 150 | 151 | def store_proposal(self, p): 152 | assert isinstance(p, BlockProposal) 153 | self.chainservice.db.put('blockproposal:%s' % p.blockhash, rlp.encode(p)) 154 | 155 | def load_proposal_rlp(self, blockhash): 156 | try: 157 | prlp = self.chainservice.db.get('blockproposal:%s' % blockhash) 158 | assert isinstance(prlp, bytes) 159 | return prlp 160 | except KeyError: 161 | return None 162 | 163 | def load_proposal(self, blockhash): 164 | prlp = self.load_proposal_rlp(blockhash) 165 | if prlp: 166 | return rlp.decode(prlp, sedes=BlockProposal) 167 | 168 | def get_blockproposal(self, blockhash): 169 | return self.block_candidates.get(blockhash) or self.load_proposal(blockhash) 170 | 171 | def has_blockproposal(self, blockhash): 172 | return bool(self.load_proposal_rlp(blockhash)) 173 | 174 | def get_blockproposal_rlp_by_height(self, height): 175 | assert 0 < height < self.height 176 | bh = self.chainservice.chain.index.get_block_by_number(height) 177 | return self.load_proposal_rlp(bh) 178 | 179 | @property 180 | def coinbase(self): 181 | return self.chain.coinbase 182 | 183 | def set_proposal_lock(self, block): 184 | self.chainservice.set_proposal_lock(block) 185 | 186 | def __repr__(self): 187 | return '' % (phx(self.coinbase), self.height, self.round, 188 | self.active_round.lock, 189 | self.active_round.lockset.state) 190 | 191 | def log(self, tag, **kargs): 192 | # if self.coinbase != 0: return 193 | t = int(self.chainservice.now) 194 | c = lambda x: cstr(self.coinbase, x) 195 | msg = ' '.join([str(t), c(repr(self)), tag, (' %r' % kargs if kargs else 
'')]) 196 | log.debug(msg) 197 | 198 | @property 199 | def head(self): 200 | return self.chain.head 201 | 202 | @property 203 | def height(self): 204 | return self.head.number + 1 205 | 206 | @property 207 | def round(self): 208 | return self.heights[self.height].round 209 | 210 | # message handling 211 | 212 | def broadcast(self, m): 213 | self.log('broadcasting', message=m) 214 | self.chainservice.broadcast(m) 215 | 216 | # validator ready handling 217 | 218 | @property 219 | def is_ready(self): 220 | return len(self.ready_validators) > len(self.contract.validators) * 2 / 3. 221 | 222 | def send_ready(self): 223 | self.log('cm.send_ready') 224 | assert not self.is_ready 225 | r = Ready(self.ready_nonce, self.active_round.lockset) 226 | self.sign(r) 227 | self.broadcast(r) 228 | self.ready_nonce += 1 229 | 230 | def add_ready(self, ready, proto=None): 231 | assert isinstance(ready, Ready) 232 | assert self.contract.isvalidator(ready.sender) 233 | self.ready_validators.add(ready.sender) 234 | self.log('cm.add_ready', validator=ready.sender) 235 | if self.is_ready: 236 | self.log('cm.add_ready, sufficient count of validators ready', 237 | num=len(self.ready_validators)) 238 | else: 239 | self.send_ready() 240 | 241 | def add_vote(self, v, proto=None): 242 | assert isinstance(v, Vote) 243 | assert self.contract.isvalidator(v.sender) 244 | self.ready_validators.add(v.sender) 245 | # exception for externaly received votes signed by self, necessary for resyncing 246 | is_own_vote = bool(v.sender == self.coinbase) 247 | try: 248 | success = self.heights[v.height].add_vote(v, force_replace=is_own_vote) 249 | except DoubleVotingError: 250 | ls = self.heights[v.height].rounds[v.round].lockset 251 | self.tracked_protocol_failures.append(DoubleVotingEvidence(proto, v, ls)) 252 | log.warn('double voting detected', vote=v, ls=ls) 253 | return success 254 | 255 | def add_proposal(self, p, proto=None): 256 | assert isinstance(p, Proposal) 257 | assert proto is None or 
isinstance(proto, HDCProtocol) 258 | 259 | def check(valid): 260 | if not valid: 261 | self.tracked_protocol_failures.append(InvalidProposalEvidence(None, p)) 262 | log.warn('invalid proposal', p=p) 263 | raise InvalidProposalError() 264 | return True 265 | 266 | self.log('cm.add_proposal', p=p) 267 | if p.height < self.height: 268 | self.log('proposal from the past') 269 | return 270 | 271 | if not check(self.contract.isvalidator(p.sender) and self.contract.isproposer(p)): 272 | return 273 | self.ready_validators.add(p.sender) 274 | 275 | if not check(p.lockset.is_valid): 276 | return 277 | if not check(p.lockset.height == p.height or p.round == 0): 278 | return 279 | if not check(p.round - p.lockset.round == 1 or p.round == 0): 280 | return 281 | 282 | # proposal is valid 283 | if proto is not None: # inactive proto is False 284 | self.synchronizer.on_proposal(p, proto) 285 | 286 | for v in p.lockset: 287 | self.add_vote(v) # implicitly checks their validity 288 | if isinstance(p, BlockProposal): 289 | if not check(p.block.number == p.height): 290 | return 291 | if not check(p.lockset.has_noquorum or p.round == 0): 292 | return 293 | # validation 294 | if p.height > self.height: 295 | self.log('proposal from the future, not in sync', p=p) 296 | return # note: we are not broadcasting this, as we could not validate 297 | blk = self.chainservice.link_block(p.block) 298 | if not check(blk): 299 | # safeguard for forks: 300 | # if there is a quorum on a block which can not be applied: panic! 
301 | ls = self.heights[p.height].last_quorum_lockset 302 | if ls and ls.has_quorum == p.blockhash: 303 | raise ForkDetectedEvidence(proto, (self.head, p, ls)) 304 | sys.exit(1) 305 | return 306 | p._mutable = True 307 | p._cached_rlp = None 308 | p.block = blk # block linked to chain 309 | self.log('successfully linked block') 310 | self.add_block_proposal(p) # implicitly checks the votes validity 311 | else: 312 | assert isinstance(p, VotingInstruction) 313 | assert p.lockset.round == p.round - 1 and p.height == p.lockset.height 314 | assert p.round > 0 315 | assert p.lockset.has_quorum_possible 316 | assert not p.lockset.has_quorum 317 | if not check(p.lockset.has_quorum_possible and not p.lockset.has_quorum): 318 | return 319 | is_valid = self.heights[p.height].add_proposal(p) 320 | return is_valid # can be broadcasted 321 | 322 | def add_lockset(self, ls, proto=None): 323 | assert ls.is_valid 324 | for v in ls: 325 | self.add_vote(v) # implicitly checks their validity 326 | 327 | def add_block_proposal(self, p): 328 | assert isinstance(p, BlockProposal) 329 | if self.has_blockproposal(p.blockhash): 330 | self.log('known block_proposal') 331 | return 332 | assert p.signing_lockset.has_quorum # on previous block 333 | assert p.signing_lockset.height == p.height - 1 334 | for v in p.signing_lockset: 335 | self.add_vote(v) 336 | self.block_candidates[p.blockhash] = p 337 | 338 | @property 339 | def last_committing_lockset(self): 340 | return self.heights[self.height - 1].last_quorum_lockset 341 | 342 | @property 343 | def highest_committing_lockset(self): 344 | for height in self.heights: 345 | ls = self.heights[height].last_quorum_lockset 346 | if ls: 347 | return ls 348 | 349 | @property 350 | def last_valid_lockset(self): 351 | return self.heights[self.height].last_valid_lockset or self.last_committing_lockset 352 | 353 | @property 354 | def last_lock(self): 355 | return self.heights[self.height].last_lock 356 | 357 | @property 358 | def 
last_blockproposal(self): 359 | # valid block proposal on currrent height 360 | p = self.heights[self.height].last_voted_blockproposal 361 | if p: 362 | return p 363 | elif self.height > 1: # or last block 364 | return self.get_blockproposal(self.head.hash) 365 | 366 | @property 367 | def active_round(self): 368 | hm = self.heights[self.height] 369 | return hm.rounds[hm.round] 370 | 371 | def setup_alarm(self): 372 | ar = self.active_round 373 | delay = ar.get_timeout() 374 | self.log('in set up alarm', delay=delay) 375 | if self.is_waiting_for_proposal: 376 | if delay is not None: 377 | self.chainservice.setup_alarm(delay, self.on_alarm, ar) 378 | self.log('set up alarm on timeout', now=self.chainservice.now, 379 | delay=delay, triggered=delay + self.chainservice.now) 380 | else: 381 | self.chainservice.setup_transaction_alarm(self.on_alarm, ar) 382 | self.log('set up alarm on tx', now=self.chainservice.now) 383 | 384 | def on_alarm(self, ar): 385 | assert isinstance(ar, RoundManager) 386 | if self.active_round == ar: 387 | self.log('on alarm, matched', ts=self.chainservice.now) 388 | if not self.is_ready: 389 | # defer alarm if not ready 390 | self.log('not ready defering alarm', ts=self.chainservice.now) 391 | self.setup_alarm() 392 | elif not self.is_waiting_for_proposal: 393 | # defer alarm if there are no pending transactions 394 | self.log('no txs defering alarm', ts=self.chainservice.now) 395 | self.setup_alarm() 396 | else: 397 | self.process() 398 | 399 | @property 400 | def is_waiting_for_proposal(self): 401 | return self.allow_empty_blocks \ 402 | or self.has_pending_transactions \ 403 | or self.height <= self.num_initial_blocks 404 | 405 | @property 406 | def has_pending_transactions(self): 407 | return self.chain.head_candidate.num_transactions() > 0 408 | 409 | def process(self): 410 | r = self._process() 411 | return r 412 | 413 | def _process(self): 414 | self.log('-' * 40) 415 | self.log('in process') 416 | if not self.is_ready: 417 | 
self.log('not ready ') 418 | self.setup_alarm() 419 | return 420 | self.commit() 421 | self.heights[self.height].process() 422 | if self.commit(): # re enter process if we did commit (e.g. to immediately propose) 423 | return self._process() 424 | self.cleanup() 425 | self.synchronizer.process() 426 | self.setup_alarm() 427 | 428 | for f in self.tracked_protocol_failures: 429 | if not isinstance(f, FailedToProposeEvidence): 430 | log.warn('protocol failure', incident=f) 431 | 432 | start = process 433 | 434 | def commit(self): 435 | self.log('in commit') 436 | for p in [c for c in self.block_candidates.values() if c.block.prevhash == self.head.hash]: 437 | assert isinstance(p, BlockProposal) 438 | ls = self.heights[p.height].last_quorum_lockset 439 | if ls and ls.has_quorum == p.blockhash: 440 | self.store_proposal(p) 441 | self.store_last_committing_lockset(ls) 442 | success = self.chainservice.commit_block(p.block) 443 | assert success 444 | if success: 445 | self.log('commited', p=p, hash=phx(p.blockhash)) 446 | assert self.head == p.block 447 | self.commit() # commit all possible 448 | return True 449 | else: 450 | self.log('could not commit', p=p) 451 | else: 452 | self.log('no quorum for', p=p) 453 | if ls: 454 | self.log('votes', votes=ls.votes) 455 | 456 | def cleanup(self): 457 | self.log('in cleanup') 458 | for p in self.block_candidates.values(): 459 | if self.head.number >= p.height: 460 | self.block_candidates.pop(p.blockhash) 461 | for h in list(self.heights): 462 | if self.heights[h].height < self.head.number: 463 | self.heights.pop(h) 464 | 465 | def mk_lockset(self, height): 466 | return LockSet(num_eligible_votes=self.contract.num_eligible_votes(height)) 467 | 468 | def sign(self, o): 469 | assert isinstance(o, Signed) 470 | return o.sign(self.privkey) 471 | 472 | 473 | class HeightManager(object): 474 | 475 | def __init__(self, consensusmanager, height=0): 476 | self.cm = consensusmanager 477 | self.log = self.cm.log 478 | self.height = height 
479 | self.rounds = ManagerDict(RoundManager, self) 480 | log.debug('A:%s Created HeightManager H:%d' % (phx(self.cm.coinbase), self.height)) 481 | 482 | @property 483 | def round(self): 484 | l = self.last_valid_lockset 485 | if l: 486 | return l.round + 1 487 | return 0 488 | 489 | @property 490 | def last_lock(self): 491 | "highest lock on height" 492 | rs = list(self.rounds) 493 | assert len(rs) < 2 or rs[0] > rs[1] # FIXME REMOVE 494 | for r in self.rounds: # is sorted highest to lowest 495 | if self.rounds[r].lock is not None: 496 | return self.rounds[r].lock 497 | 498 | @property 499 | def last_voted_blockproposal(self): 500 | "the last block proposal node voted on" 501 | for r in self.rounds: 502 | if isinstance(self.rounds[r].proposal, BlockProposal): 503 | assert isinstance(self.rounds[r].lock, Vote) 504 | if self.rounds[r].proposal.blockhash == self.rounds[r].lock.blockhash: 505 | return self.rounds[r].proposal 506 | 507 | @property 508 | def last_valid_lockset(self): 509 | "highest valid lockset on height" 510 | for r in self.rounds: 511 | ls = self.rounds[r].lockset 512 | if ls.is_valid: 513 | return ls 514 | return None 515 | 516 | @property 517 | def last_quorum_lockset(self): 518 | found = None 519 | for r in sorted(self.rounds): # search from lowest round first 520 | ls = self.rounds[r].lockset 521 | if ls.is_valid and ls.has_quorum: 522 | if found is not None: # consistency check, only one quorum on block allowed 523 | for r in sorted(self.rounds): # dump all locksets 524 | self.log('multiple valid locksets', round=r, ls=self.rounds[r].lockset, 525 | votes=self.rounds[r].lockset.votes) 526 | if found.has_quorum != ls.has_quorum: 527 | log.error('FATAL: multiple valid locksets on different proposals') 528 | import sys 529 | sys.exit(1) 530 | found = ls 531 | return found 532 | 533 | @property 534 | def has_quorum(self): 535 | ls = self.last_quorum_lockset 536 | if ls: 537 | return ls.has_quorum 538 | 539 | def add_vote(self, v, 
force_replace=False): 540 | return self.rounds[v.round].add_vote(v, force_replace) 541 | 542 | def add_proposal(self, p): 543 | assert p.height == self.height 544 | assert p.lockset.is_valid 545 | if p.round > self.round: 546 | self.round = p.round 547 | return self.rounds[p.round].add_proposal(p) 548 | 549 | def process(self): 550 | self.log('in hm.process', height=self.height) 551 | self.rounds[self.round].process() 552 | 553 | 554 | class RoundManager(object): 555 | 556 | def __init__(self, heightmanager, round_=0): 557 | 558 | assert isinstance(round_, int) 559 | self.round = round_ 560 | 561 | self.hm = heightmanager 562 | self.cm = heightmanager.cm 563 | self.log = self.hm.log 564 | self.height = heightmanager.height 565 | self.lockset = self.cm.mk_lockset(self.height) 566 | self.proposal = None 567 | self.lock = None 568 | self.timeout_time = None 569 | log.debug('A:%s Created RoundManager H:%d R:%d' % 570 | (phx(self.cm.coinbase), self.hm.height, self.round)) 571 | 572 | def get_timeout(self): 573 | "setup a timeout for waiting for a proposal" 574 | if self.timeout_time is not None or self.proposal: 575 | return 576 | now = self.cm.chainservice.now 577 | round_timeout = ConsensusManager.round_timeout 578 | round_timeout_factor = ConsensusManager.round_timeout_factor 579 | delay = round_timeout * round_timeout_factor ** self.round 580 | self.timeout_time = now + delay 581 | return delay 582 | 583 | def add_vote(self, v, force_replace=False): 584 | if v in self.lockset: 585 | return 586 | self.log('rm.adding', vote=v, received_proposal=self.proposal) 587 | try: 588 | success = self.lockset.add(v, force_replace) 589 | except InvalidVoteError: 590 | self.cm.tracked_protocol_failures.append(InvalidVoteEvidence(None, v)) 591 | return 592 | # report failed proposer 593 | if self.lockset.is_valid: 594 | self.log('lockset is valid', ls=self.lockset) 595 | if not self.proposal and self.lockset.has_noquorum: 596 | self.cm.tracked_protocol_failures.append( 597 | 
FailedToProposeEvidence(None, self.lockset)) 598 | return success 599 | 600 | def add_proposal(self, p): 601 | self.log('rm.adding', proposal=p, old=self.proposal) 602 | assert isinstance(p, Proposal) 603 | assert isinstance(p, VotingInstruction) or isinstance(p.block, Block) # already linked 604 | assert not self.proposal or self.proposal == p 605 | self.proposal = p 606 | return True 607 | 608 | def process(self): 609 | self.log('in rm.process', height=self.hm.height, round=self.round) 610 | 611 | assert self.cm.round == self.round 612 | assert self.cm.height == self.hm.height == self.height 613 | p = self.propose() 614 | if isinstance(p, BlockProposal): 615 | self.cm.add_block_proposal(p) 616 | if p: 617 | self.cm.broadcast(p) 618 | v = self.vote() 619 | if v: 620 | self.cm.broadcast(v) 621 | assert not self.proposal or self.lock 622 | 623 | def mk_proposal(self, round_lockset=None): 624 | signing_lockset = self.cm.last_committing_lockset.copy() # quorum which signs prev block 625 | if self.round > 0: 626 | round_lockset = self.cm.last_valid_lockset.copy() 627 | assert round_lockset.has_noquorum 628 | else: 629 | round_lockset = None 630 | assert signing_lockset.has_quorum 631 | # for R0 (std case) we only need one lockset! 
632 | assert round_lockset is None or self.round > 0 633 | block = self.cm.chain.head_candidate 634 | # fix pow 635 | block.header.__class__ = HDCBlockHeader 636 | block.should_be_locked = True 637 | bp = BlockProposal(self.height, self.round, block, signing_lockset, round_lockset) 638 | self.cm.sign(bp) 639 | self.cm.set_proposal_lock(block) 640 | assert self.cm.chainservice.proposal_lock.locked() 641 | return bp 642 | 643 | def propose(self): 644 | if not self.cm.is_waiting_for_proposal: 645 | return 646 | proposer = self.cm.contract.proposer(self.height, self.round) 647 | self.log('in propose', proposer=phx(proposer), proposal=self.proposal, lock=self.lock) 648 | if proposer != self.cm.coinbase: 649 | return 650 | self.log('is proposer') 651 | if self.proposal: 652 | assert self.proposal.sender == self.cm.coinbase 653 | assert self.lock 654 | return 655 | 656 | round_lockset = self.cm.last_valid_lockset 657 | if not round_lockset: 658 | self.log('no valid round lockset for height') 659 | return 660 | 661 | self.log('in creating proposal', round_lockset=round_lockset) 662 | 663 | if round_lockset.height == self.height and round_lockset.has_quorum: 664 | self.log('have quorum on height, not proposing') 665 | return 666 | elif self.round == 0 or round_lockset.has_noquorum: 667 | proposal = self.mk_proposal() 668 | elif round_lockset.has_quorum_possible: 669 | proposal = VotingInstruction(self.height, self.round, round_lockset.copy()) 670 | self.cm.sign(proposal) 671 | else: 672 | raise Exception('invalid round_lockset') 673 | 674 | self.log('created proposal', p=proposal, bh=phx(proposal.blockhash)) 675 | self.proposal = proposal 676 | return proposal 677 | 678 | def vote(self): 679 | if self.lock: 680 | return # voted in this round 681 | self.log('in vote', proposal=self.proposal, pid=id(self.proposal)) 682 | 683 | # get last lock on height 684 | last_lock = self.hm.last_lock 685 | 686 | if self.proposal: 687 | if isinstance(self.proposal, VotingInstruction): 688 
class HDCProtocolError(SubProtocolError):
    """Raised on violations of the HydraChain wire protocol."""
    pass
max_cmd_id = 15 # FIXME 22 | name = 'hdc' 23 | version = 1 24 | max_getproposals_count = 10 25 | 26 | def __init__(self, peer, service): 27 | # required by P2PProtocol 28 | self.config = peer.config 29 | BaseProtocol.__init__(self, peer, service) 30 | 31 | class status(BaseProtocol.command): 32 | 33 | """ 34 | protocolVersion: The version of the HydraChain protocol this peer implements. 35 | networkID: The network version of Ethereum for this peer. 36 | GenesisHash: The hash of the Genesis block. 37 | current_lockset: The lockset of the current round from the responding peer 38 | """ 39 | cmd_id = 0 40 | sent = False 41 | 42 | structure = [ 43 | ('eth_version', rlp.sedes.big_endian_int), 44 | ('network_id', rlp.sedes.big_endian_int), 45 | ('genesis_hash', rlp.sedes.binary), 46 | ('current_lockset', LockSet) 47 | ] 48 | 49 | def create(self, proto, genesis_hash, current_lockset): 50 | self.sent = True 51 | network_id = proto.service.app.config['eth'].get('network_id', proto.network_id) 52 | return [proto.version, network_id, genesis_hash, current_lockset] 53 | 54 | class transactions(BaseProtocol.command): 55 | 56 | """ 57 | Specify (a) transaction(s) that the peer should make sure is included on its transaction 58 | queue. The items in the list (following the first item 0x12) are transactions in the 59 | format described in the main Ethereum specification. Nodes must not resend the same 60 | transaction to a peer in the same session. This packet must contain at least one (new) 61 | transaction. 
62 | """ 63 | cmd_id = 1 64 | structure = rlp.sedes.CountableList(Transaction) 65 | 66 | # todo: bloomfilter: so we don't send tx to the originating peer 67 | 68 | @classmethod 69 | def decode_payload(cls, rlp_data): 70 | # convert to dict 71 | txs = [] 72 | for i, tx in enumerate(rlp.decode_lazy(rlp_data)): 73 | txs.append(Transaction.deserialize(tx)) 74 | if not i % 10: 75 | gevent.sleep(0.0001) 76 | return txs 77 | 78 | class getblockproposals(BaseProtocol.command): 79 | 80 | """ 81 | Requests a BlockProposals message detailing a number of blocks to be sent, each referred to 82 | by block number. Note: Don't expect that the peer necessarily give you all these blocks 83 | in a single message - you might have to re-request them. 84 | """ 85 | cmd_id = 2 86 | structure = rlp.sedes.CountableList(rlp.sedes.big_endian_int) 87 | 88 | class blockproposals(BaseProtocol.command): 89 | 90 | """ 91 | BlockProposals sent in response to a getproposals request 92 | """ 93 | cmd_id = 3 94 | structure = rlp.sedes.CountableList(BlockProposal) 95 | 96 | @classmethod 97 | def encode_payload(cls, list_of_rlp): 98 | """ 99 | rlp data directly from the database 100 | """ 101 | assert isinstance(list_of_rlp, tuple) 102 | assert not list_of_rlp or isinstance(list_of_rlp[0], bytes) 103 | return rlp.encode([rlp.codec.RLPData(x) for x in list_of_rlp], infer_serializer=False) 104 | 105 | class newblockproposal(BaseProtocol.command): 106 | 107 | """ 108 | Specify a single BlockProposal that the peer should know about. 109 | """ 110 | cmd_id = 4 111 | structure = [('proposal', BlockProposal)] 112 | 113 | class votinginstruction(BaseProtocol.command): 114 | 115 | """ 116 | Specify a single VotingInstruction that the peer should know about. 117 | """ 118 | cmd_id = 5 119 | structure = [('votinginstruction', VotingInstruction)] 120 | 121 | class vote(BaseProtocol.command): 122 | 123 | """ 124 | Specify a single Vote that the peer should know about. 
125 | """ 126 | cmd_id = 6 127 | structure = [('vote', Vote)] 128 | 129 | class ready(BaseProtocol.command): 130 | cmd_id = 7 131 | structure = [('ready', Ready)] 132 | -------------------------------------------------------------------------------- /hydrachain/consensus/simulation.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2015 Heiko Hees 2 | import copy 3 | import random 4 | import tempfile 5 | import time 6 | from collections import Counter 7 | 8 | import ethereum.keys 9 | import gevent 10 | import simpy 11 | from ethereum import slogging 12 | from ethereum.db import EphemDB 13 | from ethereum.utils import big_endian_to_int, sha3, privtoaddr 14 | from pyethapp.accounts import Account, AccountsService 15 | 16 | from hydrachain import hdc_service 17 | from hydrachain.consensus import protocol as hdc_protocol 18 | from hydrachain.consensus.base import Block 19 | from hydrachain.consensus.manager import ConsensusManager 20 | from hydrachain.consensus.utils import num_colors, phx 21 | from hydrachain.hdc_service import ChainService 22 | 23 | 24 | log = slogging.get_logger('hdc.sim') 25 | 26 | # stop on exception 27 | gevent.get_hub().SYSTEM_ERROR = BaseException 28 | 29 | # reduce key derivation iterations 30 | ethereum.keys.PBKDF2_CONSTANTS['c'] = 100 31 | 32 | privkeys = [chr(i) * 32 for i in range(1, 11)] 33 | validators = privkeys[:] 34 | 35 | 36 | empty = object() 37 | 38 | random.seed(42) 39 | 40 | 41 | def mk_privkeys(num): 42 | "make privkeys that support coloring, see utils.cstr" 43 | privkeys = [] 44 | assert num <= num_colors 45 | for i in range(num): 46 | j = 0 47 | while True: 48 | k = sha3(str(j)) 49 | a = privtoaddr(k) 50 | an = big_endian_to_int(a) 51 | if an % num_colors == i: 52 | break 53 | j += 1 54 | privkeys.append(k) 55 | return privkeys 56 | 57 | 58 | class Transport(object): 59 | 60 | def __init__(self, simenv=None): 61 | self.simenv = simenv 62 | 63 | def delay(self, sender, 
receiver, packet, add_delay=0): 64 | """ 65 | bandwidths are inaccurate, as we don't account for parallel transfers here 66 | """ 67 | bw = min(sender.ul_bandwidth, receiver.dl_bandwidth) 68 | delay = sender.base_latency + receiver.base_latency 69 | delay += len(packet) / bw 70 | delay += add_delay 71 | return delay 72 | 73 | def deliver(self, sender, receiver, packet, add_delay=0): 74 | delay = self.delay(sender, receiver, packet, add_delay) 75 | if self.simenv: 76 | self.simenv_deliver(sender, receiver, packet, delay) 77 | else: 78 | self.gevent_deliver(sender, receiver, packet, delay) 79 | 80 | def gevent_deliver(self, sender, receiver, packet, delay): 81 | assert sender != receiver 82 | 83 | def transfer(): 84 | gevent.sleep(delay) 85 | receiver.receive_packet(sender, packet) 86 | gevent.spawn(transfer) 87 | 88 | def simenv_deliver(self, sender, receiver, packet, delay): 89 | def transfer(): 90 | yield self.simenv.timeout(delay) 91 | receiver.receive_packet(sender, packet) 92 | 93 | self.simenv.process(transfer()) 94 | 95 | 96 | class NoTransport(Transport): 97 | 98 | def deliver(self, sender, receiver, packet): 99 | pass 100 | 101 | 102 | class SlowTransport(Transport): 103 | 104 | def deliver(self, sender, receiver, packet): 105 | "deliver on edge of timeout_window" 106 | to = ConsensusManager.round_timeout 107 | assert to > 0 108 | print "in slow transport deliver" 109 | super(SlowTransport, self).deliver(sender, receiver, packet, add_delay=to) 110 | 111 | 112 | class PeerMock(object): 113 | 114 | ul_bandwidth = 1 * 10 ** 6 # bytes/s net bandwidth 115 | dl_bandwidth = 1 * 10 ** 6 # bytes/s net bandwidth 116 | base_latency = 0.05 # secs 117 | ingress_bytes = 0 118 | egress_bytes = 0 119 | 120 | def __init__(self, app, transport): 121 | self.app = app 122 | self.config = app.config 123 | self.remote_client_version = empty 124 | self.peer = None 125 | self.protocol = None 126 | self.transport = transport 127 | 128 | def __repr__(self): 129 | return " R:%s)>" % 
(phx(self.coinbase), phx(self.peer.coinbase)) 130 | 131 | @property 132 | def coinbase(self): 133 | return self.app.services.chainservice.chain.coinbase 134 | 135 | def send_packet(self, packet): 136 | assert self.peer 137 | # log.debug('send_packet', sender=self, receiver=self.peer, len=len(packet)) 138 | self.egress_bytes += len(packet) 139 | self.transport.deliver(self, self.peer, packet) 140 | 141 | def receive_packet(self, sender, packet): 142 | assert self.app.isactive 143 | assert sender != self 144 | # log.debug('receive_packet', sender=sender, receiver=self, len=len(packet)) 145 | self.ingress_bytes += len(packet) 146 | self.protocol.receive_packet(packet) 147 | 148 | def __eq__(self, other): 149 | return repr(self) == repr(other) 150 | 151 | def __hash__(self): 152 | return hash(repr(self)) 153 | 154 | 155 | class PeerManagerMock(object): 156 | 157 | def __init__(self, app): 158 | self.app = app 159 | self.peers = [] 160 | 161 | def __repr__(self): 162 | return "" % phx(self.coinbase) 163 | 164 | @property 165 | def coinbase(self): 166 | return self.app.services.chainservice.chain.coinbase 167 | 168 | def broadcast(self, protocol, command_name, args=[], kargs={}, 169 | num_peers=None, exclude_peers=[]): 170 | for p in self.peers: 171 | assert p.coinbase == self.coinbase 172 | assert p.peer.coinbase != self.coinbase 173 | assert isinstance(p.protocol, (protocol, hdc_protocol.HDCProtocol)) 174 | if not exclude_peers or p not in exclude_peers: 175 | log.debug('broadcasting', sender=self, receiver=p, obj=args) 176 | func = getattr(p.protocol, 'send_' + command_name) 177 | func(*args, **kargs) 178 | 179 | 180 | class SimChainService(ChainService): 181 | 182 | processing_time = 0.1 # time to validate block 183 | 184 | def __init__(self, *args, **kargs): 185 | self.simenv = kargs.pop('simenv') 186 | super(SimChainService, self).__init__(*args, **kargs) 187 | 188 | @property 189 | def now(self): 190 | assert self.simenv 191 | return self.simenv.now 192 | 193 | 
def setup_alarm(self, delay, cb, *args): 194 | assert self.simenv 195 | 196 | def _trigger(): 197 | yield self.simenv.timeout(delay) 198 | cb(*args) 199 | self.simenv.process(_trigger()) 200 | 201 | def on_receive_newblockproposal(self, proto, proposal): 202 | 203 | def process(): 204 | yield self.simenv.timeout(self.processing_time) 205 | super(SimChainService, self).on_receive_newblockproposal(proto, proposal) 206 | 207 | self.simenv.process(process()) 208 | 209 | 210 | class AppMock(object): 211 | 212 | class Services(dict): 213 | pass 214 | 215 | def __init__(self, privkey, validators, simenv=None): 216 | self.config = copy.deepcopy(hdc_service.ChainService.default_config) 217 | self.config['db'] = dict(path='_db') 218 | self.config['data_dir'] = tempfile.mkdtemp() 219 | self.config['hdc']['validators'] = validators 220 | 221 | initial_alloc = dict((a, dict(wei=2 ** 200)) for a in validators) 222 | self.config['eth']['block']['GENESIS_INITIAL_ALLOC'] = initial_alloc 223 | 224 | self.simenv = simenv 225 | self.services = self.Services() 226 | self.services.db = EphemDB() 227 | self.services.accounts = AccountsService(self) 228 | self.services.peermanager = PeerManagerMock(self) 229 | account = Account.new(password='', key=privkey) 230 | self.services.accounts.add_account(account, store=False) 231 | if simenv: 232 | self.services.chainservice = SimChainService(self, simenv=simenv) 233 | else: 234 | self.services.chainservice = hdc_service.ChainService(self) 235 | self.isactive = True 236 | 237 | def __repr__(self): 238 | return '' % phx(self.services.chainservice.chain.coinbase) 239 | 240 | def add_peer(self, peer): 241 | if peer in self.services.peermanager.peers: 242 | return 243 | self.services.peermanager.peers.append(peer) 244 | proto = hdc_protocol.HDCProtocol(peer, self.services.chainservice) 245 | peer.protocol = proto 246 | return True 247 | 248 | def connect_app(self, other): 249 | log.debug('connecting', node=self, other=other) 250 | transport = 
Transport(self.simenv) 251 | p = PeerMock(self, transport) 252 | op = PeerMock(other, transport) 253 | p.peer = op 254 | op.peer = p 255 | if self.add_peer(p): 256 | self.services.chainservice.on_wire_protocol_start(p.protocol) 257 | if other.add_peer(op): 258 | other.services.chainservice.on_wire_protocol_start(op.protocol) 259 | 260 | 261 | class Network(object): 262 | 263 | starttime = None 264 | 265 | def __init__(self, num_nodes=2, simenv=None): 266 | if simenv: 267 | self.simenv = simpy.Environment() 268 | else: 269 | self.simenv = None 270 | privkeys = mk_privkeys(num_nodes) 271 | validators = [privtoaddr(p) for p in privkeys] 272 | self.nodes = [] 273 | for i in range(num_nodes): 274 | app = AppMock(privkeys[i], validators, self.simenv) 275 | self.nodes.append(app) 276 | 277 | def connect_nodes(self): 278 | # connect nodes 279 | for i, n in enumerate(self.nodes): 280 | for o in self.nodes[i + 1:]: 281 | if n.isactive and o.isactive: 282 | n.connect_app(o) 283 | 284 | def start(self): 285 | # start nodes 286 | for n in self.nodes: 287 | if n.isactive: 288 | cm = n.services.chainservice.consensus_manager 289 | if True or self.simenv: # set dummy ready!" 
290 | cm.ready_validators = set(range(int(len(cm.contract.validators)))) 291 | assert cm.is_ready 292 | cm.process() 293 | 294 | def run(self, duration): 295 | if self.simenv: 296 | self.simenv.run(until=self.elapsed + duration) 297 | else: 298 | if not self.starttime: 299 | self.starttime = time.time() 300 | gevent.sleep(duration) 301 | 302 | @property 303 | def elapsed(self): 304 | if self.simenv: 305 | return self.simenv.now 306 | else: 307 | return time.time() - self.starttime 308 | 309 | def disable_validators(self, num): 310 | assert num <= len(self.nodes) 311 | for i in range(num): 312 | n = self.nodes[i] 313 | for p in n.services.peermanager.peers: 314 | p.transport = NoTransport(self.simenv) 315 | 316 | def throttle_validators(self, num): 317 | assert num <= len(self.nodes) 318 | for i in reversed(range(num)): 319 | n = self.nodes[i] 320 | for p in n.services.peermanager.peers: 321 | p.transport = SlowTransport(self.simenv) 322 | 323 | def normvariate_base_latencies(self, sigma_factor=0.5, base_latency=None): 324 | min_latency = 0.001 325 | for n in self.nodes: 326 | for p in n.services.peermanager.peers: 327 | p.base_latency = base_latency or p.base_latency 328 | sigma = p.base_latency * sigma_factor 329 | p.base_latency = max(min_latency, random.normalvariate(p.base_latency, sigma)) 330 | assert p.base_latency > 0 331 | 332 | def consensus_managers(self): 333 | return [n.services.chainservice.consensus_manager for n in self.nodes] 334 | 335 | def check_consistency(self): 336 | if self.simenv: 337 | elapsed = self.simenv.now 338 | else: 339 | elapsed = time.time() - self.starttime 340 | cs = self.consensus_managers() 341 | # check they are all on the same block or the previous one 342 | heights = Counter(c.chain.head.number for c in cs) 343 | height_distance = max(heights.keys()) - min(heights.keys()) 344 | max_height = height = max(heights) 345 | 346 | # check they are all using the same block 347 | while height > 0: 348 | bs = 
list(set(c.chain.index.get_block_by_number(height) for c in cs 349 | if c.chain.index.has_block_by_number(height))) 350 | assert len(bs) == 1 or (len(bs) == 2 and None in bs), bs 351 | height -= 1 352 | 353 | # highest round seen (i.e. number of failed proposers) 354 | max_rounds = 0 355 | for c in cs: 356 | blk = c.chain.head 357 | while blk.number > 0: 358 | p = c.load_proposal(blk.hash) 359 | max_rounds = max(max_rounds, p.signing_lockset.round) 360 | bh = c.chain.index.get_block_by_number(blk.number - 1) 361 | blk = c.chain.get(bh) 362 | assert isinstance(blk, Block) 363 | max_rounds += 1 364 | 365 | # messages 366 | ingress_bytes_transfered = 0 367 | egress_bytes_transfered = 0 368 | 369 | for n in self.nodes: 370 | for p in n.services.peermanager.peers: 371 | ingress_bytes_transfered += p.ingress_bytes 372 | egress_bytes_transfered += p.egress_bytes 373 | 374 | r = dict(max_height=max_height, 375 | max_rounds=max_rounds, 376 | heights=heights, 377 | ingress_bytes_transfered=ingress_bytes_transfered, 378 | egress_bytes_transfered=egress_bytes_transfered, 379 | elapsed=elapsed, 380 | height_distance=height_distance) 381 | log.debug('checked consistency', r=r) 382 | return r 383 | 384 | # funcs making assumptions on the return value of Network.check_consistency 385 | 386 | 387 | def assert_blocktime(r, max_avg_blocktime=1): 388 | print r 389 | assert r['elapsed'] / r['max_height'] < max_avg_blocktime 390 | 391 | 392 | def assert_maxrounds(r, max_rounds=1): 393 | assert max_rounds > 0 394 | assert r['max_rounds'] == max_rounds 395 | 396 | 397 | def assert_heightdistance(r, max_distance=0): 398 | assert r['max_height'] > 0 399 | assert r['height_distance'] <= max_distance 400 | 401 | 402 | def main(num_nodes=10, sim_duration=10, timeout=0.5, 403 | base_latency=0.05, latency_sigma_factor=0.5, 404 | num_faulty_nodes=3, num_slow_nodes=0): 405 | 406 | slogging.configure(config_string=':debug') 407 | 408 | orig_timeout = ConsensusManager.round_timeout 409 | 
ConsensusManager.round_timeout = timeout 410 | network = Network(num_nodes, simenv=True) 411 | network.connect_nodes() 412 | network.normvariate_base_latencies(latency_sigma_factor, base_latency) 413 | network.disable_validators(num_faulty_nodes) 414 | network.throttle_validators(num_slow_nodes) 415 | network.start() 416 | network.run(sim_duration) 417 | network.check_consistency() 418 | ConsensusManager.round_timeout = orig_timeout 419 | return network 420 | 421 | if __name__ == '__main__': 422 | num_nodes = 10 423 | faulty_fraction = 1 / 3. * 0 # nodes not sending anything 424 | # nodes sending votes and proposals at the edge of the timeout window 425 | slow_fraction = 1 / 3. * 0 426 | 427 | network = main(num_nodes=num_nodes, 428 | sim_duration=10, 429 | timeout=0.5, 430 | base_latency=0.05, 431 | latency_sigma_factor=0.5, 432 | num_faulty_nodes=int(num_nodes * faulty_fraction), 433 | num_slow_nodes=int(num_nodes * slow_fraction) 434 | ) 435 | -------------------------------------------------------------------------------- /hydrachain/consensus/synchronizer.py: -------------------------------------------------------------------------------- 1 | import gevent 2 | from .base import Proposal 3 | from .protocol import HDCProtocol 4 | 5 | 6 | class Synchronizer(object): 7 | 8 | timeout = 5 9 | max_getproposals_count = HDCProtocol.max_getproposals_count 10 | max_queued = 3 * max_getproposals_count 11 | 12 | def __init__(self, consensusmanager): 13 | self.cm = consensusmanager 14 | self.requested = set() 15 | self.received = set() 16 | self.last_active_protocol = None # last protocol (peer) which sent a proposal 17 | self.add_proposals_lock = gevent.lock.Semaphore() 18 | 19 | def __repr__(self): 20 | status = 'syncing' if self.is_syncing else 'insync' 21 | return '' \ 22 | % (status, len(self.missing), len(self.requested), len(self.received)) 23 | 24 | @property 25 | def is_syncing(self): 26 | return len(self.requested) 27 | 28 | @property 29 | def missing(self): 30 | 
ls = self.cm.highest_committing_lockset 31 | if not ls: 32 | return [] 33 | max_height = ls.height 34 | if not max_height or max_height <= self.cm.head.number: 35 | return [] 36 | return range(self.cm.head.number + 1, max_height + 1) 37 | 38 | def request(self): 39 | """ 40 | sync the missing blocks between: 41 | head 42 | highest height with signing lockset 43 | 44 | we get these locksets by collecting votes on all heights 45 | """ 46 | missing = self.missing 47 | self.cm.log('sync.request', missing=len(missing), requested=len(self.requested), 48 | received=len(self.received)) 49 | if self.requested: 50 | self.cm.log('waiting for requested') 51 | return 52 | if len(self.received) + self.max_getproposals_count >= self.max_queued: 53 | self.cm.log('queue is full') 54 | return 55 | if not missing: 56 | self.cm.log('insync') 57 | return 58 | if self.last_active_protocol is None: # FIXME, check if it is active 59 | self.cm.log('no active protocol', last_active_protocol=self.last_active_protocol) 60 | return 61 | self.cm.log('collecting') 62 | blocknumbers = [] 63 | for h in missing: 64 | if h not in self.received and h not in self.requested: 65 | blocknumbers.append(h) 66 | self.requested.add(h) 67 | if len(blocknumbers) == self.max_getproposals_count: 68 | break 69 | self.cm.log('collected', num=len(blocknumbers)) 70 | if not blocknumbers: 71 | return 72 | self.cm.log('requesting', num=len(blocknumbers), 73 | requesting_range=(blocknumbers[0], blocknumbers[-1])) 74 | self.last_active_protocol.send_getblockproposals(*blocknumbers) 75 | # setup alarm 76 | self.cm.chainservice.setup_alarm(self.timeout, self.on_alarm, blocknumbers) 77 | 78 | def on_proposal(self, proposal, proto): 79 | "called to inform about synced peers" 80 | assert isinstance(proto, HDCProtocol) 81 | assert isinstance(proposal, Proposal) 82 | if proposal.height >= self.cm.height: 83 | assert proposal.lockset.is_valid 84 | self.last_active_protocol = proto 85 | 86 | def on_alarm(self, requested): 87 | # 
remove requested, so they can be rerequested 88 | self.requested.difference_update(set(self.requested)) 89 | self.request() 90 | 91 | def receive_blockproposals(self, proposals): 92 | self.cm.log('receive_blockproposals', p=proposals, received=self.received) 93 | for p in proposals: 94 | self.received.add(p.height) 95 | self.requested.remove(p.height) 96 | for v in p.signing_lockset: # add all votes, so we have locksets ready for committing 97 | self.cm.add_vote(v) 98 | 99 | # commit after we added new votes to commit a block from the last sync 100 | self.cm.process() 101 | 102 | # request next round 103 | self.request() 104 | self.add_proposals_lock.acquire() 105 | for p in proposals: 106 | self.cm.add_proposal(p) 107 | self.cm.process() 108 | self.cleanup() 109 | self.add_proposals_lock.release() 110 | 111 | not_added = [] 112 | for p in proposals: 113 | if p.height > self.cm.head.number: 114 | not_added.append(p) 115 | if p.height == self.cm.head.number: 116 | assert p.blockhash in self.cm.block_candidates 117 | 118 | # print 'not added', not_added 119 | # print 'received', self.received 120 | # for h in self.cm.heights: 121 | # if self.cm.heights[h].last_quorum_lockset: 122 | # print 'quorum @', h 123 | 124 | assert self.cm.height >= max(p.height for p in proposals) 125 | 126 | self.cm.log('done receive_blockproposals', sync=self) 127 | if len(not_added) > 1: 128 | raise Exception('more than one proposal not added') 129 | 130 | def cleanup(self): 131 | height = self.cm.height 132 | for h in list(self.received): 133 | if h < height: 134 | self.received.remove(h) 135 | for h in list(self.requested): 136 | if h < height: 137 | self.requested.remove(h) 138 | 139 | def process(self): 140 | self.request() 141 | -------------------------------------------------------------------------------- /hydrachain/consensus/utils.py: -------------------------------------------------------------------------------- 1 | from sha3 import sha3_256 2 | from ethereum.utils import 
big_endian_to_int


def sha3(seed):
    """Return the 32-byte keccak-256 digest of `seed`."""
    return sha3_256(bytes(seed)).digest()

# ANSI terminal color escape codes

FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'


def DEBUG(*args, **kargs):
    # loud red debug print; for ad-hoc debugging only
    print(FAIL + repr(args) + repr(kargs) + ENDC)

# foreground colors 90-96 plus background colors 41-47
colors = ['\033[9%dm' % i for i in range(0, 7)]
colors += ['\033[4%dm' % i for i in range(1, 8)]
num_colors = len(colors)


def cstr(num, txt):
    # color `txt` deterministically by `num` (an int or big-endian bytes,
    # e.g. an address) so each validator gets a stable color
    if isinstance(num, bytes):
        num = big_endian_to_int(num)
    return '%s%s%s' % (colors[num % len(colors)], txt, ENDC)


def cprint(num, txt):
    print cstr(num, txt)


def phx(x):
    # short hex prefix (first 4 bytes) of a binary string; used in reprs
    # NOTE: str.encode('hex') is Python-2-only
    return x.encode('hex')[:8]

if __name__ == '__main__':
    # visual self-test: print a sample line in every color
    for i in range(len(colors)):
        cprint(i, 'test')
--------------------------------------------------------------------------------
/hydrachain/examples/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/HydraChain/hydrachain/6c0919b0575dc8aa481f3a8c703e1a7f0575ecc3/hydrachain/examples/__init__.py
--------------------------------------------------------------------------------
/hydrachain/examples/native/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/HydraChain/hydrachain/6c0919b0575dc8aa481f3a8c703e1a7f0575ecc3/hydrachain/examples/native/__init__.py
--------------------------------------------------------------------------------
/hydrachain/examples/native/fungible/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/HydraChain/hydrachain/6c0919b0575dc8aa481f3a8c703e1a7f0575ecc3/hydrachain/examples/native/fungible/__init__.py
--------------------------------------------------------------------------------
/hydrachain/examples/native/fungible/fungible_contract.py:
-------------------------------------------------------------------------------- 1 | import ethereum.utils as utils 2 | import ethereum.slogging as slogging 3 | import hydrachain.native_contracts as nc 4 | from hydrachain.nc_utils import isaddress, STATUS, FORBIDDEN, OK, INSUFFICIENTFUNDS 5 | log = slogging.get_logger('contracts.fungible') 6 | 7 | 8 | class Transfer(nc.ABIEvent): 9 | 10 | """Triggered when tokens are transferred.""" 11 | args = [dict(name='from', type='address', indexed=True), 12 | dict(name='to', type='address', indexed=True), 13 | dict(name='value', type='uint256', indexed=True)] 14 | 15 | 16 | class Approval(nc.ABIEvent): 17 | 18 | """Triggered when Fungible.approved is called.""" 19 | args = [dict(name='owner', type='address', indexed=True), 20 | dict(name='spender', type='address', indexed=True), 21 | dict(name='value', type='uint256', indexed=True)] 22 | 23 | 24 | class Fungible(nc.NativeContract): 25 | 26 | """ 27 | based on 28 | https://github.com/ethereum/wiki/wiki/Standardized_Contract_APIs 29 | """ 30 | address = utils.int_to_addr(5000) 31 | events = [Transfer, Approval] 32 | 33 | owner = nc.Scalar('address') 34 | supply = nc.Scalar('uint256') 35 | # mapping (address => uint256) 36 | # here mapping betw address => balances 37 | accounts = nc.IterableDict('uint256') 38 | allowances = nc.Dict(nc.Dict('uint256')) 39 | 40 | def init(ctx, _supply='uint256', returns=STATUS): 41 | log.DEV('In Fungible.init') 42 | if isaddress(ctx.owner): 43 | return FORBIDDEN 44 | ctx.owner = ctx.tx_origin 45 | ctx.accounts[ctx.tx_origin] = _supply 46 | ctx.supply = _supply 47 | return OK 48 | 49 | def transfer(ctx, _to='address', _value='uint256', returns=STATUS): 50 | """ Standardized Contract API: 51 | function transfer(address _to, uint256 _value) returns (bool _success) 52 | """ 53 | log.DEV('In Fungible.transfer') 54 | if ctx.accounts[ctx.msg_sender] >= _value: 55 | ctx.accounts[ctx.msg_sender] -= _value 56 | ctx.accounts[_to] += _value 57 | 
ctx.Transfer(ctx.msg_sender, _to, _value) 58 | return OK 59 | else: 60 | return INSUFFICIENTFUNDS 61 | 62 | def transferFrom(ctx, _from='address', _to='address', _value='uint256', returns=STATUS): 63 | """ Standardized Contract API: 64 | function transferFrom(address _from, address _to, uint256 _value) returns (bool success) 65 | """ 66 | auth = ctx.allowances[_from][ctx.msg_sender] 67 | if ctx.accounts[_from] >= _value and auth >= _value: 68 | ctx.allowances[_from][ctx.msg_sender] -= _value 69 | ctx.accounts[_from] -= _value 70 | ctx.accounts[_to] += _value 71 | ctx.Transfer(_from, _to, _value) 72 | return OK 73 | else: 74 | return INSUFFICIENTFUNDS 75 | 76 | @nc.constant 77 | def totalSupply(ctx, returns='uint256'): 78 | """ Standardized Contract API: 79 | function totalSupply() constant returns (uint256 supply) 80 | """ 81 | return ctx.supply 82 | 83 | @nc.constant 84 | def balanceOf(ctx, _address='address', returns='uint256'): 85 | """ Standardized Contract API: 86 | function balanceOf(address _address) constant returns (uint256 balance) 87 | """ 88 | return ctx.accounts[_address] 89 | 90 | def approve(ctx, _spender='address', _value='uint256', returns=STATUS): 91 | """ Standardized Contract API: 92 | function approve(address _spender, uint256 _value) returns (bool success) 93 | """ 94 | ctx.allowances[ctx.msg_sender][_spender] += _value 95 | ctx.Approval(ctx.msg_sender, _spender, _value) 96 | return OK 97 | 98 | @nc.constant 99 | def allowance(ctx, _spender='address', returns='uint256'): 100 | """ Standardized Contract API: 101 | function allowance(address _owner, address _spender) constant returns (uint256 remaining) 102 | """ 103 | return ctx.allowances[ctx.msg_sender][_spender] 104 | 105 | @nc.constant 106 | def allowanceFrom(ctx, _from='address', _spender='address', returns='uint256'): 107 | return ctx.allowances[_from][_spender] 108 | 109 | # Other Functions 110 | @nc.constant 111 | def get_creator(ctx, returns='address'): 112 | return ctx.owner 113 | 114 
    @nc.constant
    def num_accounts(ctx, returns='uint32'):
        # number of distinct addresses that ever held a balance
        return len(ctx.accounts)

    @nc.constant
    def get_accounts(ctx, returns='address[]'):
        # all account addresses known to the contract
        return list(ctx.accounts.keys())


# Fungible variants distinguished only by their registry address.

class Token(Fungible):
    address = utils.int_to_addr(5001)


class Coin(Fungible):
    address = utils.int_to_addr(5002)


class Currency(Fungible):
    address = utils.int_to_addr(5003)


class Issuance(nc.ABIEvent):

    "Triggered when IOU.issue is called."
    args = [dict(name='issuer', type='address', indexed=True),
            dict(name='rtgs_hash', type='bytes32', indexed=True),
            dict(name='amount', type='uint256', indexed=True)]


class IOU(Fungible):
    """
    IOU fungible, can Issue its supply
    """

    address = utils.int_to_addr(5004)
    events = [Transfer, Approval, Issuance]
    # issuer address => cumulative amount issued by that issuer
    issued_amounts = nc.IterableDict('uint256')

    def init(ctx, returns=STATUS):
        # starts with zero supply; funds enter circulation via issue_funds()
        log.DEV('In IOU.init')
        return super(IOU, ctx).init(0)

    # NOTE(review): no owner/ACL check here -- any sender can mint to
    # itself; confirm this is intended before production use.
    def issue_funds(ctx, amount='uint256', rtgs_hash='bytes32', returns=STATUS):
        "In the IOU fungible the supply is set by Issuer, who issue funds."
158 | # allocate new issue as result of a new cash entry 159 | ctx.accounts[ctx.msg_sender] += amount 160 | ctx.issued_amounts[ctx.msg_sender] += amount 161 | # Store hash(rtgs) 162 | ctx.Issuance(ctx.msg_sender, rtgs_hash, amount) 163 | return OK 164 | 165 | # Other Functions 166 | @nc.constant 167 | def get_issued_amount(ctx, issuer='address', returns='uint256'): 168 | return ctx.issued_amounts[issuer] 169 | -------------------------------------------------------------------------------- /hydrachain/examples/native/fungible/test_fungible_contract.py: -------------------------------------------------------------------------------- 1 | from ethereum import tester 2 | import hydrachain.native_contracts as nc 3 | from fungible_contract import Fungible, Transfer, Approval 4 | import ethereum.slogging as slogging 5 | log = slogging.get_logger('test.fungible') 6 | 7 | 8 | def test_fungible_instance(): 9 | state = tester.state() 10 | creator_address = tester.a0 11 | creator_key = tester.k0 12 | 13 | nc.registry.register(Fungible) 14 | 15 | # Create proxy 16 | EUR_address = nc.tester_create_native_contract_instance(state, creator_key, Fungible) 17 | fungible_as_creator = nc.tester_nac(state, creator_key, EUR_address) 18 | # Initalize fungible with a fixed quantity of fungibles. 19 | fungible_total = 1000000 20 | fungible_as_creator.init(fungible_total) 21 | assert fungible_as_creator.balanceOf(creator_address) == fungible_total 22 | nc.registry.unregister(Fungible) 23 | 24 | 25 | def test_fungible_template(): 26 | """ 27 | Tests; 28 | Fungible initialization as Creator, 29 | Creator sends Fungibles to Alice, 30 | Alice sends Fungibles to Bob, 31 | Bob approves Creator to spend Fungibles on his behalf, 32 | Creator allocates these Fungibles from Bob to Alice, 33 | Testing of non-standardized functions of the Fungible contract. 
34 | Events; 35 | Checking logs from Transfer and Approval Events 36 | """ 37 | 38 | # Register Contract Fungible 39 | nc.registry.register(Fungible) 40 | 41 | # Initialize Participants and Fungible contract 42 | state = tester.state() 43 | logs = [] 44 | creator_address = tester.a0 45 | creator_key = tester.k0 46 | alice_address = tester.a1 47 | alice_key = tester.k1 48 | bob_address = tester.a2 49 | bob_key = tester.k2 50 | # Create proxy 51 | nc.listen_logs(state, Transfer, callback=lambda e: logs.append(e)) 52 | nc.listen_logs(state, Approval, callback=lambda e: logs.append(e)) 53 | fungible_as_creator = nc.tester_nac(state, creator_key, Fungible.address) 54 | # Initalize fungible with a fixed quantity of fungibles. 55 | fungible_total = 1000000 56 | fungible_as_creator.init(fungible_total) 57 | assert fungible_as_creator.balanceOf(creator_address) == fungible_total 58 | 59 | # Creator transfers Fungibles to Alice 60 | send_amount_alice = 700000 61 | fungible_as_creator.transfer(alice_address, send_amount_alice) 62 | assert fungible_as_creator.balanceOf(creator_address) == fungible_total - send_amount_alice 63 | assert fungible_as_creator.balanceOf(alice_address) == send_amount_alice 64 | # Check logs data of Transfer Event 65 | assert len(logs) == 1 66 | l = logs[0] 67 | assert l['event_type'] == 'Transfer' 68 | assert l['from'] == creator_address 69 | assert l['to'] == alice_address 70 | # Build transaction Log arguments and check sent amount 71 | assert l['value'] == send_amount_alice 72 | 73 | # Alice transfers Fungibles to Bob 74 | send_amount_bob = 400000 75 | # Create proxy for Alice 76 | fungible_as_alice = nc.tester_nac(state, alice_key, Fungible.address) 77 | fungible_as_alice.transfer(bob_address, send_amount_bob) 78 | # Test balances of Creator, Alice and Bob 79 | creator_balance = fungible_total - send_amount_alice 80 | alice_balance = send_amount_alice - send_amount_bob 81 | bob_balance = send_amount_bob 82 | assert 
fungible_as_alice.balanceOf(creator_address) == creator_balance 83 | assert fungible_as_alice.balanceOf(alice_address) == alice_balance 84 | assert fungible_as_alice.balanceOf(bob_address) == bob_balance 85 | 86 | # Create proxy for Bob 87 | fungible_as_bob = nc.tester_nac(state, bob_key, Fungible.address) 88 | approved_amount_bob = 100000 89 | assert fungible_as_bob.allowance(creator_address) == 0 90 | # Bob approves Creator to spend Fungibles 91 | assert fungible_as_bob.allowance(creator_address) == 0 92 | fungible_as_bob.approve(creator_address, approved_amount_bob) 93 | assert fungible_as_bob.allowance(creator_address) == approved_amount_bob 94 | 95 | # Test transferFrom function, i.e. direct debit. 96 | fungible_as_creator.transferFrom(bob_address, alice_address, approved_amount_bob) 97 | # Test balances 98 | alice_balance += approved_amount_bob 99 | bob_balance -= approved_amount_bob 100 | assert fungible_as_alice.balanceOf(creator_address) == creator_balance 101 | assert fungible_as_alice.balanceOf(alice_address) == alice_balance 102 | assert fungible_as_alice.balanceOf(bob_address) == bob_balance 103 | # Check logs data of Transfer Event 104 | assert len(logs) == 4 105 | l = logs[-1] 106 | assert l['event_type'] == 'Transfer' 107 | assert l['from'] == bob_address 108 | assert l['to'] == alice_address 109 | # Build transaction Log arguments and check sent amount 110 | assert l['value'] == approved_amount_bob 111 | 112 | # Testing account information 113 | # Now we should have three Fungible accounts 114 | assert 3 == fungible_as_alice.num_accounts() 115 | r = fungible_as_creator.get_creator() 116 | assert r == creator_address 117 | r = fungible_as_creator.get_accounts() 118 | assert set(r) == set([creator_address, alice_address, bob_address]) 119 | 120 | print logs 121 | while logs and logs.pop(): 122 | pass 123 | 124 | nc.registry.unregister(Fungible) 125 | -------------------------------------------------------------------------------- 
/hydrachain/examples/native/fungible/test_iou_contract.py: -------------------------------------------------------------------------------- 1 | from ethereum import tester 2 | import hydrachain.native_contracts as nc 3 | from fungible_contract import IOU 4 | import ethereum.slogging as slogging 5 | log = slogging.get_logger('test.iou') 6 | 7 | 8 | def test_iou_template(): 9 | """ 10 | Tests; 11 | IOU initialization as Issuer, 12 | Testing issue funds, get_issued_amount 13 | """ 14 | 15 | # Register Contract Fungible 16 | nc.registry.register(IOU) 17 | 18 | # Initialize Participants and Fungible contract 19 | state = tester.state() 20 | logs = [] 21 | issuer_address = tester.a0 22 | issuer_key = tester.k0 23 | # create listeners 24 | for evt_class in IOU.events: 25 | nc.listen_logs(state, evt_class, callback=lambda e: logs.append(e)) 26 | 27 | # Initialization 28 | iou_address = nc.tester_create_native_contract_instance(state, issuer_key, IOU) 29 | iou_as_issuer = nc.tester_nac(state, issuer_key, iou_address) 30 | iou_as_issuer.init() 31 | assert iou_as_issuer.balanceOf(issuer_address) == 0 32 | amount_issued = 200000 33 | iou_as_issuer.issue_funds(amount_issued, '') 34 | assert iou_as_issuer.balanceOf(issuer_address) == amount_issued 35 | 36 | iou_as_issuer.issue_funds(amount_issued, '') 37 | assert iou_as_issuer.balanceOf(issuer_address) == 2 * amount_issued 38 | assert iou_as_issuer.get_issued_amount(issuer_address) == 2 * amount_issued 39 | 40 | print logs 41 | while logs and logs.pop(): 42 | pass 43 | 44 | nc.registry.unregister(IOU) 45 | -------------------------------------------------------------------------------- /hydrachain/hdc_service.py: -------------------------------------------------------------------------------- 1 | import time 2 | from ethereum.config import Env 3 | from ethereum.utils import sha3 4 | import rlp 5 | from rlp.utils import encode_hex 6 | from ethereum import processblock 7 | from ethereum.slogging import get_logger 8 | from 
class DuplicatesFilter(object):

    """Bounded filter that remembers the `max_items` most recently seen items.

    Used to suppress re-broadcasting of already seen message hashes. The
    original implementation scanned a plain list on every `update` and
    `__contains__` call (O(n) per lookup with n up to `max_items`); this
    version keeps an auxiliary set for O(1) membership while a deque
    preserves arrival order for eviction. Observable behavior (return
    values, which item gets evicted when) is unchanged.
    """

    def __init__(self, max_items=1024):
        # maximum number of items remembered before the oldest is dropped
        self.max_items = max_items
        # deque: arrival order for eviction; set: O(1) membership checks
        self.filter = deque()
        self._known = set()

    def update(self, data):
        "returns True if unknown"
        if data not in self._known:
            self.filter.append(data)
            self._known.add(data)
            if len(self.filter) > self.max_items:
                # evict the oldest entry and forget it
                self._known.discard(self.filter.popleft())
            return True
        else:
            # preserve the original rotation behavior on a hit: move the
            # oldest entry to the back so it survives eviction a bit longer
            self.filter.append(self.filter.popleft())
            return False

    def __contains__(self, v):
        return v in self._known
class ProposalLock(gevent.lock.BoundedSemaphore):

    """Binary semaphore that additionally remembers which block it was
    locked for, so a release can be made conditional on that block.
    """

    def __init__(self):
        super(ProposalLock, self).__init__()
        # the block this lock currently protects (None when unlocked)
        self.block = None

    def is_locked(self):
        return self.locked()

    def acquire(self):
        log.debug('trying to acquire', lock=self)
        super(ProposalLock, self).acquire()
        log.debug('acquired', lock=self)

    @property
    def height(self):
        # number of the locked block, or None if no block is attached
        if self.block:
            return self.block.number

    def release(self, if_block=-1):
        """Release the lock; if *if_block* is given, only release when it
        matches the block the lock was taken for.
        """
        assert self.is_locked()
        log.debug('in ProposalLock.release', lock=self, if_block=if_block, block=self.block)
        if if_block != -1 and self.block and if_block != self.block:
            log.debug('could not release', lock=self)
            return
        self.block = None
        super(ProposalLock, self).release()
        log.debug('released', lock=self)

    def __repr__(self):
        # fixed: previously ''.format(...) which always produced an empty repr
        return '<ProposalLock({}) locked={} id={}>'.format(
            self.block, self.is_locked(), id(self))

    __str__ = __repr__
    def __init__(self, app):
        """Set up the chain service: database (pruning or plain), network-id
        consistency check, chain, queues/locks and the consensus machinery.

        :param app: the application object; provides config and the
            `db` / `accounts` services this service depends on.
        :raises Exception: if the database was previously initialized with
            an incompatible pruning mode or a different network_id.
        """
        self.config = app.config
        sce = self.config['eth']
        # pruning >= 0 selects a RefcountDB with a TTL; the marker keys below
        # make the pruning/non-pruning choice sticky for the DB's lifetime
        if int(sce['pruning']) >= 0:
            self.db = RefcountDB(app.services.db)
            if "I am not pruning" in self.db.db:
                raise Exception("This database was initialized as non-pruning."
                                " Kinda hard to start pruning now.")
            self.db.ttl = int(sce['pruning'])
            self.db.db.put("I am pruning", "1")
        else:
            self.db = app.services.db
            if "I am pruning" in self.db:
                raise Exception("This database was initialized as pruning."
                                " Kinda hard to stop pruning now.")
            self.db.put("I am not pruning", "1")

        # refuse to reuse a database that belongs to a different network
        if 'network_id' in self.db:
            db_network_id = self.db.get('network_id')
            if db_network_id != str(sce['network_id']):
                raise Exception("This database was initialized with network_id {} "
                                "and can not be used when connecting to network_id {}".format(
                                    db_network_id, sce['network_id'])
                                )

        else:
            self.db.put('network_id', str(sce['network_id']))
            self.db.commit()

        assert self.db is not None

        WiredService.__init__(self, app)
        log.info('initializing chain')
        coinbase = app.services.accounts.coinbase
        env = Env(self.db, sce['block'])
        self.chain = Chain(env, new_head_cb=self._on_new_head, coinbase=coinbase)

        log.info('chain at', number=self.chain.head.number)
        # optional sanity check against a configured genesis hash
        if 'genesis_hash' in sce:
            assert sce['genesis_hash'] == self.chain.genesis.hex_hash(), \
                "Unexpected genesis hash.\n Have: {}\n Expected: {}".format(
                    self.chain.genesis.hex_hash(), sce['genesis_hash'])

        self.transaction_queue = Queue(maxsize=self.transaction_queue_size)
        self.add_blocks_lock = False
        self.add_transaction_lock = gevent.lock.BoundedSemaphore()
        self.broadcast_filter = DuplicatesFilter()
        self.on_new_head_cbs = []
        self.on_new_head_candidate_cbs = []
        self.newblock_processing_times = deque(maxlen=1000)

        # Consensus
        validators = validators_from_config(self.config['hdc']['validators'])
        self.consensus_contract = ConsensusContract(validators=validators)
        self.consensus_manager = ConsensusManager(self, self.consensus_contract,
                                                  self.consensus_privkey)

        # lock blocks that where proposed, so they don't get mutated
        self.proposal_lock = ProposalLock()
        assert not self.proposal_lock.is_locked()
self.consensus_manager.process() 209 | gevent.spawn(self.announce) 210 | 211 | def announce(self): 212 | while not self.consensus_manager.is_ready: 213 | self.consensus_manager.send_ready() 214 | gevent.sleep(0.5) 215 | 216 | # interface accessed by ConensusManager 217 | 218 | def log(self, msg, *args, **kargs): 219 | log.debug(msg, *args, **kargs) 220 | 221 | @property 222 | def consensus_privkey(self): 223 | return self.app.services.accounts[0].privkey 224 | 225 | def sign(self, obj): 226 | assert isinstance(obj, Signed) 227 | obj.sign(self.consensus_privkey) 228 | 229 | @property 230 | def now(self): 231 | return time.time() 232 | 233 | def setup_alarm(self, delay, cb, *args): 234 | log.debug('setting up alarm') 235 | 236 | def _trigger(): 237 | gevent.sleep(delay) 238 | log.debug('alarm triggered') 239 | cb(*args) 240 | 241 | gevent.spawn(_trigger) 242 | 243 | def setup_transaction_alarm(self, cb, *args): 244 | log.debug('setting up tx alarm') 245 | 246 | class Trigger(object): 247 | 248 | def __call__(me, blk): 249 | self.on_new_head_candidate_cbs.remove(me) 250 | log.debug('transaction alarm triggered') 251 | 252 | def do_trigger_delayed(): 253 | gevent.sleep(seconds=self.min_block_time) 254 | log.debug('transaction alarm calling cbs') 255 | cb(*args) 256 | gevent.spawn(do_trigger_delayed) 257 | 258 | self.on_new_head_candidate_cbs.append(Trigger()) 259 | 260 | def commit_block(self, blk): 261 | assert isinstance(blk.header, HDCBlockHeader) 262 | log.debug('trying to acquire transaction lock') 263 | self.add_transaction_lock.acquire() 264 | success = self.chain.add_block(blk, forward_pending_transactions=True) 265 | self.add_transaction_lock.release() 266 | log.debug('transaction lock release') 267 | log.info('new head', head=self.chain.head) 268 | return success 269 | 270 | def link_block(self, t_block): 271 | assert isinstance(t_block.header, HDCBlockHeader) 272 | self.add_transaction_lock.acquire() 273 | block = self._link_block(t_block) 274 | if not 
block: 275 | return 276 | assert block.get_parent() == self.chain.head, (block.get_parent(), self.chain.head) 277 | assert block.header.coinbase == t_block.header.coinbase 278 | self.add_transaction_lock.release() 279 | return block 280 | 281 | def _link_block(self, t_block): 282 | assert isinstance(t_block.header, HDCBlockHeader) 283 | if t_block.header.hash in self.chain: 284 | log.warn('known block', block=t_block) 285 | return 286 | if t_block.header.prevhash not in self.chain: 287 | log.warn('missing parent', block=t_block, head=self.chain.head, 288 | prevhash=phx(t_block.header.prevhash)) 289 | return 290 | if isinstance(t_block, Block): 291 | return True # already deserialized 292 | try: # deserialize 293 | st = time.time() 294 | block = t_block.to_block(env=self.chain.env) 295 | elapsed = time.time() - st 296 | log.debug('deserialized', elapsed='%.4fs' % elapsed, ts=time.time(), 297 | gas_used=block.gas_used, gpsec=self.gpsec(block.gas_used, elapsed)) 298 | assert block.header.check_pow() 299 | except processblock.InvalidTransaction as e: 300 | log.warn('invalid transaction', block=t_block, error=e, FIXME='ban node') 301 | return 302 | except VerificationFailed as e: 303 | log.warn('verification failed', error=e, FIXME='ban node') 304 | return 305 | return block 306 | 307 | def add_transaction(self, tx, origin=None, force_broadcast=False): 308 | """ 309 | Warning: 310 | Locking proposal_lock may block incoming events which are necessary to unlock! 311 | I.e. votes / blocks! 312 | Take care! 
313 | """ 314 | self.consensus_manager.log( 315 | 'add_transaction', blk=self.chain.head_candidate, lock=self.proposal_lock) 316 | log.debug('add_transaction', lock=self.proposal_lock) 317 | block = self.proposal_lock.block 318 | self.proposal_lock.acquire() 319 | self.consensus_manager.log('add_transaction acquired lock', lock=self.proposal_lock) 320 | assert not hasattr(self.chain.head_candidate, 'should_be_locked') 321 | success = super(ChainService, self).add_transaction(tx, origin, force_broadcast) 322 | if self.proposal_lock.is_locked(): # can be unlock if we are at a new block 323 | self.proposal_lock.release(if_block=block) 324 | log.debug('added transaction', num_txs=self.chain.head_candidate.num_transactions()) 325 | return success 326 | 327 | def _on_new_head(self, blk): 328 | self.release_proposal_lock(blk) 329 | super(ChainService, self)._on_new_head(blk) 330 | 331 | def set_proposal_lock(self, blk): 332 | log.debug('set_proposal_lock', locked=self.proposal_lock) 333 | if not self.proposal_lock.is_locked(): 334 | self.proposal_lock.acquire() 335 | self.proposal_lock.block = blk 336 | assert self.proposal_lock.is_locked() # can not be aquired 337 | log.debug('did set_proposal_lock', lock=self.proposal_lock) 338 | 339 | def release_proposal_lock(self, blk): 340 | log.debug('releasing proposal_lock', lock=self.proposal_lock) 341 | if self.proposal_lock.is_locked(): 342 | if self.proposal_lock.height <= blk.number: 343 | assert self.chain.head_candidate.number > self.proposal_lock.height 344 | assert not hasattr(self.chain.head_candidate, 'should_be_locked') 345 | assert not isinstance(self.chain.head_candidate.header, HDCBlockHeader) 346 | self.proposal_lock.release() 347 | log.debug('released') 348 | assert not self.proposal_lock.is_locked() 349 | else: 350 | log.debug('could not release', head=blk, lock=self.proposal_lock) 351 | 352 | ############################################################################### 353 | 354 | @property 355 | def 
is_syncing(self): 356 | return self.consensus_manager.synchronizer.is_syncing 357 | 358 | @property 359 | def is_mining(self): 360 | return self.chain.coinbase in self.config['hdc']['validators'] 361 | 362 | # wire protocol receivers ########### 363 | 364 | # transactions 365 | 366 | def on_receive_transactions(self, proto, transactions): 367 | "receives rlp.decoded serialized" 368 | log.debug('----------------------------------') 369 | log.debug('remote_transactions_received', count=len(transactions), remote_id=proto) 370 | 371 | def _add_txs(): 372 | for tx in transactions: 373 | self.add_transaction(tx, origin=proto) 374 | gevent.spawn(_add_txs) # so the locks in add_transaction won't lock the connection 375 | 376 | # blocks / proposals ################ 377 | 378 | def on_receive_getblockproposals(self, proto, blocknumbers): 379 | log.debug('----------------------------------') 380 | log.debug("on_receive_getblockproposals", count=len(blocknumbers)) 381 | found = [] 382 | for i, height in enumerate(blocknumbers): 383 | if i == self.wire_protocol.max_getproposals_count: 384 | break 385 | assert isinstance(height, int) # integers 386 | assert i == 0 or height > blocknumbers[i - 1] # sorted 387 | if height > self.chain.head.number: 388 | log.debug("unknown block requested", height=height) 389 | break 390 | rlp_data = self.consensus_manager.get_blockproposal_rlp_by_height(height) 391 | assert isinstance(rlp_data, bytes) 392 | found.append(rlp_data) 393 | if found: 394 | log.debug("found", count=len(found)) 395 | proto.send_blockproposals(*found) 396 | 397 | def on_receive_blockproposals(self, proto, proposals): 398 | log.debug('----------------------------------') 399 | self.consensus_manager.log('received proposals', sender=proto) 400 | log.debug("recv proposals", num=len(proposals), remote_id=proto) 401 | self.consensus_manager.synchronizer.receive_blockproposals(proposals) 402 | 403 | def on_receive_newblockproposal(self, proto, proposal): 404 | if proposal.hash 
in self.broadcast_filter: 405 | return 406 | log.debug('----------------------------------') 407 | self.consensus_manager.log('receive proposal', sender=proto) 408 | log.debug("recv newblockproposal", proposal=proposal, remote_id=proto) 409 | # self.synchronizer.receive_newproposal(proto, proposal) 410 | assert isinstance(proposal, BlockProposal) 411 | assert isinstance(proposal.block.header, HDCBlockHeader) 412 | isvalid = self.consensus_manager.add_proposal(proposal, proto) 413 | if isvalid: 414 | self.broadcast(proposal, origin=proto) 415 | self.consensus_manager.process() 416 | 417 | def on_receive_votinginstruction(self, proto, votinginstruction): 418 | if votinginstruction.hash in self.broadcast_filter: 419 | return 420 | log.debug('----------------------------------') 421 | log.debug("recv votinginstruction", proposal=votinginstruction, remote_id=proto) 422 | # self.synchronizer.receive_newproposal(proto, proposal) 423 | isvalid = self.consensus_manager.add_proposal(votinginstruction, proto) 424 | if isvalid: 425 | self.broadcast(votinginstruction, origin=proto) 426 | 427 | self.consensus_manager.process() 428 | 429 | # votes 430 | 431 | def on_receive_vote(self, proto, vote): 432 | self.consensus_manager.log('on_receive_vote', v=vote) 433 | if vote.hash in self.broadcast_filter: 434 | log.debug('filtered!!!') 435 | return 436 | log.debug('----------------------------------') 437 | log.debug("recv vote", vote=vote, remote_id=proto) 438 | isvalid = self.consensus_manager.add_vote(vote, proto) 439 | if isvalid: 440 | self.broadcast(vote, origin=proto) 441 | self.consensus_manager.process() 442 | 443 | def on_receive_ready(self, proto, ready): 444 | if ready.hash in self.broadcast_filter: 445 | return 446 | log.debug('----------------------------------') 447 | log.debug("recv ready", ready=ready, remote_id=proto) 448 | self.consensus_manager.add_ready(ready, proto) 449 | self.broadcast(ready, origin=proto) 450 | self.consensus_manager.process() 451 | 452 | # 
    def on_receive_status(self, proto, eth_version, network_id, genesis_hash, current_lockset):
        """Handle the peer's status handshake.

        Verifies protocol version, network id and genesis hash (raising
        HDCProtocolError on mismatch), merges the peer's current lockset
        into consensus, then sends our latest proposal and pending
        transactions back to the peer.
        """
        log.debug('----------------------------------')
        log.debug('status received', proto=proto, eth_version=eth_version)
        assert eth_version == proto.version, (eth_version, proto.version)
        # reject peers from a different network
        if network_id != self.config['eth'].get('network_id', proto.network_id):
            log.warn("invalid network id", remote_network_id=network_id,
                     expected_network_id=self.config['eth'].get('network_id', proto.network_id))
            raise HDCProtocolError('wrong network_id')

        # check genesis
        if genesis_hash != self.chain.genesis.hash:
            log.warn("invalid genesis hash", remote_id=proto, genesis=genesis_hash.encode('hex'))
            raise HDCProtocolError('wrong genesis block')

        # merge the peer's votes into our consensus state
        assert isinstance(current_lockset, LockSet)
        if len(current_lockset):
            log.debug('adding received lockset', ls=current_lockset)
            for v in current_lockset.votes:
                self.consensus_manager.add_vote(v, proto)

        self.consensus_manager.process()

        # send last BlockProposal
        p = self.consensus_manager.last_blockproposal
        if p:
            log.debug('sending proposal', p=p)
            proto.send_newblockproposal(p)

        # send transactions
        transactions = self.chain.get_transactions()
        if transactions:
            log.debug("sending transactions", remote_id=proto)
            proto.send_transactions(*transactions)
def validators_from_config(validators):
    """Consolidate (potentially hex-encoded) list of validators
    into list of binary address representations.
    """
    def _to_binary(v):
        # 40 hex chars -> 20 raw bytes; anything else is assumed binary already
        return v.decode('hex') if len(v) == 40 else v

    return [_to_binary(v) for v in validators]
def decode_log(log_, events):
    """Decode a raw receipt log into an event dict using *events* classes.

    :param log_: a receipt log entry; topics[0] identifies the event type.
    :param events: candidate event classes, matched by event_id().
    :returns: the decoded event dict (augmented with 'contract' and
        'contract_class'), or None if no event class matches.
    """
    cls = None
    # pick the event class whose id matches the log's first topic
    for e_class in events:
        if e_class.event_id() == log_.topics[0]:
            cls = e_class
    if not cls:
        log.DEV('unknown eventclass for log_')
        return None
    res = []
    # cls.listen decodes the log and hands the event dict to the callback
    cls.listen(log_, address=None, callback=res.append)
    e = res[0]
    e['contract'] = log_.address
    # im_self: Python-2 bound-method attribute, yields the contract class
    cn = nc.registry.address_to_native_contract_class(log_.address).im_self.__name__
    e['contract_class'] = cn
    return e
def contract_args_from_kargs(contract_class, method, kargs):
    """Order keyword arguments according to *method*'s ABI signature.

    Returns the values from *kargs* in the positional order the contract
    expects; raises if *method* is unknown or the keys don't match.
    """
    matching = (entry for entry in contract_class.abi()
                if entry['type'] == 'function' and entry['name'] == method)
    for entry in matching:
        arg_names = [inp['name'] for inp in entry['inputs']]
        diff = set(kargs).symmetric_difference(arg_names)
        assert not diff, 'doesnt match signature:{} error:{}'.format(arg_names, diff)
        return [kargs[name] for name in arg_names]
    raise Exception('method not found')
@pytest.hookimpl(hookwrapper=True, trylast=True)
def pytest_runtest_call(item):
    """Wrap each test call so the catchlog plugin's handler (if attached to
    the item) is temporarily registered on slogging's root logger, letting
    slogging output be captured alongside stdlib logging.
    """
    catchlog_handler = getattr(item, CATCH_LOG_HANDLER_NAME, None)
    # attach only if present and not already registered
    if catchlog_handler and catchlog_handler not in slogging.rootLogger.handlers:
        slogging.rootLogger.addHandler(catchlog_handler)

    _ = yield  # run the actual test

    # detach again so handlers don't accumulate across tests
    if catchlog_handler and catchlog_handler in slogging.rootLogger.handlers:
        slogging.rootLogger.removeHandler(catchlog_handler)
def test_signed():
    """Signed: hash requires a signature; hash/sender ignore sig-field edits."""
    signed = Signed(v=0, r=0, s=0)
    assert signed.sender is None
    # hashing an unsigned object must fail loudly
    with pytest.raises(MissingSignatureError):
        signed.hash
    signed.sign(privkey)
    original_sender, original_hash = signed.sender, signed.hash
    # change signature, in order to test signature independend hash
    signed.v = 0
    assert signed.sender == original_sender
    assert signed.hash == original_hash
def test_ready():
    """Ready messages compare equal iff nonce and lockset payload match."""
    lockset = LockSet(num_eligible_votes=len(privkeys))

    def make_ready(nonce):
        # build and sign a Ready message around the shared lockset
        r = Ready(nonce, current_lockset=lockset)
        r.sign(privkey)
        return r

    first = Ready(0, current_lockset=lockset)
    assert first.current_lockset == lockset
    first.sign(privkey)

    assert first == make_ready(0)
    assert first != make_ready(1)
139 | # vote wrong round 140 | v4 = VoteBlock(h, r + 1, bh) 141 | v4.sign(privkeys[2]) 142 | with pytest.raises(InvalidVoteError): 143 | ls.add(v4) 144 | assert lsh == ls.hash 145 | assert len(ls) == 2 146 | assert v4 not in ls 147 | 148 | # vote twice 149 | v3_2 = VoteBlock(h, r, blockhash='1' * 32) 150 | v3_2.sign(privkeys[1]) 151 | with pytest.raises(DoubleVotingError): 152 | ls.add(v3_2) 153 | assert lsh == ls.hash 154 | assert len(ls) == 2 155 | assert v3_2 not in ls 156 | 157 | 158 | def test_one_vote_lockset(): 159 | ls = LockSet(num_eligible_votes=1) 160 | bh = '0' * 32 161 | r, h = 2, 3 162 | v1 = VoteBlock(h, r, bh) 163 | v1.sign(privkeys[0]) 164 | ls.add(v1) 165 | assert ls.has_quorum 166 | 167 | 168 | def test_LockSet_isvalid(): 169 | ls = LockSet(num_eligible_votes=len(privkeys)) 170 | bh = '0' * 32 171 | r, h = 2, 3 172 | 173 | votes = [VoteBlock(h, r, bh) for i in range(len(privkeys))] 174 | for i, v in enumerate(votes): 175 | v.sign(privkeys[i]) 176 | ls.add(v) 177 | assert len(ls) == i + 1 178 | if len(ls) < ls.num_eligible_votes * 2 / 3.: 179 | assert not ls.is_valid 180 | else: 181 | assert ls.is_valid 182 | assert ls.has_quorum # same blockhash 183 | ls.check() 184 | 185 | 186 | def test_LockSet_3_quorums(): 187 | ls = LockSet(3) 188 | v = VoteBlock(0, 0, '0' * 32) 189 | v.sign(privkeys[0]) 190 | ls.add(v) 191 | v = VoteNil(0, 0) 192 | v.sign(privkeys[1]) 193 | ls.add(v) 194 | assert len(ls) == 2 195 | assert not ls.is_valid 196 | v = VoteNil(0, 0) 197 | v.sign(privkeys[2]) 198 | ls.add(v) 199 | assert ls.is_valid 200 | assert ls.has_noquorum 201 | assert not ls.has_quorum 202 | assert not ls.has_quorum_possible 203 | assert ls.check() 204 | 205 | 206 | def test_LockSet_quorums(): 207 | combinations = dict(has_quorum=[ 208 | [1] * 7, 209 | [1] * 7 + [2] * 3, 210 | [1] * 7 + [None] * 3, 211 | ], 212 | has_noquorum=[ 213 | [1] * 3 + [2] * 3 + [None], 214 | [None] * 7, 215 | [None] * 10, 216 | range(10), 217 | range(7) 218 | ], 219 | 
has_quorum_possible=[ 220 | [1] * 4 + [None] * 3, 221 | [1] * 4 + [2] * 4, 222 | [1] * 4 + [2] * 3 + [3] * 3, 223 | [1] * 6 + [2] 224 | ]) 225 | 226 | r, h = 1, 2 227 | for method, permutations in combinations.items(): 228 | for set_ in permutations: 229 | assert len(set_) >= 7 230 | ls = LockSet(len(privkeys)) 231 | for i, p in enumerate(set_): 232 | if p is not None: 233 | bh = chr(p) * 32 234 | v = VoteBlock(h, r, bh) 235 | else: 236 | v = VoteNil(h, r) 237 | v.sign(privkeys[i]) 238 | ls.add(v) 239 | assert len(ls) >= 7 240 | assert getattr(ls, method) 241 | ls.check() 242 | 243 | # check stable sort 244 | bhs = ls.blockhashes() 245 | if len(bhs) > 1: 246 | assert ishash(bhs[0][0]) 247 | assert isinstance(bhs[0][1], int) 248 | if bhs[0][1] == bhs[1][1]: 249 | assert bhs[0][0] > bhs[1][0] 250 | else: 251 | assert bhs[0][1] > bhs[1][1] 252 | 253 | # test serialization 254 | 255 | s = rlp.encode(ls) 256 | d = rlp.decode(s, LockSet) 257 | 258 | assert ls == d 259 | assert id(ls) != id(d) 260 | assert getattr(ls, method) == getattr(d, method) 261 | 262 | 263 | def test_blockproposal(): 264 | s = tester.state() 265 | 266 | # block 1 267 | s.mine(n=1) 268 | genesis = s.blocks[0] 269 | assert genesis.header.number == 0 270 | blk1 = s.blocks[1] 271 | assert blk1.header.number == 1 272 | gls = genesis_signing_lockset(genesis, privkeys[0]) 273 | bp = BlockProposal(height=1, round=0, block=blk1, signing_lockset=gls, round_lockset=None) 274 | assert bp.lockset == gls 275 | assert isinstance(bp, Proposal) 276 | bp.sign(tester.k0) 277 | 278 | with pytest.raises(InvalidProposalError): # round >0 needs round_lockset 279 | bp = BlockProposal(height=1, round=1, block=blk1, signing_lockset=gls, round_lockset=None) 280 | bp.validate_votes(validators, validators[:1]) 281 | 282 | # block 2 283 | s.mine(n=1) 284 | blk2 = s.blocks[2] 285 | assert blk2.header.number == 2 286 | 287 | ls = LockSet(len(validators)) 288 | for privkey in privkeys: 289 | v = VoteBlock(height=1, round=0, 
blockhash=blk1.hash) 290 | v.sign(privkey) 291 | ls.add(v) 292 | 293 | bp = BlockProposal(height=2, round=0, block=blk2, signing_lockset=ls, round_lockset=None) 294 | assert bp.lockset == ls 295 | with pytest.raises(InvalidProposalError): # signature missing 296 | bp.validate_votes(validators, validators) 297 | 298 | with pytest.raises(InvalidProposalError): 299 | bp.sign(privkeys[0]) # privkey doesnt match coinbase 300 | bp.validate_votes(validators, validators) 301 | 302 | with pytest.raises(InvalidSignature): # already signed 303 | bp.sign(tester.k0) 304 | 305 | bp.v = 0 # reset sigcheck hack 306 | bp.sign(tester.k0) 307 | 308 | bp.validate_votes(validators, validators) 309 | 310 | with pytest.raises(InvalidProposalError): # round >0 needs round_lockset 311 | bp = BlockProposal(height=2, round=1, block=blk2, signing_lockset=gls, round_lockset=None) 312 | 313 | # block 2 round 1, timeout in round=0 314 | rls = LockSet(len(validators)) 315 | for privkey in privkeys: 316 | v = VoteNil(height=2, round=0) 317 | v.sign(privkey) 318 | rls.add(v) 319 | bp = BlockProposal(height=2, round=1, block=blk2, signing_lockset=ls, round_lockset=rls) 320 | assert bp.lockset == rls 321 | bp.sign(tester.k0) 322 | bp.validate_votes(validators, validators) 323 | 324 | # serialize 325 | s = rlp.encode(bp) 326 | dbp = rlp.decode(s, BlockProposal) 327 | assert dbp.block == blk2 328 | 329 | dbp.validate_votes(validators, validators) 330 | 331 | # check quorumpossible lockset failure 332 | rls = LockSet(len(validators)) 333 | for i, privkey in enumerate(privkeys): 334 | if i < 4: 335 | v = VoteBlock(height=2, round=0, blockhash='0' * 32) 336 | else: 337 | v = VoteNil(height=2, round=0) 338 | v.sign(privkey) 339 | rls.add(v) 340 | assert not rls.has_noquorum 341 | assert rls.has_quorum_possible 342 | with pytest.raises(InvalidProposalError): # NoQuorum necessary R0 343 | bp = BlockProposal(height=2, round=1, block=blk2, signing_lockset=ls, round_lockset=rls) 344 | 345 | 346 | def 
test_VotingInstruction(): 347 | rls = LockSet(len(validators)) 348 | bh = '1' * 32 349 | for i, privkey in enumerate(privkeys): 350 | if i < 4: # quorum possible 351 | v = VoteBlock(height=2, round=0, blockhash=bh) 352 | 353 | else: 354 | v = VoteNil(height=2, round=0) 355 | v.sign(privkey) 356 | rls.add(v) 357 | assert rls.has_quorum_possible 358 | bp = VotingInstruction(height=2, round=1, round_lockset=rls) 359 | bp.sign(privkeys[0]) 360 | assert bh == bp.blockhash 361 | 362 | # noquorum 363 | rls = LockSet(len(validators)) 364 | for i, privkey in enumerate(privkeys): 365 | if i < 3: # noquorum possible 366 | v = VoteBlock(height=2, round=0, blockhash=bh) 367 | else: 368 | v = VoteNil(height=2, round=0) 369 | v.sign(privkey) 370 | rls.add(v) 371 | assert not rls.has_quorum_possible 372 | assert rls.has_noquorum 373 | with pytest.raises(InvalidProposalError): # QuorumPossiblle necessary R0 374 | bp = VotingInstruction(height=2, round=1, round_lockset=rls) 375 | 376 | # noquorum 377 | rls = LockSet(len(validators)) 378 | for i, privkey in enumerate(privkeys): 379 | if i < 3: # noquorum possible 380 | v = VoteBlock(height=2, round=0, blockhash=bh) 381 | else: 382 | v = VoteNil(height=2, round=0) 383 | 384 | v.sign(privkey) 385 | rls.add(v) 386 | assert not rls.has_quorum_possible 387 | assert rls.has_noquorum 388 | with pytest.raises(InvalidProposalError): # QuorumPossiblle necessary R0 389 | bp = VotingInstruction(height=2, round=1, round_lockset=rls) 390 | -------------------------------------------------------------------------------- /hydrachain/tests/test_docker_integration.py: -------------------------------------------------------------------------------- 1 | from collections import OrderedDict 2 | import os 3 | from os.path import abspath, dirname, join 4 | import socket 5 | from urlparse import urlsplit 6 | import time 7 | from logging import getLogger 8 | import operator 9 | 10 | from pyethapp.rpc_client import JSONRPCClient as OrigJSONRPCClient 11 | 
import pytest 12 | from requests.exceptions import RequestException 13 | from tinyrpc.transports.http import HttpPostClientTransport 14 | 15 | pytest.importorskip('compose', minversion="1.7.0") 16 | 17 | try: 18 | from compose.cli.command import get_project, get_client 19 | except ImportError: 20 | pass 21 | 22 | log = getLogger(__name__) 23 | 24 | JSONRPC_PORT = 4000 25 | PROJECT_NAME = "hydrachaintest" 26 | SERVICE_SCALE = OrderedDict(( 27 | ('statsmon', 1), 28 | ('bootstrap', 1), 29 | ('node', 3), 30 | )) 31 | 32 | 33 | try: 34 | get_client(os.environ).info() 35 | except RequestException: 36 | pytest.skip("Unable to connect to docker daemon. Skipping docker tests.") 37 | 38 | 39 | # Taken from http://stackoverflow.com/questions/12411431 40 | def pytest_runtest_makereport(item, call): 41 | if "incremental" in item.keywords: 42 | if call.excinfo is not None: 43 | parent = item.parent 44 | parent._previousfailed = item 45 | 46 | 47 | # Taken from http://stackoverflow.com/questions/12411431 48 | def pytest_runtest_setup(item): 49 | previousfailed = getattr(item.parent, "_previousfailed", None) 50 | if previousfailed is not None: 51 | pytest.xfail("previous test failed (%s)" % previousfailed.name) 52 | 53 | 54 | class DockerHarness(object): 55 | 56 | def __init__(self): 57 | self.base_dir = abspath(join(dirname(__file__), "..", "..", "docker", "dev", "hydrachain")) 58 | 59 | self.docker_host = None 60 | if os.environ.get('DOCKER_HOST'): 61 | self.docker_host = urlsplit(os.environ['DOCKER_HOST']).netloc.partition(":")[0] 62 | 63 | # This needs to happen before the `get_project()` call below since it caches os.environ. 64 | os.environ['HYDRACHAIN_HOST_PREFIX'] = PROJECT_NAME 65 | 66 | self.project = get_project(self.base_dir, project_name=PROJECT_NAME) 67 | self._modify_service_config() 68 | self.project.build() 69 | 70 | def _modify_service_config(self): 71 | """ 72 | Modify the services configurations to allow testing. 
73 | """ 74 | # Add exposed ports to `node`s 75 | self.project.get_service('node').options['ports'] = ["4000"] 76 | 77 | # Remove localhost binding from `bootstrap` to ensure we can connect 78 | # (even under boot2docker, etc.) 79 | # FIXME: Find better general solution for this 80 | bootstrap = self.project.get_service('bootstrap') 81 | bootstrap.options['ports'] = [p.replace("127.0.0.1:", "") 82 | for p in bootstrap.options['ports']] 83 | 84 | # Ensure `container_name` prefix matches test project name 85 | for service in self.project.services: 86 | container_name = service.options.get('container_name') 87 | if container_name: 88 | service.options['container_name'] = container_name.replace("hydrachain", 89 | PROJECT_NAME) 90 | 91 | def start(self): 92 | for service_name, scale in SERVICE_SCALE.items(): 93 | self.project.get_service(service_name).scale(scale) 94 | 95 | @property 96 | def containers_running(self): 97 | if len(self.project.containers()) < sum(SERVICE_SCALE.values()): 98 | for container in self.project.containers(stopped=True): 99 | if not container.is_running: 100 | print(container.logs()) 101 | return False 102 | return True 103 | 104 | def stop(self, remove=True): 105 | self.project.stop() 106 | if remove: 107 | self.project.remove_stopped() 108 | 109 | @property 110 | def rpc_ports(self): 111 | return [ 112 | self._transform_netloc(container.get_local_port(JSONRPC_PORT)) 113 | for container in self.hydrachain_containers 114 | ] 115 | 116 | @property 117 | def hydrachain_containers(self): 118 | for c in self._containers(["node", "bootstrap"]): 119 | yield c 120 | 121 | @property 122 | def stats_container(self): 123 | try: 124 | return next(self._containers(["statsmon"])) 125 | except StopIteration: 126 | return None 127 | 128 | def _containers(self, service_names): 129 | for service in self.project.get_services(service_names): 130 | for container in service.containers(): 131 | yield container 132 | 133 | def _transform_netloc(self, netloc): 134 | 
if not self.docker_host: 135 | return netloc 136 | _, sep, port = netloc.partition(":") 137 | return sep.join([self.docker_host, port]) 138 | 139 | 140 | class JSONRPCClient(OrigJSONRPCClient): 141 | 142 | def __init__(self, host="127.0.0.1", port=4000, print_communication=True, privkey=None, 143 | sender=None): 144 | super(JSONRPCClient, self).__init__(port, print_communication, privkey, sender) 145 | self.transport = HttpPostClientTransport("http://{}:{}".format(host, port)) 146 | 147 | 148 | @pytest.yield_fixture(scope='module') 149 | def docker_harness(): 150 | try: 151 | harness = DockerHarness() 152 | except RequestException: 153 | pytest.skip("Can't connect to docker daemon") 154 | return 155 | harness.start() 156 | yield harness 157 | for container in harness.hydrachain_containers: 158 | print(container.logs()) 159 | harness.stop() 160 | 161 | 162 | def _can_connect(target): 163 | sock = socket.socket() 164 | sock.settimeout(.1) 165 | try: 166 | host, port = target.split(":") 167 | port = int(port) 168 | sock.connect((host, port)) 169 | return True 170 | except socket.error: 171 | return False 172 | finally: 173 | sock.close() 174 | 175 | 176 | def _get_block_no(target): 177 | client = JSONRPCClient(*target.split(":")) 178 | try: 179 | return client.blocknumber() 180 | except RequestException: 181 | return None 182 | 183 | 184 | def wait_callback_or_timeout(callback, timeout, interval=.5): 185 | start = time.time() 186 | while time.time() - start < timeout: 187 | if callback(): 188 | return True 189 | time.sleep(interval) 190 | return False 191 | 192 | 193 | # The `incremental` mark causes one failure to abort all following tests in this class 194 | @pytest.mark.incremental 195 | class TestDockerSetup(object): 196 | 197 | # Since we're (usually) runnnig tests serially the wait times are cumulative 198 | 199 | @pytest.mark.parametrize('wait', (0, 1, 4, 5, 5)) 200 | def test_containers_up(self, docker_harness, wait): 201 | """ 202 | Ensure the containers keep 
running after increasing amounts of time have elapsed. 203 | """ 204 | time.sleep(wait) 205 | assert docker_harness.containers_running 206 | 207 | def test_all_nodes_rpc_connectable(self, docker_harness): 208 | targets = docker_harness.rpc_ports 209 | assert wait_callback_or_timeout( 210 | lambda: all(_can_connect(target) for target in targets), 211 | 60 212 | ) 213 | 214 | @pytest.mark.parametrize(('block_no', 'op', 'timeout'), 215 | ((1, operator.ge, 120), 216 | (10, operator.eq, 25), 217 | )) 218 | def test_all_nodes_reach_block_no(self, docker_harness, block_no, op, timeout): 219 | targets = docker_harness.rpc_ports 220 | assert wait_callback_or_timeout( 221 | lambda: all(op(_get_block_no(target), block_no) for target in targets), 222 | timeout, 223 | interval=2 224 | ) 225 | 226 | # TODO: Fix bug and remove 227 | @pytest.mark.xfail(reason="Probable hydrachain bug") 228 | def test_transaction_increases_block_no(self, docker_harness): 229 | targets = docker_harness.rpc_ports 230 | client = JSONRPCClient(*(targets[0].split(":"))) 231 | client.send_transaction( 232 | client.coinbase, 233 | "1" * 40, 234 | 1 235 | ) 236 | assert wait_callback_or_timeout( 237 | lambda: all(_get_block_no(target) == 101 for target in targets), 238 | 10, 239 | interval=2 240 | ) 241 | -------------------------------------------------------------------------------- /hydrachain/tests/test_hdc_protocol.py: -------------------------------------------------------------------------------- 1 | import rlp 2 | from ethereum import tester 3 | from ethereum import utils 4 | from devp2p.service import WiredService 5 | from devp2p.protocol import BaseProtocol 6 | from devp2p.app import BaseApp 7 | from hydrachain.consensus.protocol import HDCProtocol 8 | from hydrachain.consensus.base import genesis_signing_lockset, VoteNil, VoteBlock, LockSet 9 | from hydrachain.consensus.base import VotingInstruction, BlockProposal, TransientBlock 10 | 11 | 12 | class PeerMock(object): 13 | packets = [] 14 | 
config = dict() 15 | 16 | def send_packet(self, packet): 17 | self.packets.append(packet) 18 | 19 | 20 | def setup(): 21 | peer = PeerMock() 22 | proto = HDCProtocol(peer, WiredService(BaseApp())) 23 | proto.service.app.config['eth'] = dict(network_id=1337) 24 | chain = tester.state() 25 | cb_data = [] 26 | 27 | def cb(proto, **data): 28 | cb_data.append((proto, data)) 29 | return peer, proto, chain, cb_data, cb 30 | 31 | 32 | def test_basics(): 33 | peer, proto, chain, cb_data, cb = setup() 34 | 35 | assert isinstance(proto, BaseProtocol) 36 | 37 | d = dict() 38 | d[proto] = 1 39 | assert proto in d 40 | assert d[proto] == 1 41 | assert not proto 42 | proto.start() 43 | assert proto 44 | 45 | 46 | def test_status(): 47 | peer, proto, chain, cb_data, cb = setup() 48 | genesis = chain.blocks[-1] 49 | ls = LockSet(1) 50 | 51 | # test status 52 | proto.send_status( 53 | genesis_hash=genesis.hash, 54 | current_lockset=ls 55 | ) 56 | packet = peer.packets.pop() 57 | proto.receive_status_callbacks.append(cb) 58 | proto._receive_status(packet) 59 | 60 | _p, _d = cb_data.pop() 61 | assert _p == proto 62 | assert isinstance(_d, dict) 63 | assert _d['genesis_hash'] == genesis.hash 64 | assert _d['current_lockset'] == ls 65 | assert 'eth_version' in _d 66 | assert 'network_id' in _d 67 | 68 | 69 | privkeys = [chr(i) * 32 for i in range(1, 11)] 70 | validators = [utils.privtoaddr(p) for p in privkeys] 71 | 72 | 73 | def create_proposal(blk): 74 | signing_lockset = LockSet(len(validators)) 75 | for privkey in privkeys: 76 | v = VoteBlock(blk.number - 1, 0, blk.hash) 77 | v.sign(privkey) 78 | signing_lockset.add(v) 79 | bp = BlockProposal(height=blk.number, round=0, block=blk, 80 | signing_lockset=signing_lockset, round_lockset=None) 81 | bp.sign(tester.k0) 82 | return bp 83 | 84 | 85 | def test_blocks(): 86 | peer, proto, chain, cb_data, cb = setup() 87 | genesis_signing_lockset(chain.blocks[0], privkeys[0]) 88 | 89 | # test blocks 90 | chain.mine(n=2) 91 | assert 
len(chain.blocks) == 3 92 | proposals = [create_proposal(b) for b in chain.blocks[1:]] 93 | payload = [rlp.encode(p) for p in proposals] 94 | proto.send_blockproposals(*payload) 95 | packet = peer.packets.pop() 96 | assert len(rlp.decode(packet.payload)) == 2 97 | 98 | def list_cb(proto, blocks): 99 | cb_data.append((proto, blocks)) 100 | 101 | proto.receive_blockproposals_callbacks.append(list_cb) 102 | proto._receive_blockproposals(packet) 103 | 104 | _p, proposals = cb_data.pop() 105 | assert isinstance(proposals, tuple) 106 | for proposal in proposals: 107 | assert isinstance(proposal, BlockProposal) 108 | assert proposal.height == proposal.block.header.number 109 | assert isinstance(proposal.block, TransientBlock) 110 | assert isinstance(proposal.block.transaction_list, tuple) 111 | assert isinstance(proposal.block.uncles, tuple) 112 | # assert that transactions and uncles have not been decoded 113 | assert len(proposal.block.transaction_list) == 0 114 | assert len(proposal.block.uncles) == 0 115 | 116 | 117 | def test_blockproposal(): 118 | pass 119 | 120 | 121 | def test_votinginstruction(): 122 | peer, proto, chain, cb_data, cb = setup() 123 | height = 1 124 | bh = '1' * 32 125 | round_lockset = LockSet(len(validators)) 126 | for i, privkey in enumerate(privkeys): 127 | if i < len(validators) // 3 + 1: 128 | v = VoteBlock(height, 0, bh) 129 | else: 130 | v = VoteNil(height, 0) 131 | v.sign(privkey) 132 | round_lockset.add(v) 133 | bp = VotingInstruction(height=height, round=1, round_lockset=round_lockset) 134 | bp.sign(tester.k0) 135 | 136 | payload = bp 137 | 138 | proto.send_votinginstruction(payload) 139 | packet = peer.packets.pop() 140 | assert len(rlp.decode(packet.payload)) == 1 141 | 142 | def list_cb(proto, votinginstruction): 143 | cb_data.append((proto, votinginstruction)) 144 | 145 | proto.receive_votinginstruction_callbacks.append(list_cb) 146 | proto._receive_votinginstruction(packet) 147 | 148 | _p, vi = cb_data.pop() 149 | assert vi == bp 
150 | 151 | 152 | def test_getblockproposals(): 153 | peer, proto, chain, cb_data, cb = setup() 154 | payload = range(10) 155 | proto.send_getblockproposals(*payload) 156 | packet = peer.packets.pop() 157 | assert len(rlp.decode(packet.payload)) == len(payload) 158 | 159 | def list_cb(proto, blocks): 160 | cb_data.append((proto, blocks)) 161 | 162 | proto.receive_getblockproposals_callbacks.append(list_cb) 163 | proto._receive_getblockproposals(packet) 164 | _p, data = cb_data.pop() 165 | assert data == tuple(payload) 166 | 167 | 168 | def test_vote(): 169 | peer, proto, chain, cb_data, cb = setup() 170 | 171 | def list_cb(proto, vote): 172 | cb_data.append((proto, vote)) 173 | proto.receive_vote_callbacks.append(list_cb) 174 | 175 | # VoteBlock 176 | payload = v = VoteBlock(1, 0, '0' * 32) 177 | v.sign(privkeys[0]) 178 | proto.send_vote(payload) 179 | packet = peer.packets.pop() 180 | proto._receive_vote(packet) 181 | _p, data = cb_data.pop() 182 | assert data == payload 183 | assert isinstance(data, VoteBlock) 184 | 185 | payload = v = VoteNil(1, 0) 186 | v.sign(privkeys[0]) 187 | proto.send_vote(payload) 188 | packet = peer.packets.pop() 189 | proto._receive_vote(packet) 190 | _p, data = cb_data.pop() 191 | assert data == payload 192 | assert isinstance(data, VoteNil) 193 | -------------------------------------------------------------------------------- /hydrachain/tests/test_hdc_service.py: -------------------------------------------------------------------------------- 1 | import tempfile 2 | 3 | import ethereum.keys 4 | import pytest 5 | import rlp 6 | from ethereum import utils 7 | from ethereum.db import EphemDB 8 | from pyethapp.accounts import Account, AccountsService 9 | 10 | from hydrachain import hdc_service 11 | from hydrachain.consensus import protocol as hdc_protocol 12 | from hydrachain.consensus.base import (Block, BlockProposal, TransientBlock, InvalidProposalError, 13 | LockSet, Ready) 14 | 15 | 16 | # reduce key derivation iterations 17 | 
ethereum.keys.PBKDF2_CONSTANTS['c'] = 100 18 | 19 | privkeys = [chr(i) * 32 for i in range(1, 11)] 20 | validators = [utils.privtoaddr(p) for p in privkeys] 21 | 22 | 23 | empty = object() 24 | 25 | 26 | class AppMock(object): 27 | 28 | tmpdir = tempfile.mkdtemp() 29 | 30 | config = hdc_service.ChainService.default_config 31 | 32 | config['db'] = dict(path='_db') 33 | config['data_dir'] = tmpdir 34 | config['hdc'] = dict(validators=validators) 35 | 36 | class Services(dict): 37 | 38 | class peermanager: 39 | 40 | @classmethod 41 | def broadcast(*args, **kwargs): 42 | pass 43 | 44 | def __init__(self, privkey): 45 | self.services = self.Services() 46 | self.services.db = EphemDB() 47 | self.services.accounts = AccountsService(self) 48 | account = Account.new(password='', key=privkey) 49 | self.services.accounts.add_account(account, store=False) 50 | 51 | 52 | class PeerMock(object): 53 | 54 | def __init__(self, app): 55 | self.config = app.config 56 | self.send_packet = lambda x: x 57 | self.remote_client_version = empty 58 | 59 | 60 | @pytest.mark.xfail(reason="Broken test? 
See line 72") 61 | def test_receive_proposal(): 62 | app = AppMock(privkeys[0]) 63 | chainservice = hdc_service.ChainService(app) 64 | proto = hdc_protocol.HDCProtocol(PeerMock(app), chainservice) 65 | cm = chainservice.consensus_manager 66 | p = cm.active_round.mk_proposal() 67 | assert isinstance(p.block, Block) 68 | r = rlp.encode(p) 69 | p = rlp.decode(r, sedes=BlockProposal) 70 | assert isinstance(p.block, TransientBlock) 71 | with pytest.raises(InvalidProposalError): # not the proposser, fix test 72 | chainservice.on_receive_newblockproposal(proto, p) 73 | # assert chainservice.chain.head.number == 1 # we don't have consensus yet 74 | 75 | 76 | def test_broadcast_filter(): 77 | r = Ready(0, LockSet(1)) 78 | r.sign('x' * 32) 79 | df = hdc_service.DuplicatesFilter() 80 | assert r not in df 81 | assert df.update(r) 82 | assert not df.update(r) 83 | assert not df.update(r) 84 | assert r in df 85 | 86 | # def receive_blocks(rlp_data, leveldb=False, codernitydb=False): 87 | # app = AppMock() 88 | # if leveldb: 89 | # app.db = leveldb_service.LevelDB( 90 | # os.path.join(app.config['app']['dir'], app.config['db']['path'])) 91 | 92 | # chainservice = hdc_service.ChainService(app) 93 | # proto = hdc_protocol.HDCProtocol(PeerMock(app), chainservice) 94 | # b = hdc_protocol.HDCProtocol.blocks.decode_payload(rlp_data) 95 | # chainservice.on_receive_blocks(proto, b) 96 | 97 | 98 | # def test_receive_block1(): 99 | # rlp_data = rlp.encode([rlp.decode(block_1.decode('hex'))]) 100 | # receive_blocks(rlp_data) 101 | 102 | 103 | # def test_receive_blocks_256(): 104 | # receive_blocks(data256.decode('hex')) 105 | 106 | 107 | # def test_receive_blocks_256_leveldb(): 108 | # receive_blocks(data256.decode('hex'), leveldb=True) 109 | -------------------------------------------------------------------------------- /hydrachain/tests/test_sim_basics.py: -------------------------------------------------------------------------------- 1 | from hydrachain.consensus.simulation import 
Network, assert_heightdistance 2 | from hydrachain.consensus.simulation import assert_maxrounds, assert_blocktime, log 3 | from hydrachain.consensus.manager import ConsensusManager 4 | from ethereum.transactions import Transaction 5 | import gevent 6 | 7 | 8 | def test_basic_gevent(): 9 | network = Network(num_nodes=4) 10 | network.connect_nodes() 11 | network.normvariate_base_latencies() 12 | network.start() 13 | network.run(6) 14 | r = network.check_consistency() 15 | # note gevent depends on real clock, therefore results are not predictable 16 | assert_maxrounds(r) 17 | assert_heightdistance(r) 18 | 19 | 20 | def test_basic_simenv(): 21 | network = Network(num_nodes=4, simenv=True) 22 | network.connect_nodes() 23 | network.normvariate_base_latencies() 24 | network.start() 25 | network.run(5) 26 | r = network.check_consistency() 27 | assert_maxrounds(r) 28 | assert_heightdistance(r, max_distance=1) 29 | assert_blocktime(r, 1.5) 30 | 31 | 32 | def test_basic_singlenode(): 33 | network = Network(num_nodes=1, simenv=True) 34 | network.connect_nodes() 35 | network.normvariate_base_latencies() 36 | network.start() 37 | network.run(5) 38 | r = network.check_consistency() 39 | assert_maxrounds(r) 40 | assert_heightdistance(r) 41 | assert_blocktime(r, 1.5) 42 | 43 | 44 | def test_transactions(monkeypatch): 45 | sim_time = 10 46 | num_txs = 2 47 | num_initial_blocks = 2 48 | 49 | monkeypatch.setattr(ConsensusManager, 'num_initial_blocks', num_initial_blocks) 50 | 51 | network = Network(num_nodes=4, simenv=False) 52 | network.connect_nodes() 53 | network.normvariate_base_latencies() 54 | app = network.nodes[0] 55 | chainservice = app.services.chainservice 56 | 57 | # track txs 58 | txs = [] 59 | 60 | def cb(blk): 61 | log.DEV('ON NEW HEAD', blk=blk) 62 | if num_initial_blocks <= blk.number < num_initial_blocks + num_txs: 63 | if blk.number > num_initial_blocks: 64 | assert blk.num_transactions() == 1 65 | sender = chainservice.chain.coinbase 66 | to = 'x' * 20 67 | nonce = 
chainservice.chain.head.get_nonce(sender) 68 | log.DEV('CREATING TX', nonce=nonce) 69 | gas = 21000 70 | gasprice = 1 71 | value = 1 72 | assert chainservice.chain.head.get_balance(sender) > gas * gasprice + value 73 | tx = Transaction(nonce, gasprice, gas, to, value, data='') 74 | app.services.accounts.sign_tx(sender, tx) 75 | assert tx.sender == sender 76 | 77 | def _do(): 78 | log.DEV('ADDING TX', nonce=nonce) 79 | success = chainservice.add_transaction(tx) 80 | assert success 81 | log.DEV('ADDED TX', success=success) 82 | 83 | if network.simenv: 84 | network.simenv.process(_do()) 85 | else: 86 | gevent.spawn(_do) 87 | txs.append(tx) 88 | 89 | print(chainservice.on_new_head_cbs) 90 | chainservice.on_new_head_cbs.append(cb) 91 | network.start() 92 | network.run(sim_time) 93 | r = network.check_consistency() 94 | log.debug(r) 95 | expected_head_number = num_initial_blocks + num_txs 96 | assert chainservice.chain.head.number == expected_head_number 97 | assert_maxrounds(r) 98 | assert_heightdistance(r, max_distance=1) 99 | #assert_blocktime(r, 1.5) 100 | 101 | # check if all txs are received in all chains 102 | tx_pos = set() 103 | for app in network.nodes: 104 | for tx in txs: 105 | r = app.services.chainservice.chain.index.get_transaction(tx.hash) 106 | assert len(r) == 3 107 | t, blk, idx = r 108 | assert tx == t 109 | tx_pos.add(r) 110 | assert len(tx_pos) == len(txs) 111 | -------------------------------------------------------------------------------- /hydrachain/tests/test_sim_failures.py: -------------------------------------------------------------------------------- 1 | from hydrachain.consensus.manager import ConsensusManager 2 | from hydrachain.consensus.simulation import Network, assert_heightdistance 3 | 4 | 5 | def test_failing_validators(): 6 | network = Network(num_nodes=10, simenv=True) 7 | network.connect_nodes() 8 | network.normvariate_base_latencies() 9 | network.disable_validators(num=3) 10 | network.start() 11 | network.run(10) 12 | r = 
network.check_consistency() 13 | assert_heightdistance(r) 14 | 15 | 16 | def test_slow_validators(): 17 | network = Network(num_nodes=10, simenv=True) 18 | network.connect_nodes() 19 | network.normvariate_base_latencies() 20 | network.throttle_validators(num=3) 21 | network.start() 22 | network.run(5) 23 | r = network.check_consistency() 24 | assert_heightdistance(r, 1) 25 | 26 | 27 | def test_slow_and_failing_validators(): 28 | network = Network(num_nodes=10, simenv=True) 29 | network.connect_nodes() 30 | network.normvariate_base_latencies() 31 | network.disable_validators(num=3) 32 | network.throttle_validators(num=6) 33 | network.start() 34 | network.run(10) 35 | r = network.check_consistency() 36 | assert_heightdistance(r, 1) 37 | 38 | 39 | def test_low_timeout(monkeypatch): 40 | monkeypatch(ConsensusManager, 'round_timeout', 0.1) 41 | 42 | network = Network(num_nodes=10, simenv=True) 43 | network.connect_nodes() 44 | network.normvariate_base_latencies() 45 | network.start() 46 | network.run(5) 47 | 48 | r = network.check_consistency() 49 | assert_heightdistance(r) 50 | -------------------------------------------------------------------------------- /hydrachain/tests/test_sim_joins.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | from hydrachain.consensus.simulation import Network, assert_heightdistance 3 | from ethereum.transactions import Transaction 4 | 5 | 6 | # some known troubling validator counts 7 | @pytest.mark.parametrize('validators', range(4, 7) + [10]) 8 | @pytest.mark.parametrize('late', range(1, 3)) 9 | @pytest.mark.parametrize('delay', [2]) 10 | # run this test with `tox -- -rx -k test_late_joins` 11 | def test_late_joins(validators, late, delay): 12 | """In this test, we spawn a network with a number of 13 | `validators` validator nodes, where a number of `late` nodes stay 14 | offline until after a certain delay: 15 | 16 | >>> initial sync_time = delay * (validators - late) 17 | 18 | 
# some known troubling validator counts
@pytest.mark.parametrize('validators', list(range(4, 7)) + [10])
@pytest.mark.parametrize('late', range(1, 3))
@pytest.mark.parametrize('delay', [2])
# run this test with `tox -- -rx -k test_late_joins`
def test_late_joins(validators, late, delay):
    """Late-joining validators must catch up with the network head.

    A network of `validators` nodes is started while `late` of them stay
    offline for roughly ``delay * (validators - late)`` simulation time.
    The late-joiners are then activated and given time to sync up to the
    head (``num_initial_blocks``, default 10).  Because in some
    configurations they still lag behind at that point, a transaction is
    injected into the fully-online network to force one more block;
    afterwards every node must sit at height ``num_initial_blocks + 1``.
    """
    punctual = validators - late

    network = Network(num_nodes=validators, simenv=True)
    for straggler in network.nodes[punctual:]:
        straggler.isactive = False

    # phase 1: only the punctual nodes are online and produce blocks
    network.connect_nodes()
    network.normvariate_base_latencies()
    network.start()
    network.run(delay * punctual)

    # phase 2: bring the stragglers online and let them sync
    for straggler in network.nodes[punctual:]:
        straggler.isactive = True
    network.connect_nodes()
    network.normvariate_base_latencies()
    network.start()
    network.run(max(10, validators * delay))

    report = network.check_consistency()

    # the majority must be at block 10; late-joiners may be at block 9
    # or even still at block 0
    assert_heightdistance(report, max_distance=10)
    assert report['heights'][10] >= punctual

    # inject a value transfer so that one more block gets created
    chainservice = network.nodes[0].services.chainservice
    sender = chainservice.chain.coinbase
    recipient = 'x' * 20
    nonce = chainservice.chain.head.get_nonce(sender)
    gas, gasprice, value = 21000, 1, 1
    assert chainservice.chain.head.get_balance(sender) > gas * gasprice + value
    tx = Transaction(nonce, gasprice, gas, recipient, value, data='')
    network.nodes[0].services.accounts.sign_tx(sender, tx)
    assert tx.sender == sender
    assert chainservice.add_transaction(tx)

    # run in ever longer bursts until all nodes reached height 11
    for burst in range(1, 10):
        network.connect_nodes()
        network.normvariate_base_latencies()
        network.start()
        network.run(2 * burst)
        report = network.check_consistency()
        if report['heights'][11] == validators:
            break

    assert_heightdistance(report)
    assert report['heights'][11] == validators
def test_broadcasting():
    """Consensus must make progress when the nodes are wired as a ring
    (each node only talks to its two neighbours), i.e. messages must be
    forwarded/re-broadcast rather than relying on a full mesh.
    """
    # NOTE(review): dropped the dead save/restore of
    # ``ConsensusManager.round_timeout`` -- the override itself was
    # commented out in the original, so the save/restore never had any
    # effect.
    network = Network(num_nodes=10, simenv=True)

    # connect the nodes as a ring: node i <-> node (i + 1) % N
    nodes = network.nodes
    for i, node in enumerate(nodes):
        node.connect_app(nodes[(i + 1) % len(nodes)])

    network.normvariate_base_latencies()
    network.start()
    network.run(10)

    r = network.check_consistency()
    assert_maxrounds(r)
    assert_heightdistance(r)
'[{"constant":false,"inputs":[{"name":"x","type":"uint256"}],"name":"set","outputs":[],"type":"function"},{"constant":true,"inputs":[],"name":"get","outputs":[{"name":"retVal","type":"uint256"}],"type":"function"}]' # noqa 28 | 29 | contract_code = "606060405260978060106000396000f360606040526000357c01000000000000000000000000000000000000000000000000000000009004806360fe47b11460415780636d4ce63c14605757603f565b005b605560048080359060200190919050506078565b005b606260048050506086565b6040518082815260200191505060405180910390f35b806000600050819055505b50565b600060006000505490506094565b9056" # noqa 30 | 31 | 32 | class TestDriverThread(Thread): 33 | def __init__(self, group=None, target=None, name=None, args=(), kwargs=None, verbose=None, 34 | gasprice=None, evt=None, port=4000): 35 | super(TestDriverThread, self).__init__(group, target, name, args, kwargs, verbose) 36 | self.gasprice = gasprice 37 | self.log = slogging.getLogger('test_working_app') 38 | self.test_successful = False 39 | self.finished = False 40 | self.evt = evt 41 | self.port = port 42 | 43 | def wait_for_blocknumber(self, number, retry=20): 44 | block = self.client.call('eth_getBlockByNumber', hex(number), False) 45 | while block is None and retry > 0: 46 | block = self.client.call('eth_getBlockByNumber', hex(number), False) 47 | time.sleep(.5) 48 | retry -= 1 49 | assert retry > 0, "could not find block {}".format(number) 50 | return block 51 | 52 | def connect_client(self): 53 | while True: 54 | try: 55 | self.client = JSONRPCClient(port=self.port, print_communication=False) 56 | self.client.call('web3_clientVersion') 57 | break 58 | except ConnectionError: 59 | time.sleep(0.5) 60 | 61 | def run(self): 62 | self.log.debug('test started') 63 | 64 | try: 65 | self.connect_client() 66 | self.log.debug('client connected') 67 | 68 | # Read initial blocks created by HydraChain on startup 69 | self.wait_for_blocknumber(10) 70 | self.log.debug("found block number 10") 71 | 72 | # Create a contract 73 | params = 
{'from': self.client.coinbase.encode('hex'), 74 | 'to': '', 75 | 'data': contract_code, 76 | 'gasPrice': '0x{}'.format(self.gasprice)} 77 | self.client.call('eth_sendTransaction', params) 78 | self.log.debug('eth_sendTransaction OK') 79 | 80 | # Wait for new block 81 | recent_block = self.wait_for_blocknumber(11) 82 | 83 | self.log.debug('recent_block_hash {}'.format(recent_block)) 84 | 85 | block = self.client.call('eth_getBlockByHash', recent_block['hash'], True) 86 | self.log.debug('eth_getBlockByHash OK {}'.format(block)) 87 | 88 | assert block['transactions'], 'no transactions in block' 89 | tx = block['transactions'][0] 90 | assert tx['to'] == '0x' 91 | assert tx['gasPrice'] == params['gasPrice'] 92 | assert len(tx['input']) > len('0x') 93 | assert tx['input'].startswith('0x') 94 | 95 | # Get transaction receipt to have the address of contract 96 | receipt = self.client.call('eth_getTransactionReceipt', tx['hash']) 97 | self.log.debug('eth_getTransactionReceipt OK {}'.format(receipt)) 98 | 99 | assert receipt['transactionHash'] == tx['hash'] 100 | assert receipt['blockHash'] == tx['blockHash'] 101 | assert receipt['blockHash'] == block['hash'] 102 | 103 | # Get contract address from receipt 104 | contract_address = receipt['contractAddress'] 105 | code = self.client.call('eth_getCode', contract_address) 106 | self.log.debug('eth_getCode OK {}'.format(code)) 107 | 108 | assert code.startswith('0x') 109 | assert len(code) > len('0x') 110 | 111 | # Perform some action on contract (set value to random number) 112 | rand_value = random.randint(64, 1024) 113 | contract = self.client.new_abi_contract(contract_interface, contract_address) 114 | contract.set(rand_value, gasprice=self.gasprice) 115 | self.log.debug('contract.set({}) OK'.format(rand_value)) 116 | 117 | # Wait for new block 118 | recent_block = self.wait_for_blocknumber(12) 119 | # recent_block_hash = self.wait_for_new_block() 120 | 121 | block = self.client.call('eth_getBlockByHash', 
@pytest.mark.parametrize('gasprice', (0, 1))
@pytest.mark.xfail(reason="the test result is non-deterministic. fixme!")  # FIXME
def test_example(gasprice, caplog):
    """End-to-end smoke test: boot the app through the CLI runner and let
    a background ``TestDriverThread`` drive it over JSON-RPC (contract
    creation, contract call, receipt checks), then shut the app down once
    the driver signals completion.
    """
    rpc_port = random.randint(4000, 5000)
    if caplog:
        caplog.set_level(logging.DEBUG)

    # The driver talks to the app from a separate thread while the CLI
    # runner blocks in the foreground.
    evt = gevent.event.Event()
    driver = TestDriverThread(gasprice=gasprice, evt=evt, port=rpc_port)
    driver.setDaemon(True)
    driver.start()

    # Replace the serve loop so the app stops when the driver is done.
    def mock_serve_until_stopped(*apps):
        evt.wait()
        for app_ in apps:
            app_.stop()

    app.serve_until_stopped = mock_serve_until_stopped

    runner = CliRunner()
    with runner.isolated_filesystem():
        datadir = 'datadir{}'.format(gasprice)
        cli_args = [
            '-d', datadir,
            '-l', ':WARNING,hdc.chainservice:INFO,test_working_app:DEBUG',
            '-c', 'jsonrpc.listen_port={}'.format(rpc_port),
            'runmultiple',
        ]
        runner.invoke(app.pyethapp_app.app, cli_args)
    while not driver.finished:
        gevent.sleep(1)

    assert driver.test_successful
class DummyClient(JSONRPCClient):
    """Minimal JSON-RPC client pointed at an arbitrary host/port.

    Deliberately does NOT call the parent constructor: it only sets up
    the transport and the few attributes the benchmark below relies on.
    """

    def __init__(self, host, port=4000, print_communication=True, privkey=None, sender=None):
        endpoint = 'http://{}:{}'.format(host, port)
        self.transport = HttpPostClientTransport(endpoint)
        self.port = port
        self.privkey = privkey
        self._sender = sender
        self.print_communication = print_communication
def sha3_256(x):
    """Return a Keccak-256 hash object over ``x``.

    This is the pre-NIST Keccak padding variant (as used by Ethereum),
    NOT standardised SHA3-256 -- the two produce different digests.
    """
    return keccak.new(digest_bits=256, data=x)


def sha3(seed):
    """Return the 32-byte Keccak-256 digest of ``seed``.

    NOTE(review): ``bytes(seed)`` is a Python-2 idiom (``bytes`` is an
    alias of ``str`` there); under Python 3 an int argument would yield
    a zero-filled buffer instead -- confirm before porting.
    """
    return sha3_256(bytes(seed)).digest()
class PyTest(TestCommand):
    """``python setup.py test`` entry point that delegates to pytest."""

    def finalize_options(self):
        super(PyTest, self).finalize_options()
        self.test_args = []
        self.test_suite = True

    def run_tests(self):
        # Imported here because outside the test command the eggs
        # are not loaded yet.
        import pytest
        raise SystemExit(pytest.main(self.test_args))
45 | # see: https://github.com/ethereum/pyethapp/wiki/Development:-Versions-and-Releases 46 | version = '0.3.2' 47 | 48 | 49 | setup( 50 | name='hydrachain', 51 | version=version, 52 | description="Permissioned Distributed Ledger based on Ethereum", 53 | long_description=readme + '\n\n' + history, 54 | author="HeikoHeiko", 55 | author_email='heiko@brainbot.com', 56 | url='https://github.com/HydraChain/hydrachain', 57 | packages=[ 58 | 'hydrachain', 59 | 'hydrachain.consensus', 60 | 'hydrachain.examples', 61 | 'hydrachain.examples.native', 62 | 'hydrachain.examples.native.fungible', 63 | ], 64 | include_package_data=True, 65 | license="MIT", 66 | zip_safe=False, 67 | keywords='hydrachain', 68 | classifiers=[ 69 | 'Development Status :: 2 - Pre-Alpha', 70 | 'Intended Audience :: Developers', 71 | 'License :: OSI Approved :: MIT License', 72 | 'Natural Language :: English', 73 | "Programming Language :: Python :: 2", 74 | 'Programming Language :: Python :: 2.7', 75 | ], 76 | cmdclass={'test': PyTest}, 77 | install_requires=install_requires, 78 | tests_require=test_requirements, 79 | entry_points={ 80 | 'console_scripts': [ 81 | "hydrachain = hydrachain.app:app" 82 | ] 83 | } 84 | ) 85 | -------------------------------------------------------------------------------- /tox.ini: -------------------------------------------------------------------------------- 1 | [tox] 2 | envlist = py27,pypy,flake8,check_readme,coverage 3 | 4 | [testenv] 5 | deps = 6 | -rrequirements.txt 7 | pytest-catchlog 8 | coverage==4.0.3 9 | docker-compose==1.7.0 10 | py27: ethereum-serpent>=2.0.2 11 | 12 | passenv = DOCKER_HOST DOCKER_TLS_VERIFY DOCKER_CERT_PATH 13 | 14 | commands = 15 | coverage run --source hydrachain --branch -m py.test --ignore examples/ --ignore hydrachain/examples/ {posargs} 16 | 17 | 18 | [testenv:coverage] 19 | basepython = python2.7 20 | skip_install = True 21 | 22 | deps = 23 | coverage==4.0.3 24 | 25 | commands = 26 | coverage report --show-missing 27 | 28 | 29 | 
[testenv:flake8] 30 | basepython = python2.7 31 | skip_install = True 32 | 33 | deps = 34 | flake8==2.5.4 35 | flake8-tuple==0.2.9 36 | 37 | commands = 38 | flake8 --exclude hydrachain/tests hydrachain 39 | 40 | 41 | [testenv:check_readme] 42 | skip_install = true 43 | deps = readme_renderer 44 | commands = python setup.py check --restructuredtext --strict 45 | 46 | [flake8] 47 | max-line-length = 99 48 | max-complexity = 10 49 | --------------------------------------------------------------------------------