├── .ci
├── travis-after-success.sh
├── travis-before-install.sh
├── travis-install.sh
└── travis-script.sh
├── .codecov.yml
├── .coveragerc
├── .dockerignore
├── .gitignore
├── .travis.yml
├── CRAPL-LICENSE.txt
├── Dockerfile
├── Makefile
├── README.md
├── ROADMAP.rst
├── docker-compose.yml
├── docs
├── Makefile
├── conf.py
├── index.rst
├── libref.rst
└── roadmap.rst
├── experiments
├── __init__.py
├── benchmark
│ ├── __init__.py
│ ├── process.py
│ ├── process_old.py
│ └── visualization
│ │ ├── index.html
│ │ └── jquery-2.1.4.min.js
├── ec2
│ ├── __init__.py
│ ├── fabfile.py
│ ├── generate_tx.py
│ ├── list.py
│ ├── logs
│ │ └── readme.md
│ ├── process_raw.py
│ └── utility.py
├── honest_party_test.py
├── honest_party_test_EC2.py
├── honest_party_test_tor_multipleCircuits.py
├── mmr13_expt.py
├── mmr13_tor.py
├── mmr13_tor_multipleCircuits.py
├── multipleTorLauncher.sh
├── names.txt
├── plots
│ ├── plot_latency.py
│ ├── plot_latency_throughput.py
│ ├── plot_latency_tor.py
│ └── plot_throughput.py
├── run_local.py
└── run_local_tor.py
├── honeybadgerbft
├── __init__.py
├── core
│ ├── __init__.py
│ ├── binaryagreement.py
│ ├── commoncoin.py
│ ├── commonsubset.py
│ ├── honeybadger.py
│ ├── honeybadger_block.py
│ └── reliablebroadcast.py
├── crypto
│ ├── __init__.py
│ ├── ecdsa
│ │ ├── __init__.py
│ │ └── generate_keys_ecdsa.py
│ ├── threshenc
│ │ ├── __init__.py
│ │ ├── generate_keys.py
│ │ └── tpke.py
│ └── threshsig
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── boldyreva.py
│ │ ├── boldyreva_gipc.py
│ │ ├── boldyreva_pool.py
│ │ ├── generate_keys.py
│ │ └── millerrabin.py
└── exceptions.py
├── misc
├── README
├── ansi2html.sh
├── includeTransaction.py
├── shoup_tsig
│ ├── generate_keys_shoup.py
│ └── shoup.py
├── utils.py
└── zfec_benchmark.py
├── pytest.ini
├── setup.cfg
├── setup.py
├── start.sh
├── test.sh
└── test
├── __init__.py
├── byzantine.py
├── conftest.py
├── crypto
├── conftest.py
├── ecdsa
│ └── test_generate_keys_ecdsa.py
├── threshenc
│ └── test_tpke.py
└── threshsig
│ ├── test_boldyreva.py
│ ├── test_boldyreva_gipc.py
│ ├── test_boldyreva_pool.py
│ ├── test_generate_enckeys.py
│ ├── test_generate_sigkeys.py
│ └── test_millerrabin.py
├── demo_attack_issue59.py
├── test_binaryagreement.py
├── test_commoncoin.py
├── test_commonsubset.py
├── test_honeybadger.py
├── test_honeybadger_block.py
├── test_rbc.py
└── test_threshenc.py
/.ci/travis-after-success.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | set -e -x
4 |
5 | if [ "${BUILD}" == "tests" ]; then
6 | codecov
7 | fi
8 |
--------------------------------------------------------------------------------
/.ci/travis-before-install.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | set -e -x
4 |
5 | if [ "${BUILD}" != "flake8" ]; then
6 | apt-get update -qq
7 | apt-get -y install flex bison libgmp-dev libmpc-dev python-dev libssl-dev
8 | wget https://crypto.stanford.edu/pbc/files/pbc-0.5.14.tar.gz
9 | tar -xvf pbc-0.5.14.tar.gz
10 | cd pbc-0.5.14
11 | ./configure
12 | make
13 | make install
14 | cd ..
15 | fi
16 |
--------------------------------------------------------------------------------
/.ci/travis-install.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | set -e -x
4 |
5 | pip install --upgrade pip
6 |
7 | if [ "${BUILD}" != "flake8" ]; then
8 | pip install --upgrade setuptools
9 | git clone https://github.com/JHUISI/charm.git
10 | cd charm && ./configure.sh && make install
11 | cd ..
12 | fi
13 |
14 | if [ "${BUILD}" == "tests" ]; then
15 | pip install -e .[test]
16 | pip install --upgrade codecov
17 | elif [ "${BUILD}" == "flake8" ]; then
18 | pip install flake8
19 | elif [ "${BUILD}" == "docs" ]; then
20 | pip install -e .[docs]
21 | fi
22 |
--------------------------------------------------------------------------------
/.ci/travis-script.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | set -e -x
4 |
5 | if [ "${BUILD}" == "tests" ]; then
6 | pytest -v --cov=honeybadgerbft
7 | elif [ "${BUILD}" == "flake8" ]; then
8 | flake8 honeybadgerbft/
9 | elif [ "${BUILD}" == "docs" ]; then
10 | sphinx-build -W -c docs -b html -d docs/_build/doctrees docs docs/_build/html
11 | fi
12 |
--------------------------------------------------------------------------------
/.codecov.yml:
--------------------------------------------------------------------------------
1 | codecov:
2 | branch: dev
3 |
4 | coverage:
5 | range: 70..100
6 | round: down
7 | precision: 5
8 | status:
9 | project:
10 | default:
11 | threshold: 2%
12 | patch:
13 | default:
14 | threshold: 1%
15 |
16 | ignore:
17 | - "docs/*"
18 | - "experiments/*"
19 | - "misc/*"
20 | - "test/*"
21 |
22 | comment:
23 | layout: "header, diff, changes, sunburst, uncovered"
24 | behavior: default
25 |
--------------------------------------------------------------------------------
/.coveragerc:
--------------------------------------------------------------------------------
1 | [run]
2 | source = .
3 | omit = *test*, docs/*, setup.py
4 | concurrency = gevent, multiprocessing
5 | branch = True
6 |
7 |
8 | [report]
9 | # Regexes for lines to exclude from consideration
10 | exclude_lines =
11 | # Don't complain if non-runnable code isn't run:
12 | if __name__ == .__main__.:
13 |
--------------------------------------------------------------------------------
/.dockerignore:
--------------------------------------------------------------------------------
1 | .git
2 | **/*.pyc
3 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # IDE junk files
2 | .idea/*
3 | *.pyc
4 | *~
5 | #*#
6 | tmp*
7 | ec2/logs/*
8 | ec2/logs_old/*
9 | ec2/hosts
10 | *_keys
11 | *.keys
12 |
13 | # Unit test / coverage reports
14 | .cache
15 | .coverage
16 | .coverage.*
17 | coverage.xml
18 | htmlcov/
19 | .pytest_cache
20 |
21 | # Sphinx documentation
22 | docs/_build/
23 |
--------------------------------------------------------------------------------
/.travis.yml:
--------------------------------------------------------------------------------
1 | language: python
2 | sudo: required
3 | dist: xenial
4 | python: 3.7
5 |
6 | env:
7 | global:
8 | - LIBRARY_PATH=/usr/local/lib
9 | - LD_LIBRARY_PATH=/usr/local/lib
10 |
11 | matrix:
12 | include:
13 | - env: BUILD=tests
14 | - env: BUILD=flake8
15 | - env: BUILD=docs
16 |
17 | before_install: sudo .ci/travis-before-install.sh
18 | install: .ci/travis-install.sh
19 | script: .ci/travis-script.sh
20 | after_success: .ci/travis-after-success.sh
21 |
--------------------------------------------------------------------------------
/CRAPL-LICENSE.txt:
--------------------------------------------------------------------------------
1 | THE CRAPL v0 BETA 1
2 |
3 |
4 | 0. Information about the CRAPL
5 |
6 | If you have questions or concerns about the CRAPL, or you need more
7 | information about this license, please contact:
8 |
9 | Matthew Might
10 | http://matt.might.net/
11 |
12 |
13 | I. Preamble
14 |
15 | Science thrives on openness.
16 |
17 | In modern science, it is often infeasible to replicate claims without
18 | access to the software underlying those claims.
19 |
20 | Let's all be honest: when scientists write code, aesthetics and
21 | software engineering principles take a back seat to having running,
22 | working code before a deadline.
23 |
24 | So, let's release the ugly. And, let's be proud of that.
25 |
26 |
27 | II. Definitions
28 |
29 | 1. "This License" refers to version 0 beta 1 of the Community
30 | Research and Academic Programming License (the CRAPL).
31 |
32 | 2. "The Program" refers to the medley of source code, shell scripts,
33 | executables, objects, libraries and build files supplied to You,
34 | or these files as modified by You.
35 |
36 | [Any appearance of design in the Program is purely coincidental and
37 | should not in any way be mistaken for evidence of thoughtful
38 | software construction.]
39 |
40 | 3. "You" refers to the person or persons brave and daft enough to use
41 | the Program.
42 |
43 | 4. "The Documentation" refers to the Program.
44 |
45 | 5. "The Author" probably refers to the caffeine-addled graduate
46 | student that got the Program to work moments before a submission
47 | deadline.
48 |
49 |
50 | III. Terms
51 |
52 | 1. By reading this sentence, You have agreed to the terms and
53 | conditions of this License.
54 |
55 | 2. If the Program shows any evidence of having been properly tested
56 | or verified, You will disregard this evidence.
57 |
58 | 3. You agree to hold the Author free from shame, embarrassment or
59 | ridicule for any hacks, kludges or leaps of faith found within the
60 | Program.
61 |
62 | 4. You recognize that any request for support for the Program will be
63 | discarded with extreme prejudice.
64 |
65 | 5. The Author reserves all rights to the Program, except for any
66 | rights granted under any additional licenses attached to the
67 | Program.
68 |
69 |
70 | IV. Permissions
71 |
72 | 1. You are permitted to use the Program to validate published
73 | scientific claims.
74 |
75 | 2. You are permitted to use the Program to validate scientific claims
76 | submitted for peer review, under the condition that You keep
77 | modifications to the Program confidential until those claims have
78 | been published.
79 |
80 | 3. You are permitted to use and/or modify the Program for the
81 | validation of novel scientific claims if You make a good-faith
82 | attempt to notify the Author of Your work and Your claims prior to
83 | submission for publication.
84 |
85 | 4. If You publicly release any claims or data that were supported or
86 | generated by the Program or a modification thereof, in whole or in
87 | part, You will release any inputs supplied to the Program and any
88 | modifications You made to the Progam. This License will be in
89 | effect for the modified program.
90 |
91 |
92 | V. Disclaimer of Warranty
93 |
94 | THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
95 | APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
96 | HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT
97 | WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT
98 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
99 | A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND
100 | PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE
101 | DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR
102 | CORRECTION.
103 |
104 |
105 | VI. Limitation of Liability
106 |
107 | IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
108 | WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR
109 | CONVEYS THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES,
110 | INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES
111 | ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT
112 | NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR
113 | LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM
114 | TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER
115 | PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.
116 |
--------------------------------------------------------------------------------
/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM python:stretch
2 |
3 | # Default cluster arguments. Override with "-e"
4 | #
5 | # total number of parties:
6 | ENV N 8
7 | # tolerance, usually N/4 in our experiments:
8 | ENV t 2
9 | # maximum number of transactions committed in a block:
10 | ENV B 16
11 |
12 | RUN apt-get update && apt-get -y install bison flex libgmp-dev libmpc-dev
13 |
14 | RUN wget https://crypto.stanford.edu/pbc/files/pbc-0.5.14.tar.gz
15 | RUN tar -xvf pbc-0.5.14.tar.gz
16 | RUN cd pbc-0.5.14 && ./configure && make && make install
17 |
18 | ENV LIBRARY_PATH /usr/local/lib
19 | ENV LD_LIBRARY_PATH /usr/local/lib
20 |
21 | RUN git clone https://github.com/JHUISI/charm.git
22 | RUN cd charm && ./configure.sh && make install
23 |
24 | ENV SRC /usr/local/src/HoneyBadgerBFT
25 | WORKDIR $SRC
26 | ADD . $SRC/
27 |
28 | RUN pip install --upgrade pip
29 | RUN pip install -e .[dev]
30 |
31 | # Run tests by default
32 | CMD sh test.sh
33 |
--------------------------------------------------------------------------------
/Makefile:
--------------------------------------------------------------------------------
1 | .PHONY: clean clean-test clean-pyc clean-build docs help
2 | .DEFAULT_GOAL := help
3 | define BROWSER_PYSCRIPT
4 | import os, webbrowser, sys
5 | try:
6 | from urllib import pathname2url
7 | except:
8 | from urllib.request import pathname2url
9 |
10 | webbrowser.open("file://" + pathname2url(os.path.abspath(sys.argv[1])))
11 | endef
12 | export BROWSER_PYSCRIPT
13 |
14 | define PRINT_HELP_PYSCRIPT
15 | import re, sys
16 |
17 | for line in sys.stdin:
18 | match = re.match(r'^([a-zA-Z_-]+):.*?## (.*)$$', line)
19 | if match:
20 | target, help = match.groups()
21 | print("%-20s %s" % (target, help))
22 | endef
23 | export PRINT_HELP_PYSCRIPT
24 | BROWSER := python -c "$$BROWSER_PYSCRIPT"
25 |
26 | help:
27 | @python -c "$$PRINT_HELP_PYSCRIPT" < $(MAKEFILE_LIST)
28 |
29 | clean: clean-build clean-pyc clean-test ## remove all build, test, coverage and Python artifacts
30 |
31 |
32 | clean-build: ## remove build artifacts
33 | rm -fr build/
34 | rm -fr dist/
35 | rm -fr .eggs/
36 | find . -name '*.egg-info' -exec rm -fr {} +
37 | find . -name '*.egg' -exec rm -f {} +
38 |
39 | clean-pyc: ## remove Python file artifacts
40 | find . -name '*.pyc' -exec rm -f {} +
41 | find . -name '*.pyo' -exec rm -f {} +
42 | find . -name '*~' -exec rm -f {} +
43 | find . -name '__pycache__' -exec rm -fr {} +
44 |
45 | clean-test: ## remove test and coverage artifacts
46 | rm -fr .tox/
47 | rm -f .coverage .coverage.*
48 | rm -fr htmlcov/
49 |
50 | lint: ## check style with flake8
51 | flake8 honeybadgerbft tests
52 |
53 | test: ## run tests quickly with the default Python
54 | pytest -v
55 |
56 |
57 | test-all: ## run tests on every Python version with tox
58 | tox
59 |
60 | coverage: ## check code coverage quickly with the default Python
61 | pytest -v -n auto --cov=honeybadgerbft --cov-report term --cov-report html
62 | $(BROWSER) htmlcov/index.html
63 |
64 | docs: ## generate Sphinx HTML documentation, including API docs
65 | $(MAKE) -C docs clean
66 | $(MAKE) -C docs html
67 | $(BROWSER) docs/_build/html/index.html
68 |
69 | servedocs: docs ## compile the docs watching for changes
70 | watchmedo shell-command -p '*.rst' -c '$(MAKE) -C docs html' -R -D .
71 |
72 | release: clean ## package and upload a release
73 | twine upload dist/*
74 |
75 | dist: clean ## builds source and wheel package
76 | python setup.py sdist
77 | # python setup.py bdist_wheel
78 | ls -l dist
79 |
80 | install: clean ## install the package to the active Python's site-packages
81 | python setup.py install
82 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # HoneyBadgerBFT
2 | The Honey Badger of BFT Protocols.
3 |
4 |
5 |
6 | [](https://travis-ci.org/initc3/HoneyBadgerBFT-Python)
7 | [](https://codecov.io/github/initc3/honeybadgerbft-python?branch=dev)
8 |
9 | HoneyBadgerBFT is a leaderless and completely asynchronous BFT consensus protocol.
10 | This makes it a good fit for blockchains deployed over wide area networks
11 | or when adversarial conditions are expected.
12 | HoneyBadger nodes can even stay hidden behind anonymizing relays like Tor, and
13 | the purely-asynchronous protocol will make progress at whatever rate the
14 | network supports.
15 |
16 | This repository contains a Python implementation of the HoneyBadgerBFT protocol.
17 | It is still a prototype, and is not approved for production use. It is intended
18 | to serve as a useful reference and alternative implementation for other projects.
19 |
20 | ## Development Activities
21 |
22 | Since its initial implementation, the project has gone through a substantial
23 | refactoring, and is currently under active development.
24 |
25 | At the moment, the following three milestones are being focused on:
26 |
27 | * [Bounded Badger](https://github.com/initc3/HoneyBadgerBFT-Python/milestone/3)
28 | * [Test Network](https://github.com/initc3/HoneyBadgerBFT-Python/milestone/2)
29 | * [Release 1.0](https://github.com/initc3/HoneyBadgerBFT-Python/milestone/1)
30 |
31 | A roadmap of the project can be found in [ROADMAP.rst](./ROADMAP.rst).
32 |
33 |
34 | ### Contributing
35 | Contributions are welcomed! To quickly get setup for development:
36 |
37 | 1. Fork the repository and clone your fork. (See the Github Guide
38 | [Forking Projects](https://guides.github.com/activities/forking/) if
39 | needed.)
40 |
41 | 2. Install [`Docker`](https://docs.docker.com/install/). (For Linux, see
42 | [Manage Docker as a non-root user](https://docs.docker.com/install/linux/linux-postinstall/#manage-docker-as-a-non-root-user)
43 | to run `docker` without `sudo`.)
44 |
45 | 3. Install [`docker-compose`](https://docs.docker.com/compose/install/).
46 |
47 | 4. Run the tests (the first time will take longer as the image will be built):
48 |
49 | ```bash
50 | $ docker-compose run --rm honeybadger
51 | ```
52 |
53 | The tests should pass, and you should also see a small code coverage report
54 | output to the terminal.
55 |
56 | If the above went all well, you should be setup for developing
57 | **HoneyBadgerBFT-Python**!
58 |
59 | ## License
60 | This is released under the CRAPL academic license. See ./CRAPL-LICENSE.txt
61 | Other licenses may be issued at the authors' discretion.
62 |
--------------------------------------------------------------------------------
/ROADMAP.rst:
--------------------------------------------------------------------------------
1 | *******
2 | Roadmap
3 | *******
4 | The project is currently focusing on three milestones:
5 |
6 | * `Bounded Badger`_: A more secure HoneyBadgerBFT that no longer requires
7 | unbounded buffers for protocol messages.
8 | * `Test Network`_: HoneyBadgerBFT in action with a long-running test network.
9 | * `Release 1.0`_: Towards a better and more stable HoneyBadgerBFT Python
10 | implementation.
11 |
12 | An overview of each milestone is given below.
13 |
14 |
15 | `Bounded Badger`_
16 | =================
17 | The main goal of this milestone is to implement a CHECKPOINT mechanism that
18 | will allow the HoneyBadgerBFT protocol to only require bounded storage. Please
19 | see `amiller/honeybadgerbft#57`_ for a detailed description of the issue, its
20 | motivation, and benefits.
21 |
22 | Overall, this should make HoneyBadgerBFT more secure, (e.g.: resilient to DoS
23 | attacks), and provide a way for nodes to catch up when they fall out of sync.
24 |
25 | The completion of this milestone will involve the following implementations:
26 |
27 | * Tests that reproduce problems stemming from the unbounded-buffers approach
28 | (`#17`_).
29 | * Threshold signature upon the finalization of a block of transactions (
30 | `#15`_).
31 | * Broadcasting and reception of CHECKPOINT messages along with a "message
32 | bounding" behavior (`#16`_).
33 | * Message bounding of ABA (`#22`_).
34 | * Recovery mechanism aka "speedybadger" (`#18`_, `#21`_, `#33`_).
35 | * Garbage collection of "outdated" outgoing protocol messages (`#19`_, `#7`_).
36 |
37 | To stay up-to-date with the issues the milestone comprises, see the milestone
38 | on Github at https://github.com/initc3/HoneyBadgerBFT-Python/milestone/3.
39 |
40 |
41 | `Test Network`_
42 | ===============
43 | At a minimum this milestone wishes to have a long running test network
44 | deployed of approximately 10+ nodes.
45 |
46 | The network will be administered by a trusted party to start with, and
47 | will consist of nodes running the Python implementation. In the near future,
48 | we would like to have a heterogeneous network such that some nodes also run
49 | implementations written in other languages (e.g.: Go, Rust, Erlang, Haskell).
50 |
51 | In order to support the deployment and operation of the test network, the
52 | following tasks are planned:
53 |
54 | * Persistence layer for transactions, blocks, and "system state" (`#20`_,
55 | `#21`_).
56 | * Update and fix the relevant legacy experiments, including benchmark tests
57 | (`#23`_).
58 | * Provide authenticated communications, with persistent connections (`#25`_,
59 | `#26`_).
60 | * Setup minimal logging infrastructure to help monitoring and troubleshooting
61 | (`#24`_).
62 | * Provide a basic dashboard to view the network's state and activity (`#27`_,
63 | `#35`_).
64 |
65 | To stay up-to-date with the issues the milestone comprises, see the milestone
66 | on Github at https://github.com/initc3/HoneyBadgerBFT-Python/milestone/2.
67 |
68 |
69 | `Release 1.0`_
70 | ==============
71 | Release planned to appear after the completion of the bounded badger and
72 | test network milestones.
73 |
74 | This milestone aims to make the implementation of better quality by addressing
75 | most of the opened issues, meaning:
76 |
77 | * Resolving opened bugs (`#31`_, `#46`_).
78 | * Making sure the subprotocols are well tested (`#34`_).
79 | * Implementing the proposed batch size to be floor(B/N) (`#28`_).
80 | * Implementing a coin schedule for ABA (`#38`_).
81 | * Properly handling redundant messages in ABA (`#10`_).
82 | * Providing an overall good documentation of the project (`#30`_, `#43`_).
83 | * Implementing general best software engineering practices (`#13`_, `#14`_,
84 | `#29`_, `#32`_, `#40`_, `#41`_, `#42`_, `#44`_).
85 |
86 | To stay up-to-date with the issues the milestone comprises, see the milestone
87 | on Github at https://github.com/initc3/HoneyBadgerBFT-Python/milestone/1.
88 |
89 |
90 | For Future Milestones
91 | =====================
92 |
93 | Message Formats
94 | ---------------
95 | Serialization/deserialization of messages using protocol buffers.
96 |
97 | Distributed Key Generation
98 | --------------------------
99 | Dynamic addition and removal of nodes.
100 |
101 |
102 | .. _Bounded Badger: https://github.com/initc3/HoneyBadgerBFT-Python/milestone/3
103 | .. _Test Network: https://github.com/initc3/HoneyBadgerBFT-Python/milestone/2
104 | .. _Release 1.0: https://github.com/initc3/HoneyBadgerBFT-Python/milestone/1
105 | .. _amiller/honeybadgerbft#57: https://github.com/amiller/HoneyBadgerBFT/issues/57
106 | .. _#7: https://github.com/initc3/HoneyBadgerBFT-Python/issues/7
107 | .. _#10: https://github.com/initc3/HoneyBadgerBFT-Python/issues/10
108 | .. _#13: https://github.com/initc3/HoneyBadgerBFT-Python/issues/13
109 | .. _#14: https://github.com/initc3/HoneyBadgerBFT-Python/issues/14
110 | .. _#15: https://github.com/initc3/HoneyBadgerBFT-Python/issues/15
111 | .. _#16: https://github.com/initc3/HoneyBadgerBFT-Python/issues/16
112 | .. _#17: https://github.com/initc3/HoneyBadgerBFT-Python/issues/17
113 | .. _#18: https://github.com/initc3/HoneyBadgerBFT-Python/issues/18
114 | .. _#19: https://github.com/initc3/HoneyBadgerBFT-Python/issues/19
115 | .. _#20: https://github.com/initc3/HoneyBadgerBFT-Python/issues/20
116 | .. _#21: https://github.com/initc3/HoneyBadgerBFT-Python/issues/21
117 | .. _#22: https://github.com/initc3/HoneyBadgerBFT-Python/issues/22
118 | .. _#23: https://github.com/initc3/HoneyBadgerBFT-Python/issues/23
119 | .. _#24: https://github.com/initc3/HoneyBadgerBFT-Python/issues/24
120 | .. _#25: https://github.com/initc3/HoneyBadgerBFT-Python/issues/25
121 | .. _#26: https://github.com/initc3/HoneyBadgerBFT-Python/issues/26
122 | .. _#27: https://github.com/initc3/HoneyBadgerBFT-Python/issues/27
123 | .. _#28: https://github.com/initc3/HoneyBadgerBFT-Python/issues/28
124 | .. _#29: https://github.com/initc3/HoneyBadgerBFT-Python/issues/29
125 | .. _#30: https://github.com/initc3/HoneyBadgerBFT-Python/issues/30
126 | .. _#31: https://github.com/initc3/HoneyBadgerBFT-Python/issues/31
127 | .. _#32: https://github.com/initc3/HoneyBadgerBFT-Python/issues/32
128 | .. _#33: https://github.com/initc3/HoneyBadgerBFT-Python/issues/33
129 | .. _#34: https://github.com/initc3/HoneyBadgerBFT-Python/issues/34
130 | .. _#35: https://github.com/initc3/HoneyBadgerBFT-Python/issues/35
131 | .. _#38: https://github.com/initc3/HoneyBadgerBFT-Python/issues/38
132 | .. _#40: https://github.com/initc3/HoneyBadgerBFT-Python/issues/40
133 | .. _#41: https://github.com/initc3/HoneyBadgerBFT-Python/issues/41
134 | .. _#42: https://github.com/initc3/HoneyBadgerBFT-Python/issues/42
135 | .. _#43: https://github.com/initc3/HoneyBadgerBFT-Python/issues/43
136 | .. _#44: https://github.com/initc3/HoneyBadgerBFT-Python/issues/44
137 | .. _#46: https://github.com/initc3/HoneyBadgerBFT-Python/issues/46
138 |
--------------------------------------------------------------------------------
/docker-compose.yml:
--------------------------------------------------------------------------------
1 | version: '3'
2 |
3 | services:
4 | honeybadger:
5 | build:
6 | context: .
7 | dockerfile: Dockerfile
8 | volumes:
9 | - .:/usr/local/src/HoneyBadgerBFT
10 | command: pytest -v --cov=honeybadgerbft
11 | builddocs:
12 | build:
13 | context: .
14 | dockerfile: Dockerfile
15 | volumes:
16 | - .:/usr/local/src/HoneyBadgerBFT
17 | command: make -C docs html
18 | viewdocs:
19 | image: nginx
20 | ports:
21 | - '52032:80'
22 | volumes:
23 | - ./docs/_build/html:/usr/share/nginx/html
24 |
--------------------------------------------------------------------------------
/docs/Makefile:
--------------------------------------------------------------------------------
1 | # Minimal makefile for Sphinx documentation
2 | #
3 |
4 | # You can set these variables from the command line.
5 | SPHINXOPTS =
6 | SPHINXBUILD = python -msphinx
7 | SPHINXPROJ = HoneyBadgerBFT
8 | SOURCEDIR = .
9 | BUILDDIR = _build
10 |
11 | # Put it first so that "make" without argument is like "make help".
12 | help:
13 | @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
14 |
15 | .PHONY: help Makefile
16 |
17 | # Catch-all target: route all unknown targets to Sphinx using the new
18 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
19 | %: Makefile
20 | @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
--------------------------------------------------------------------------------
/docs/conf.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | import sphinx_rtd_theme
4 |
5 |
6 | # -- General configuration ------------------------------------------------
7 |
8 | extensions = [
9 | 'sphinx.ext.autodoc',
10 | 'sphinx.ext.intersphinx',
11 | 'sphinx.ext.todo',
12 | 'sphinx.ext.coverage',
13 | 'sphinx.ext.mathjax',
14 | 'sphinx.ext.viewcode',
15 | ]
16 |
17 | autodoc_default_flags = [
18 | 'members',
19 | 'private-members',
20 | 'inherited-members',
21 | 'show-inheritance',
22 | ]
23 | autoclass_content = 'both'
24 |
25 | templates_path = ['_templates']
26 | source_suffix = '.rst'
27 | master_doc = 'index'
28 | project = u'HoneyBadgerBFT'
29 | copyright = u'2017, Andrew Miller et al.'
30 | author = u'Andrew Miller et al.'
31 | version = u'0.1'
32 | release = u'0.1.0'
33 | language = None
34 | exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
35 | pygments_style = 'sphinx'
36 | todo_include_todos = True
37 |
38 |
39 | # -- Options for HTML output ----------------------------------------------
40 |
41 | html_theme = 'sphinx_rtd_theme'
42 | html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
43 | html_sidebars = {
44 | '**': [
45 | 'about.html',
46 | 'navigation.html',
47 | 'relations.html', # needs 'show_related': True theme option to display
48 | 'searchbox.html',
49 | 'donate.html',
50 | ]
51 | }
52 |
53 |
54 | # -- Options for HTMLHelp output ------------------------------------------
55 |
56 | htmlhelp_basename = 'HoneyBadgerBFTdoc'
57 |
58 |
59 | # -- Options for LaTeX output ---------------------------------------------
60 |
61 | latex_elements = {}
62 | latex_documents = [
63 | (master_doc, 'HoneyBadgerBFT.tex', u'HoneyBadgerBFT Documentation',
64 | u'Andrew Miller et al.', 'manual'),
65 | ]
66 |
67 |
68 | # -- Options for manual page output ---------------------------------------
69 |
70 | man_pages = [
71 | (master_doc, 'honeybadgerbft', u'HoneyBadgerBFT Documentation',
72 | [author], 1)
73 | ]
74 |
75 |
76 | # -- Options for Texinfo output -------------------------------------------
77 |
78 | texinfo_documents = [
79 | (master_doc, 'HoneyBadgerBFT', u'HoneyBadgerBFT Documentation',
80 | author, 'HoneyBadgerBFT', 'One line description of project.',
81 | 'Miscellaneous'),
82 | ]
83 |
84 |
85 | intersphinx_mapping = {'https://docs.python.org/': None}
86 |
--------------------------------------------------------------------------------
/docs/index.rst:
--------------------------------------------------------------------------------
1 | .. HoneyBadgerBFT documentation master file, created by
2 | sphinx-quickstart on Wed Aug 16 12:55:00 2017.
3 | You can adapt this file completely to your liking, but it should at least
4 | contain the root `toctree` directive.
5 |
6 | Welcome to HoneyBadgerBFT's documentation!
7 | ==========================================
8 |
9 | .. toctree::
10 | :maxdepth: 1
11 | :caption: Contents:
12 |
13 | libref
14 | roadmap
15 |
16 |
17 | Indices and tables
18 | ==================
19 |
20 | * :ref:`genindex`
21 | * :ref:`modindex`
22 | * :ref:`search`
23 |
--------------------------------------------------------------------------------
/docs/libref.rst:
--------------------------------------------------------------------------------
1 | *****************
2 | Library Reference
3 | *****************
4 |
5 | Core
6 | ----
7 | .. automodule:: honeybadgerbft.core
8 | :members:
9 |
10 | Binary Agreement
11 | ^^^^^^^^^^^^^^^^
12 | .. automodule:: honeybadgerbft.core.binaryagreement
13 | :members:
14 |
15 | Common Coin
16 | ^^^^^^^^^^^
17 | .. autoexception:: honeybadgerbft.core.commoncoin.CommonCoinFailureException
18 |
19 | .. automethod:: honeybadgerbft.core.commoncoin.shared_coin
20 |
21 | Asynchronous Common Subset (ACS)
22 | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
23 | .. automodule:: honeybadgerbft.core.commonsubset
24 | :members:
25 |
26 | HoneyBadgerBFT
27 | ^^^^^^^^^^^^^^
28 | .. autoclass:: honeybadgerbft.core.honeybadger.HoneyBadgerBFT
29 | :members:
30 | :private-members:
31 |
32 | HoneyBadger Block
33 | ^^^^^^^^^^^^^^^^^
34 | .. automodule:: honeybadgerbft.core.honeybadger_block
35 | :members:
36 |
37 | Reliable Broadcast
38 | ^^^^^^^^^^^^^^^^^^
39 | .. automodule:: honeybadgerbft.core.reliablebroadcast
40 | :members:
41 |
42 | Crypto
43 | ------
44 | .. automodule:: honeybadgerbft.crypto
45 | :members:
46 |
47 |
48 | Elliptic Curve Digital Signature Algorithm
49 | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
50 | .. automodule:: honeybadgerbft.crypto.ecdsa
51 | :members:
52 |
53 | .. automodule:: honeybadgerbft.crypto.ecdsa.generate_keys_ecdsa
54 | :members:
55 |
56 | Threshold Encryption
57 | ^^^^^^^^^^^^^^^^^^^^
58 | .. automodule:: honeybadgerbft.crypto.threshenc
59 | :members:
60 |
61 | .. automodule:: honeybadgerbft.crypto.threshenc.generate_keys
62 | :members:
63 |
64 | .. automodule:: honeybadgerbft.crypto.threshenc.tpke
65 | :members:
66 |
67 | Threshold Signature
68 | ^^^^^^^^^^^^^^^^^^^
69 | .. automodule:: honeybadgerbft.crypto.threshsig
70 | :members:
71 |
72 | boldyreva
73 | """""""""
74 | .. automodule:: honeybadgerbft.crypto.threshsig.boldyreva
75 | :members:
76 |
77 | boldyreva_gipc
78 | """"""""""""""
79 | .. automodule:: honeybadgerbft.crypto.threshsig.boldyreva_gipc
80 | :members:
81 |
82 | boldyreva_pool
83 | """"""""""""""
84 | .. automodule:: honeybadgerbft.crypto.threshsig.boldyreva_pool
85 | :members:
86 |
87 | generate_keys
88 | """""""""""""
89 | .. automodule:: honeybadgerbft.crypto.threshsig.generate_keys
90 | :members:
91 |
92 | millerrabin
93 | """""""""""
94 | .. automodule:: honeybadgerbft.crypto.threshsig.millerrabin
95 | :members:
96 |
--------------------------------------------------------------------------------
/docs/roadmap.rst:
--------------------------------------------------------------------------------
1 | .. include:: ../ROADMAP.rst
2 |
--------------------------------------------------------------------------------
/experiments/__init__.py:
--------------------------------------------------------------------------------
1 | __author__ = 'aluex'
2 |
--------------------------------------------------------------------------------
/experiments/benchmark/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/initc3/HoneyBadgerBFT-Python/e8bcbc081dfb5d1e7298039d47bbebf7048b8e62/experiments/benchmark/__init__.py
--------------------------------------------------------------------------------
/experiments/benchmark/process.py:
--------------------------------------------------------------------------------
1 | ################################
2 |
3 | import sys
4 | import re
5 |
# Parses benchmark log lines of the form
#   counter:bytes(from->to)[start_time]-[end_time](msgtype, content)
# as written by the logWriter in honest_party_test.py.
# Bug fix: the named groups had been stripped (bare "(?P" is invalid regex and
# makes re.compile raise); names are restored from the keys main() reads.
infoExtractor = re.compile(r'(?P<counter>\d+):(?P<bytes>\d+)\((?P<from>\d+)\-\>(?P<to>\d+)\)\[(?P<start_time>[\d\.]+)\]-\[(?P<end_time>[\d\.]+)\]\((?P<msgtype>\d+),\s*(?P<content>.*)\)')
7 |
def main(filename):
    """Summarize a message-latency log and plot a latency histogram/density.

    Reads the log at `filename`, extracts one record per message via
    infoExtractor, writes a JavaScript-ready timeline to 'rawdata.<filename>'
    for the visualization page, prints summary statistics, then shows two
    matplotlib charts.  Python 2 only (print statements, str.decode).
    """
    # Generate summaries and chart.
    content = open(filename, 'r').read().decode('utf-8','ignore')
    timelap = []        # per-message latency (end - start), seconds
    start_times = []    # per-message send timestamps
    end_times = []      # per-message receive timestamps
    msgsize = []        # per-message sizes in bytes
    outputObj = []      # rows for the JS timeline visualization
    import json
    infoList = [a for a in infoExtractor.finditer(content)]
    if not infoList:
        return

    for mat in infoList:
        res = mat.groupdict()
        start_times.append(float(res['start_time']))
        # print start_times
        end_times.append(float(res['end_time']))
        time_diff = float(res['end_time']) - float(res['start_time'])
        timelap.append(time_diff)
        msgsize.append(int(res['bytes']))

        outputObj.append([
            res['from'], "(%s->%s)%s" % (res['from'], res['to'], res['content']), '|'+str(float(res['start_time']))+'|', '|'+str(float(res['end_time']))+'|'
        ])

    # The '|'-quoted timestamps become `new Date(<t>*1000)` expressions so the
    # output file can be pasted straight into the JS visualization.
    open('rawdata.'+filename,'w').write(json.dumps(outputObj).replace('"|','new Date(').replace('|"','*1000)'))
    # return
    import numpy

    # Summary stats: total span, message count, total bytes, mean/variance/max latency.
    print max(end_times) - min(start_times)
    print len(msgsize)
    print sum(msgsize)
    print sum(timelap) / len(timelap)
    print numpy.var(timelap)
    print max(timelap)
    # return

    import matplotlib.pyplot as plt

    plt.hist(timelap, bins=50)
    plt.title("Histogram")
    plt.xlabel("Value")
    plt.ylabel("Frequency")
    plt.show()

    # Second figure: smoothed latency density over [0, 8] seconds.
    import matplotlib.pyplot as plt
    import numpy as np
    from scipy.stats import gaussian_kde
    data = timelap
    density = gaussian_kde(data)
    xs = np.linspace(0,8,200)
    density.covariance_factor = lambda : .45  # fixed smoothing bandwidth
    density._compute_covariance()
    plt.plot(xs, density(xs))
    plt.show()
64 |
65 | if __name__=='__main__':
66 | main(sys.argv[1])
67 |
--------------------------------------------------------------------------------
/experiments/benchmark/process_old.py:
--------------------------------------------------------------------------------
1 | ################################
2 |
3 | import sys
4 | import re
5 |
# Parses old-format log lines of the form
#   sender:bytes[start_time]-[end_time](msgtype, content)
# Bug fix: the named groups had been stripped (bare "(?P" is invalid regex and
# makes re.compile raise); names are restored from the keys main() reads.
infoExtractor = re.compile(r'(?P<sender>\d+):(?P<bytes>\d+)\[(?P<start_time>[\d\.]+)\]-\[(?P<end_time>[\d\.]+)\]\((?P<msgtype>\d+),\s*(?P<content>.*)\)')
7 |
def main(filename):
    """Summarize an old-format message log: write a JS timeline file and
    print summary latency/throughput statistics.  Python 2 only.
    """
    content = open(filename, 'r').read().decode('utf-8','ignore')
    timelap = []        # per-message latency (end - start), seconds
    start_times = []
    end_times = []
    msgsize = []        # per-message sizes in bytes
    outputObj = []      # rows for the JS timeline visualization
    import json

    for mat in infoExtractor.finditer(content):
        res = mat.groupdict()
        start_times.append(float(res['start_time']))
        end_times.append(float(res['end_time']))
        time_diff = float(res['end_time']) - float(res['start_time'])
        timelap.append(time_diff)
        msgsize.append(int(res['bytes']))
        outputObj.append([res['sender'], res['content'], '|'+str(float(res['start_time']))+'|', '|'+str(float(res['end_time']))+'|'])

    # '|'-quoted timestamps become `new Date(<t>*1000)` for the visualization page.
    open('rawdata.'+filename,'w').write(json.dumps(outputObj).replace('"|','new Date(').replace('|"','*1000)'))
    # return
    import numpy

    # Summary stats: total span, message count, total bytes, mean/variance/max latency.
    print max(end_times) - min(start_times)
    print len(msgsize)
    print sum(msgsize)
    print sum(timelap) / len(timelap)
    print numpy.var(timelap)
    print max(timelap)
    return
37 |
38 |
39 | if __name__=='__main__':
40 | main(sys.argv[1])
41 |
--------------------------------------------------------------------------------
/experiments/benchmark/visualization/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
5 |
44 |
45 |
--------------------------------------------------------------------------------
/experiments/ec2/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/initc3/HoneyBadgerBFT-Python/e8bcbc081dfb5d1e7298039d47bbebf7048b8e62/experiments/ec2/__init__.py
--------------------------------------------------------------------------------
/experiments/ec2/fabfile.py:
--------------------------------------------------------------------------------
1 | from __future__ import with_statement
2 | from fabric.api import *
3 | from fabric.operations import put, get
4 | from fabric.contrib.console import confirm
5 | from fabric.contrib.files import append
6 | import time, sys, os, scanf
7 | from io import BytesIO
8 | import math
9 |
10 | @parallel
11 | def host_type():
12 | run('uname -s')
13 |
14 | @parallel
15 | def gettime():
16 | run('date +%s.%N')
17 |
@parallel
def checkLatency():
    '''
    Ping every host in env.hosts from the current host and print the
    75th-percentile and mean round-trip time.  Output of `ping` looks like:

    PING 52.49.163.101 (52.49.163.101) 56(84) bytes of data.
    64 bytes from 52.49.163.101: icmp_seq=1 ttl=45 time=141 ms

    --- 52.49.163.101 ping statistics ---
    1 packets transmitted, 1 received, 0% packet loss, time 0ms
    rtt min/avg/max/mdev = 141.722/141.722/141.722/0.000 ms

    The second line (first reply) is scanf-parsed for the `time=` value.
    '''
    resDict = []  # NOTE: despite the name, this is a list of latencies (ms)
    totLen = len(env.hosts)
    for destination in env.hosts:
        waste = BytesIO()  # swallow fabric's remote output
        with hide('output', 'running'):
            res = run('ping -c 3 %s' % destination, stdout=waste, stderr=waste).strip().split('\n')[1].strip()
        # print repr(res)
        # Last scanf field is the RTT in milliseconds.
        lat = scanf.sscanf(res, '%d bytes from %s icmp_seq=%d ttl=%d time=%f ms')[-1]
        resDict.append(lat)
    # host, ~75th percentile latency, mean latency
    print ' '.join([env.host_string, str(sorted(resDict)[int(math.ceil(totLen * 0.75))]), str(sum(resDict) / len(resDict))])
38 |
39 | @parallel
40 | def ping():
41 | run('ping -c 5 google.com')
42 | run('echo "synced transactions set"')
43 | run('ping -c 100 google.com')
44 |
45 | @parallel
46 | def cloneRepo():
47 | run('git clone https://github.com/amiller/HoneyBadgerBFT.git')
48 | with cd('HoneyBadgerBFT'):
49 | run('git checkout another-dev')
50 |
@parallel
def install_dependencies():
    """Provision a fresh Ubuntu EC2 host with everything HoneyBadgerBFT needs.

    Installs system packages (gevent, pip, gmp/ssl dev headers, flex/bison),
    Python packages (pycrypto, ecdsa, zfec, gipc), builds PBC from source,
    and builds/installs the charm crypto library (2.7-dev branch) if absent.
    Order matters: pip packages need python-dev; charm needs PBC installed first.
    """
    sudo('apt-get update')
    sudo('apt-get -y install python-gevent')
    sudo('apt-get -y install git')
    sudo('apt-get -y install python-socksipy')
    sudo('apt-get -y install python-pip')
    sudo('apt-get -y install python-dev')
    sudo('apt-get -y install python-gmpy2')
    sudo('apt-get -y install flex')
    sudo('apt-get -y install bison')
    sudo('apt-get -y install libgmp-dev')
    sudo('apt-get -y install libssl-dev')
    sudo('pip install pycrypto')
    sudo('pip install ecdsa')
    sudo('pip install zfec')
    sudo('pip install gipc')
    # Build the Pairing-Based Crypto library from source (required by charm).
    run('wget https://crypto.stanford.edu/pbc/files/pbc-0.5.14.tar.gz')
    run('tar -xvf pbc-0.5.14.tar.gz')
    with cd('pbc-0.5.14'):
        run('./configure')
        run('make')
        sudo('make install')
    # Clone and install charm only if it is not already present (idempotent-ish).
    with settings(warn_only=True):
        if run('test -d charm').failed:
            run('git clone https://github.com/JHUISI/charm.git')
    with cd('charm'):
        run('git checkout 2.7-dev')
        run('./configure.sh')
        sudo('python setup.py install')
81 |
82 | @parallel
83 | def prepare():
84 | syncKeys()
85 | install_dependencies()
86 | cloneRepo()
87 | git_pull()
88 |
89 |
90 | @parallel
91 | def stopProtocols():
92 | with settings(warn_only=True):
93 | run('killall python')
94 | run('killall dtach')
95 | run('killall server.py')
96 |
97 | @parallel
98 | def removeHosts():
99 | run('rm ~/hosts')
100 |
101 | @parallel
102 | def writeHosts():
103 | put('./hosts', '~/')
104 |
105 | @parallel
106 | def fetchLogs():
107 | get('~/msglog.TorMultiple',
108 | 'logs/%(host)s' + time.strftime('%Y-%m-%d_%H:%M:%SZ',time.gmtime()) + '.log')
109 |
110 | @parallel
111 | def syncKeys():
112 | put('./*.keys', '~/')
113 |
114 | import SocketServer, time
115 | start_time = 0
116 | sync_counter = 0
117 | N = 1
118 | t = 1
119 |
class MyTCPHandler(SocketServer.StreamRequestHandler):
    """
    The RequestHandler class for our sync server.

    It is instantiated once per connection to the server, and must
    override the handle() method to implement communication to the
    client.  Each party connects and sends one line when it finishes;
    once N - t parties have reported, the elapsed time is printed.

    Bug fixes vs. the original:
    - subclasses StreamRequestHandler (not BaseRequestHandler), since only
      the stream handler provides the buffered ``self.rfile`` used below;
    - declares ``global sync_counter`` so the ``+=`` updates the module
      counter instead of raising UnboundLocalError.
    """
    def handle(self):
        # self.rfile is a buffered reader over the TCP socket connected to the client
        global sync_counter
        self.data = self.rfile.readline().strip()
        print("%s finishes at %lf" % (self.client_address[0], time.time() - start_time))
        print(self.data)
        sync_counter += 1
        if sync_counter >= N - t:
            print("finished at %lf" % (time.time() - start_time))
136 |
def runServer(Nstr='1', tstr='1'): # deprecated
    """Run the synchronization TCP server on port 51234 (blocks forever).

    :param Nstr: total party count, as a string (fabric passes args as strings)
    :param tstr: fault tolerance, as a string

    Bug fix: the original read undefined globals ``Nstr``/``tstr`` (NameError
    on every call); they are now parameters with backward-compatible defaults.
    """
    global start_time, sync_counter, N, t
    N = int(Nstr)
    t = int(tstr)
    start_time = time.time()
    sync_counter = 0
    server = SocketServer.TCPServer(('0.0.0.0', 51234), MyTCPHandler)
    server.serve_forever()
145 |
146 | @parallel
147 | def runProtocolFromClient(client, key):
148 | # s = StringIO()
149 | with cd('~/HoneyBadgerBFT/mmr13'):
150 | run('python honest_party_test_EC2.py ~/hosts %s ~/ecdsa_keys %s' % (key, client))
151 |
152 | @parallel
153 | def generateTX(N_, seed):
154 | N = int(N_)
155 | run('python -m HoneyBadgerBFT.ec2.generate_tx %d %s > tx' % (N, seed))
156 |
@parallel
def runProtocol(N_, t_, B_, timespan_, tx='tx'):
    """Start the HoneyBadgerBFT test party on each remote host.

    :param N_: number of parties (string or int; fabric passes strings)
    :param t_: fault tolerance
    :param B_: per-party batch size; total batch is B_ * N
    :param timespan_: run duration passed to the party via -a
    :param tx: unused here; kept for the fabric task signature
    """
    N = int(N_)
    t = int(t_)
    B = int(B_) * N # now we don't have to calculate them anymore
    timespan = int(timespan_)
    print N, t, B, timespan
    # PBC/charm are installed under /usr/local/lib; make sure the dynamic
    # linker can find them when the remote python starts.
    with shell_env(LIBRARY_PATH='/usr/local/lib', LD_LIBRARY_PATH='/usr/local/lib'):
        run('python -m HoneyBadgerBFT.test.honest_party_test_EC2 -k'
            ' thsig%d_%d.keys -e ecdsa.keys -a %d -b %d -n %d -t %d -c thenc%d_%d.keys' % (N, t, timespan, B, N, t, N, t))
167 |
168 | @parallel
169 | def checkout():
170 | run('svn checkout --no-auth-cache --username aluex --password JkJ-3pc-s3Y-prp https://subversion.assembla.com/svn/ktc-scratch/')
171 |
172 | @parallel
173 | def svnUpdate():
174 | with settings(warn_only=True):
175 | if run('test -d ktc-scratch').failed:
176 | run('svn checkout --no-auth-cache --username aluex --password JkJ-3pc-s3Y-prp https://subversion.assembla.com/svn/ktc-scratch/')
177 | with cd('~/ktc-scratch'):
178 | run('svn up --no-auth-cache --username aluex --password JkJ-3pc-s3Y-prp')
179 |
180 | @parallel
181 | def svnClean():
182 | with settings(warn_only=True):
183 | if run('test -d ktc-scratch').failed:
184 | run('svn checkout --username aluex --password JkJ-3pc-s3Y-prp https://subversion.assembla.com/svn/ktc-scratch/')
185 | with cd('~/ktc-scratch'):
186 | run('svn cleanup')
187 |
188 | @parallel
189 | def makeExecutable():
190 | with cd('~/ktc-scratch'):
191 | run('chmod +x server.py')
192 |
193 | # http://stackoverflow.com/questions/8775598/start-a-background-process-with-nohup-using-fabric
194 | def runbg(cmd, sockname="dtach"):
195 | return run('dtach -n `mktemp -u /tmp/%s.XXXX` %s' % (sockname,cmd))
196 |
197 | @parallel
198 | def startPBFT(): ######## THIS SHOULD BE CALLED IN REVERSED HOST ORDER
199 | with cd('~/ktc-scratch'):
200 | runbg('python server.py')
201 |
202 | def startClient():
203 | with cd('~/ktc-scratch'):
204 | #batch_size = 1024
205 | #batch_size = 2048
206 | #batch_size = 4096
207 | #batch_size = 8192
208 | batch_size = 16384
209 | #batch_size = 65536
210 | run('python gen_requests.py 1000 %d' % (batch_size,))
211 | run('python client.py 40')
212 | run('python parse_client_log.py %d' % (batch_size,))
213 |
214 | @parallel
215 | def git_pull():
216 | with settings(warn_only=True):
217 | if run('test -d HoneyBadgerBFT').failed:
218 | run('git clone https://github.com/amiller/HoneyBadgerBFT.git')
219 | with cd('~/HoneyBadgerBFT'):
220 | run('git checkout another-dev')
221 | run('git pull')
222 |
223 |
--------------------------------------------------------------------------------
/experiments/ec2/generate_tx.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import cPickle
3 | from ..core.utils import encodeTransaction, randomTransaction
4 | import random
5 |
6 |
7 | def main():
8 | parser = argparse.ArgumentParser()
9 | parser.add_argument('n', help='The number of transactions')
10 | parser.add_argument('seed', help='seed')
11 | args = parser.parse_args()
12 | ntx = int(args.n)
13 | if args.seed:
14 | seed = int(args.seed)
15 | else:
16 | seed = 123
17 | rnd = random.Random(seed)
18 | print "Random transaction generator fingerprints %s" % (hex(rnd.getrandbits(32*8)))
19 | transactionSet = set([encodeTransaction(randomTransaction(rnd), randomGenerator=rnd) for trC in range(ntx)]) # we are using the same one
20 | print cPickle.dumps(transactionSet)
21 |
22 | if __name__ == '__main__':
23 | main()
24 |
--------------------------------------------------------------------------------
/experiments/ec2/list.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import boto.ec2
3 |
4 | access_key = ''
5 | secret_key = ''
6 |
7 | def get_ec2_instances(region):
8 | ec2_conn = boto.ec2.connect_to_region(region,
9 | aws_access_key_id=access_key,
10 | aws_secret_access_key=secret_key)
11 | if ec2_conn:
12 | reservations = ec2_conn.get_all_reservations(filters={'tag:Name':'pbft'})
13 | for reservation in reservations:
14 | for ins in reservation.instances:
15 | if ins.public_dns_name:
16 | print ins.public_dns_name.split('.')[0][4:].replace('-','.')
17 |
18 | def get_ec2_instances_names(region):
19 | ec2_conn = boto.ec2.connect_to_region(region,
20 | aws_access_key_id=access_key,
21 | aws_secret_access_key=secret_key)
22 | if ec2_conn:
23 | reservations = ec2_conn.get_all_reservations(filters={'tag:Name':'pbft'})
24 | for reservation in reservations:
25 | for ins in reservation.instances:
26 | print ins.instance_id
27 |
28 | def start_all_instances(region):
29 | ec2_conn = boto.ec2.connect_to_region(region,
30 | aws_access_key_id=access_key,
31 | aws_secret_access_key=secret_key)
32 | idList = []
33 | if ec2_conn:
34 | reservations = ec2_conn.get_all_reservations(filters={'tag:Name':'pbft'})
35 | for reservation in reservations:
36 | for ins in reservation.instances:
37 | idList.append(ins.instance_id)
38 | ec2_conn.start_instances(instance_ids=idList)
39 |
40 | def main():
41 | regions = ['us-east-1','us-west-1','us-west-2','eu-west-1','sa-east-1',
42 | 'ap-southeast-1','ap-southeast-2','ap-northeast-1', 'eu-central-1']
43 | parser = argparse.ArgumentParser()
44 | parser.add_argument('access_key', help='Access Key')
45 | parser.add_argument('secret_key', help='Secret Key')
46 | args = parser.parse_args()
47 | global access_key
48 | global secret_key
49 | access_key = args.access_key
50 | secret_key = args.secret_key
51 |
52 | for region in regions:
53 | get_ec2_instances(region)
54 |
55 | if __name__ =='__main__':main()
--------------------------------------------------------------------------------
/experiments/ec2/logs/readme.md:
--------------------------------------------------------------------------------
1 | This is the place for logs
2 |
--------------------------------------------------------------------------------
/experiments/ec2/process_raw.py:
--------------------------------------------------------------------------------
1 | ############# Process the latency from a raw screen log
2 | import scanf
3 | import math
4 | import numpy
5 | from collections import defaultdict
6 |
7 | def process(s, txpp, N=-1, t=-1):
8 | endtime = dict()
9 | starttime = dict()
10 | tList = []
11 | lines = s.split('\n')
12 | scheduleTime = defaultdict(lambda: 0)
13 | for line in lines:
14 | if 'timestampE ' in line:
15 | info = eval(line.split('timestampE')[1])
16 | endtime[info[0]] = info[1]
17 | if 'timestampB ' in line:
18 | info = eval(line.split('timestampB')[1])
19 | starttime[info[0]] = info[1]
20 | if 'waits for' in line:
21 | if 'now is' in line:
22 | tl = scanf.sscanf(line, '%s out: %d waits for %f now is %f')
23 | else:
24 | tl = scanf.sscanf(line, '%s out: %d waits for %f')
25 | scheduleTime[tl[2]] += 1
26 | if 'proposing' in line:
27 | tl = scanf.sscanf(line, '%s out: [%d] proposing %d transactions')
28 | if txpp!=tl[2]:
29 | print "\n\n!!!!!!!!!!!!! File Inconsistent\n\n"
30 | return
31 |
32 | earlyStart = min(scheduleTime.keys())
33 | if scheduleTime[earlyStart] < N - t:
34 | print "\n\n!!!!!!!!!!!!! Starting Time Unsynced\n\n"
35 | return
36 |
37 | maxLatency = 0
38 | for key, value in endtime.items():
39 | print key, starttime[key], value, value - starttime[key]
40 | tList.append(value - starttime[key])
41 | if value - starttime[key] > maxLatency:
42 | maxLatency = value - starttime[key]
43 | if N < 0 or t < 0 or 3*t < N:
44 | # infer N, t
45 | N = len(starttime.keys())
46 | t = N/4 # follows the convention that 4t = N
47 | print 'N', N, 't', t
48 | if len(endtime) < N - t:
49 | print "!!!!!!!!!!!!! Census Unfinished"
50 | return
51 | print '(N-t) finishing at', sorted(endtime.values())[N-t-1] - min(starttime.values())
52 | print '(N/2) finishing at', sorted(endtime.values())[N/2] - min(starttime.values())
53 | print 'max', maxLatency
54 | print 'avg', sum(tList) / len(tList)
55 | print 'range', max(endtime.values()) - min(starttime.values())
56 | return sorted(endtime.values())[N-t-1] - min(starttime.values())
57 |
58 | def processIncTx(s, txpp, N=-1, t=-1):
59 | endtime = dict()
60 | starttime = dict()
61 | tList = []
62 | lines = s.split('\n')
63 | scheduleTime = dict()
64 | for line in lines:
65 | if 'timestampIE ' in line:
66 | info = eval(line.split('timestampIE')[1])
67 | endtime[info[0]] = info[1]
68 | if 'timestampIB ' in line:
69 | info = eval(line.split('timestampIB')[1])
70 | starttime[info[0]] = info[1]
71 | if 'waits for' in line:
72 | if 'now is' in line:
73 | tl = scanf.sscanf(line, '%s out: %d waits for %f now is %f')
74 | else:
75 | tl = scanf.sscanf(line, '%s out: %d waits for %f')
76 | scheduleTime[tl[1]] = tl[2]
77 | if 'proposing' in line:
78 | tl = scanf.sscanf(line, '%s out: [%d] proposing %d transactions')
79 | if txpp!=tl[2]:
80 | print "\n\n!!!!!!!!!!!!! File Inconsistent\n\n"
81 | return
82 |
83 | uniqueScheduleTime = set(scheduleTime.values())
84 | print uniqueScheduleTime
85 | if len(uniqueScheduleTime) != 1:
86 | print "\n\n!!!!!!!!!!!!! Starting Time Unsynced\n\n"
87 | return
88 |
89 | maxLatency = 0
90 | for key, value in endtime.items():
91 | print key, starttime[key], value, value - starttime[key]
92 | tList.append(value - starttime[key])
93 | if value - starttime[key] > maxLatency:
94 | maxLatency = value - starttime[key]
95 | if N < 0 or t < 0 or 3*t < N:
96 | # infer N, t
97 | N = len(starttime.keys())
98 | t = N/4 # follows the convention that 4t = N
99 | print 'N', N, 't', t
100 | if len(endtime) < N - t:
101 | print "!!!!!!!!!!!!! Census Unfinished"
102 | return
103 | print '(N-t) finishing at', sorted(endtime.values())[N-t-1] - min(starttime.values())
104 | print '(N/2) finishing at', sorted(endtime.values())[N/2] - min(starttime.values())
105 | print 'max', maxLatency
106 | print 'avg', sum(tList) / len(tList)
107 | print 'range', max(endtime.values()) - min(starttime.values())
108 | return sorted(endtime.values())[N-t-1] - min(starttime.values())
109 |
110 | def p(N, t, b):
111 | fileName = "logs/%d_%d_%d.txt" % (N, t, b)
112 | contents = open(fileName).read().strip().split('\n\n')
113 | re = []
114 | for c in contents:
115 | if c:
116 | ttt = process(c, b, N, t)
117 | if ttt:
118 | re.append(ttt)
119 | print tuple(re)
120 | print sum(re) / len(re), numpy.std(re), 'num', len(re)
121 |
122 | def pIncTx(N, t, b):
123 | fileName = "logs/%d_%d_%d.txt" % (N, t, b)
124 | contents = open(fileName).read().strip().split('\n\n')
125 | re = []
126 | for c in contents:
127 | if c:
128 | ttt = processIncTx(c, b, N, t)
129 | if ttt:
130 | re.append(ttt)
131 | print sum(re) / len(re), numpy.std(re)
132 |
133 | if __name__ =='__main__':
134 | try: __IPYTHON__
135 | except NameError:
136 |
137 | import IPython
138 | IPython.embed()
139 |
140 |
--------------------------------------------------------------------------------
/experiments/ec2/utility.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import boto.ec2
3 | import sys, os
4 | import time
5 | if not boto.config.has_section('ec2'):
6 | boto.config.add_section('ec2')
7 | boto.config.setbool('ec2','use-sigv4',True)
8 |
9 | secgroups = {
10 | 'us-east-1':'sg-6c4dfe0a',
11 | 'us-west-1':'sg-7b34651e',
12 | 'us-west-2':'sg-8f067ceb',
13 | 'eu-west-1':'sg-368c3152',
14 | 'sa-east-1':'sg-2a1f744f',
15 | 'ap-southeast-1':'sg-2d491c48',
16 | 'ap-southeast-2':'sg-d58dd4b0',
17 | 'ap-northeast-1':'sg-499fb02c',
18 | # 'eu-central-1':'sg-2bfe9342' # somehow this group does not work
19 | }
20 | regions = sorted(secgroups.keys())[::-1]
21 |
22 | NameFilter = 'Badger'
23 |
def getAddrFromEC2Summary(s):
    """Convert an EC2 console dump of public DNS names into bare IP strings.

    Each line like ``ec2-52-1-2-3.us-west-1.compute.amazonaws.com`` becomes
    ``52.1.2.3``: the amazonaws suffix and region tag are removed, dashes
    become dots, and the leading ``ec2.`` prefix is dropped per line.
    """
    cleaned = s.replace('.compute.amazonaws.com', '')
    # Later we need to add more such lines for other regions
    cleaned = cleaned.replace('.us-west-1', '')
    cleaned = cleaned.replace('-', '.')
    return [line.split('ec2.')[-1] for line in cleaned.strip().split('\n')]
33 |
34 | def get_ec2_instances_ip(region):
35 | ec2_conn = boto.ec2.connect_to_region(region,
36 | aws_access_key_id=access_key,
37 | aws_secret_access_key=secret_key)
38 | if ec2_conn:
39 | result = []
40 | reservations = ec2_conn.get_all_reservations(filters={'tag:Name': NameFilter})
41 | for reservation in reservations:
42 | if reservation:
43 | for ins in reservation.instances:
44 | if ins.public_dns_name:
45 | currentIP = ins.public_dns_name.split('.')[0][4:].replace('-','.')
46 | result.append(currentIP)
47 | print currentIP
48 | return result
49 | else:
50 | print 'Region failed', region
51 | return None
52 |
53 | def get_ec2_instances_id(region):
54 | ec2_conn = boto.ec2.connect_to_region(region,
55 | aws_access_key_id=access_key,
56 | aws_secret_access_key=secret_key)
57 | if ec2_conn:
58 | result = []
59 | reservations = ec2_conn.get_all_reservations(filters={'tag:Name': NameFilter})
60 | for reservation in reservations:
61 | for ins in reservation.instances:
62 | print ins.id
63 | result.append(ins.id)
64 | return result
65 | else:
66 | print 'Region failed', region
67 | return None
68 |
69 | def stop_all_instances(region):
70 | ec2_conn = boto.ec2.connect_to_region(region,
71 | aws_access_key_id=access_key,
72 | aws_secret_access_key=secret_key)
73 | idList = []
74 | if ec2_conn:
75 | reservations = ec2_conn.get_all_reservations(filters={'tag:Name': NameFilter})
76 | for reservation in reservations:
77 | if reservation:
78 | for ins in reservation.instances:
79 | idList.append(ins.id)
80 | ec2_conn.stop_instances(instance_ids=idList)
81 |
82 | def terminate_all_instances(region):
83 | ec2_conn = boto.ec2.connect_to_region(region,
84 | aws_access_key_id=access_key,
85 | aws_secret_access_key=secret_key)
86 | idList = []
87 | if ec2_conn:
88 | reservations = ec2_conn.get_all_reservations(filters={'tag:Name': NameFilter})
89 | for reservation in reservations:
90 | if reservation:
91 | for ins in reservation.instances:
92 | idList.append(ins.id)
93 | ec2_conn.terminate_instances(instance_ids=idList)
94 |
95 | def launch_new_instances(region, number):
96 | ec2_conn = boto.ec2.connect_to_region(region,
97 | aws_access_key_id=access_key,
98 | aws_secret_access_key=secret_key)
99 | dev_sda1 = boto.ec2.blockdevicemapping.EBSBlockDeviceType(delete_on_termination=True)
100 | dev_sda1.size = 8 # size in Gigabytes
101 | dev_sda1.delete_on_termination = True
102 | bdm = boto.ec2.blockdevicemapping.BlockDeviceMapping()
103 | bdm['/dev/sda1'] = dev_sda1
104 | img = ec2_conn.get_all_images(filters={'name':'ubuntu/images/hvm-ssd/ubuntu-trusty-14.04-amd64-server-20150325'})[0].id
105 | reservation = ec2_conn.run_instances(image_id=img, #'ami-df6a8b9b', # ami-9f91a5f5
106 | min_count=number,
107 | max_count=number,
108 | key_name='amiller-mc2ec2',
109 | instance_type='t2.medium',
110 | security_group_ids = [secgroups[region], ],
111 | block_device_map = bdm)
112 | for instance in reservation.instances:
113 | instance.add_tag("Name", NameFilter)
114 | return reservation
115 |
116 |
117 | def start_all_instances(region):
118 | ec2_conn = boto.ec2.connect_to_region(region,
119 | aws_access_key_id=access_key,
120 | aws_secret_access_key=secret_key)
121 | idList = []
122 | if ec2_conn:
123 | reservations = ec2_conn.get_all_reservations(filters={'tag:Name': NameFilter})
124 | for reservation in reservations:
125 | for ins in reservation.instances:
126 | idList.append(ins.id)
127 | ec2_conn.start_instances(instance_ids=idList)
128 |
129 |
130 | def ipAll():
131 | result = []
132 | for region in regions:
133 | result += get_ec2_instances_ip(region) or []
134 | open('hosts','w').write('\n'.join(result))
135 | callFabFromIPList(result, 'removeHosts')
136 | callFabFromIPList(result, 'writeHosts')
137 | return result
138 |
139 |
def getIP():
    """Return the non-empty lines of the local 'hosts' cache file."""
    text = open('hosts', 'r').read()
    addresses = []
    for line in text.split('\n'):
        if line:
            addresses.append(line)
    return addresses
142 |
143 |
144 | def idAll():
145 | result = []
146 | for region in regions:
147 | result += get_ec2_instances_id(region) or []
148 | return result
149 |
150 |
151 | def startAll():
152 | for region in regions:
153 | start_all_instances(region)
154 |
155 |
156 | def stopAll():
157 | for region in regions:
158 | stop_all_instances(region)
159 |
160 | from subprocess import check_output, Popen, call, PIPE, STDOUT
161 | import fcntl
162 | from threading import Thread
163 | import platform
164 |
165 |
166 | def callFabFromIPList(l, work):
167 | if platform.system() == 'Darwin':
168 | print Popen(['fab', '-i', '~/.ssh/amiller-mc2ec2.pem',
169 | '-u', 'ubuntu', '-H', ','.join(l), # We rule out the client
170 | work])
171 | else:
172 | call('fab -i ~/.ssh/amiller-mc2ec2.pem -u ubuntu -P -H %s %s' % (','.join(l), work), shell=True)
173 |
def non_block_read(output):
    ''' even in a thread, a normal read with block until the buffer is full '''
    # Switch the underlying fd to non-blocking mode so readline returns
    # immediately instead of waiting for data.
    fd = output.fileno()
    fl = fcntl.fcntl(fd, fcntl.F_GETFL)
    fcntl.fcntl(fd, fcntl.F_SETFL, fl | os.O_NONBLOCK)
    try:
        return output.readline()
    except:
        # Best-effort: no data available yet -> empty string.
        # NOTE(review): bare except also swallows unrelated errors; a
        # narrower (IOError, OSError) would likely suffice — confirm.
        return ''
183 |
def monitor(stdout, N, t):
    """Tail a fab subprocess's stdout until N - t parties report done.

    Counts occurrences of the sentinel line 'synced transactions set'
    (echoed by each remote party) and prints the client-side latency
    once enough parties have finished.  Runs in a daemon thread.
    """
    starting_time = time.time()
    counter = 0  # number of parties that have reported completion
    while True:
        output = non_block_read(stdout).strip()
        print output
        if 'synced transactions set' in output:
            counter += 1
            if counter >= N - t:
                break
    ending_time = time.time()
    print 'Latency from client scope:', ending_time - starting_time
196 |
197 | def runProtocol(): # fast-path to run, assuming we already have the files ready
198 | callFabFromIPList(getIP(), 'runProtocol')
199 |
200 | def runProtocolfromClient(client, key, hosts=None):
201 | if not hosts:
202 | callFabFromIPList(getIP(), 'runProtocolFromClient:%s,%s' % (client, key))
203 | else:
204 | callFabFromIPList(hosts, 'runProtocolFromClient:%s,%s' % (client, key))
205 |
206 | def runEC2(Tx, N, t, n): # run 4 in a row
207 | for i in range(1, n+1):
208 | runProtocolfromClient('"%d %d %d"' % (Tx, N, t), "~/%d_%d_%d.key" % (N, t, i))
209 |
210 | def stopProtocol():
211 | callFabFromIPList(getIP(), 'stopProtocols')
212 |
213 | def callStartProtocolAndMonitorOutput(N, t, l, work='runProtocol'):
214 | if platform.system() == 'Darwin':
215 | popen = Popen(['fab', '-i', '~/.ssh/amiller-mc2ec2.pem',
216 | '-u', 'ubuntu', '-H', ','.join(l),
217 | work], stdout=PIPE, stderr=STDOUT, close_fds=True, bufsize=1, universal_newlines=True)
218 | else:
219 | popen = Popen('fab -i ~/.ssh/amiller-mc2ec2.pem -u ubuntu -P -H %s %s' % (','.join(l), work),
220 | shell=True, stdout=PIPE, stderr=STDOUT, close_fds=True, bufsize=1, universal_newlines=True)
221 | thread = Thread(target=monitor, args=[popen.stdout, N, t])
222 | thread.daemon = True
223 | thread.start()
224 |
225 | popen.wait()
226 | thread.join(timeout=1)
227 |
228 | return # to comment the following lines
229 | counter = 0
230 | while True:
231 | line = popen.stdout.readline()
232 | if not line: break
233 | if 'synced transactions set' in line:
234 | counter += 1
235 | if counter >= N - t:
236 | break
237 | print line # yield line
238 | sys.stdout.flush()
239 | ending_time = time.time()
240 | print 'Latency from client scope:', ending_time - starting_time
241 |
242 |
243 |
244 | def callFab(s, work): # Deprecated
245 | print Popen(['fab', '-i', '~/.ssh/amiller-mc2ec2.pem',
246 | '-u', 'ubuntu', '-H', ','.join(getAddrFromEC2Summary(s)),
247 | work])
248 |
249 | #short-cuts
250 |
251 | c = callFabFromIPList
252 |
253 | def sk():
254 | c(getIP(), 'syncKeys')
255 |
256 | def id():
257 | c(getIP(), 'install_dependencies')
258 |
259 | def gp():
260 | c(getIP(), 'git_pull')
261 |
262 | def rp(srp):
263 | c(getIP(), 'runProtocol:%s' % srp)
264 |
265 | if __name__ =='__main__':
266 | try: __IPYTHON__
267 | except NameError:
268 | parser = argparse.ArgumentParser()
269 | parser.add_argument('access_key', help='Access Key');
270 | parser.add_argument('secret_key', help='Secret Key');
271 | args = parser.parse_args()
272 | access_key = args.access_key
273 | secret_key = args.secret_key
274 |
275 | import IPython
276 | IPython.embed()
277 |
278 |
--------------------------------------------------------------------------------
/experiments/honest_party_test.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/python
2 | __author__ = 'aluex'
3 | from gevent import monkey
4 | monkey.patch_all()
5 |
6 | from gevent.queue import *
7 | from gevent import Greenlet
8 | from ..core.utils import bcolors, mylog, initiateThresholdSig
9 | from ..core.includeTransaction import honestParty
10 | from ..core.bkr_acs import initBeforeBinaryConsensus
11 | import gevent
12 | import os
13 | from ..core.utils import myRandom as random
14 | from ..core.utils import ACSException, checkExceptionPerGreenlet, getSignatureCost, encodeTransaction, getKeys, \
15 | deepEncode, deepDecode, randomTransaction, initiateECDSAKeys, initiateThresholdEnc, finishTransactionLeap
16 |
17 | import time
18 | import math
19 | from ..commoncoin.boldyreva_gipc import initialize as initializeGIPC
20 |
21 | USE_DEEP_ENCODE = True
22 | QUIET_MODE = True
23 |
def exception(msg):
    """Log a fatal exception message in warning colors and hard-exit.

    :param msg: human-readable description of the fatal condition

    Bug fix: the original called ``os.exit(1)``, which does not exist and
    itself raises AttributeError; ``os._exit(1)`` performs the intended
    immediate process termination (no cleanup), without needing a new import.
    """
    mylog(bcolors.WARNING + "Exception: %s\n" % msg + bcolors.ENDC)
    os._exit(1)
27 |
28 | msgCounter = 0
29 | totalMessageSize = 0
30 | starting_time = dict()
31 | ending_time = dict()
32 | msgSize = dict()
33 | msgFrom = dict()
34 | msgTo = dict()
35 | msgContent = dict()
36 | logChannel = Queue()
37 | msgTypeCounter = [[0, 0] for _ in range(8)]
38 | logGreenlet = None
39 |
40 | def logWriter(fileHandler):
41 | while True:
42 | msgCounter, msgSize, msgFrom, msgTo, st, et, content = logChannel.get()
43 | fileHandler.write("%d:%d(%d->%d)[%s]-[%s]%s\n" % (msgCounter, msgSize, msgFrom, msgTo, st, et, content))
44 | fileHandler.flush()
45 |
def encode(m):  # TODO
    """Stamp message `m` with a fresh global counter, record per-message
    bookkeeping (send time, size, endpoints, content), and return the
    wire-ready payload (deep-encoded when USE_DEEP_ENCODE is set)."""
    global msgCounter
    msgCounter += 1
    seq = msgCounter
    starting_time[seq] = str(time.time())
    payload = deepEncode(seq, m) if USE_DEEP_ENCODE else (seq, m)
    # A self-to-self echo message never touches the network, so it is
    # accounted as zero bytes.
    is_self_echo = m[0] == m[1] and m[2][0] != 'O' and m[2][1][0] == 'e'
    if is_self_echo:
        msgSize[seq] = 0
    else:
        msgSize[seq] = len(payload)
    msgFrom[seq] = m[1]
    msgTo[seq] = m[0]
    msgContent[seq] = m
    return payload
63 |
def decode(s): # TODO
    """Deserialize an incoming wire message and record receive-side stats.

    Returns the message tuple (the (to, from, payload) part); the leading
    message id is stripped after bookkeeping.
    """
    if USE_DEEP_ENCODE:
        result = deepDecode(s, msgTypeCounter)
    else:
        result = s
    assert(isinstance(result, tuple))
    ending_time[result[0]] = str(time.time())
    msgContent[result[0]] = None  # drop the payload reference once delivered
    global totalMessageSize
    totalMessageSize += msgSize[result[0]]
    if not QUIET_MODE:
        logChannel.put((result[0], msgSize[result[0]], msgFrom[result[0]], msgTo[result[0]],
            starting_time[result[0]], ending_time[result[0]], repr(result[1])))
    return result[1]
78 |
def client_test_freenet(N, t, options):
    '''
    Test for the client with random delay channels

    command list
        i [target]: send a transaction to include for some particular party
        h [target]: stop some particular party
        m [target]: manually make particular party send some message
        help: show the help screen

    :param N: the number of parties
    :param t: the number of malicious parties
    :return None:
    '''
    maxdelay = 0.01
    # Load all key material before any party starts.
    initiateThresholdSig(open(options.threshold_keys, 'r').read())
    initiateECDSAKeys(open(options.ecdsa, 'r').read())
    initiateThresholdEnc(open(options.threshold_encs, 'r').read())
    initializeGIPC(getKeys()[0])
    # One bounded (size-1) inbox queue per party.
    buffers = map(lambda _: Queue(1), range(N))
    global logGreenlet
    logGreenlet = Greenlet(logWriter, open('msglog.TorMultiple', 'w'))
    logGreenlet.parent_args = (N, t)
    logGreenlet.name = 'client_test_freenet.logWriter'
    logGreenlet.start()

    # Instantiate the "broadcast" instruction
    def makeBroadcast(i):
        def _broadcast(v):
            def _deliver(j):
                buffers[j].put(encode((j, i, v)))
            # Deliver to every party (including self), one greenlet each.
            for j in range(N):
                Greenlet(_deliver, j).start()
        return _broadcast

    def recvWithDecode(buf):
        # decode() returns (to, from, payload); [1:] drops the leading element
        # so parties see (sender, payload).
        def recv():
            s = buf.get()
            return decode(s)[1:]
        return recv

    def makeSend(i): # point to point message delivery
        def _send(j, v):
            buffers[j].put(encode((j, i, v)))
        return _send

    # Run consensus rounds forever; each iteration is one full round.
    while True:
    #if True:
        initBeforeBinaryConsensus()
        ts = []
        controlChannels = [Queue() for _ in range(N)]
        transactionSet = set([encodeTransaction(randomTransaction()) for trC in range(int(options.tx))])  # we are using the same one
        for i in range(N):
            bc = makeBroadcast(i)
            recv = recvWithDecode(buffers[i])
            th = Greenlet(honestParty, i, N, t, controlChannels[i], bc, recv, makeSend(i), options.B)
            controlChannels[i].put(('IncludeTransaction', transactionSet))
            # Stagger party start-up by a small random delay.
            th.start_later(random.random() * maxdelay)
            ts.append(th)

        try:
            gevent.joinall(ts)
        except ACSException:
            gevent.killall(ts)
        except finishTransactionLeap: ### Manually jump to this level
            print 'msgCounter', msgCounter
            print 'msgTypeCounter', msgTypeCounter
            # message id 0 (duplicated) for signatureCost
            logChannel.put(StopIteration)
            mylog("=====", verboseLevel=-1)
            for item in logChannel:
                mylog(item, verboseLevel=-1)
            mylog("=====", verboseLevel=-1)
            continue
        except gevent.hub.LoopExit: # Manual fix for early stop
            # NOTE(review): this sleep loop never terminates, so the
            # checkExceptionPerGreenlet() call below is unreachable.
            while True:
                gevent.sleep(1)
            checkExceptionPerGreenlet()
        finally:
            print "Concensus Finished"
159 |
160 | # import GreenletProfiler
161 | import atexit
162 |
163 | USE_PROFILE = False
164 | GEVENT_DEBUG = False
165 | OUTPUT_HALF_MSG = False
166 |
167 | if USE_PROFILE:
168 | import GreenletProfiler
169 |
def exit():
    """atexit hook: dump the message-count/size statistics gathered this run.

    NOTE(review): shadows the builtin exit(); registered via atexit in the
    __main__ block below.
    """
    print "Entering atexit()"
    print 'msgCounter', msgCounter
    print 'msgTypeCounter', msgTypeCounter
    # msgTypeCounter rows are [count, bytes]; split into two parallel tuples.
    # Index 0 is skipped in the prints below.
    nums,lens = zip(*msgTypeCounter)
    print '    Init      Echo       Val       Aux      Coin     Ready     Share'
    print '%8d  %8d  %9d %9d %9d %9d %9d' % nums[1:]
    print '%8d  %8d  %9d %9d %9d %9d %9d' % lens[1:]
    mylog("Total Message size %d" % totalMessageSize, verboseLevel=-2)
    if OUTPUT_HALF_MSG:
        # Messages that were encoded but never decoded (still "in flight").
        halfmsgCounter = 0
        for msgindex in starting_time.keys():
            if msgindex not in ending_time.keys():
                logChannel.put((msgindex, msgSize[msgindex], msgFrom[msgindex],
                    msgTo[msgindex], starting_time[msgindex], time.time(), '[UNRECEIVED]' + repr(msgContent[msgindex])))
                halfmsgCounter += 1
        mylog('%d extra log exported.' % halfmsgCounter, verboseLevel=-1)

    if GEVENT_DEBUG:
        checkExceptionPerGreenlet()

    if USE_PROFILE:
        GreenletProfiler.stop()
        stats = GreenletProfiler.get_func_stats()
        stats.print_all()
        stats.save('profile.callgrind', type='callgrind')
196 |
if __name__ == '__main__':
    # GreenletProfiler.set_clock_type('cpu')
    # Register the stats dump first so it runs even on abnormal termination.
    atexit.register(exit)
    if USE_PROFILE:
        GreenletProfiler.set_clock_type('cpu')
        GreenletProfiler.start()

    from optparse import OptionParser
    parser = OptionParser()
    parser.add_option("-e", "--ecdsa-keys", dest="ecdsa",
                      help="Location of ECDSA keys", metavar="KEYS")
    parser.add_option("-k", "--threshold-keys", dest="threshold_keys",
                      help="Location of threshold signature keys", metavar="KEYS")
    parser.add_option("-c", "--threshold-enc", dest="threshold_encs",
                      help="Location of threshold encryption keys", metavar="KEYS")
    parser.add_option("-n", "--number", dest="n",
                      help="Number of parties", metavar="N", type="int")
    parser.add_option("-b", "--propose-size", dest="B",
                      help="Number of transactions to propose", metavar="B", type="int")
    parser.add_option("-t", "--tolerance", dest="t",
                      help="Tolerance of adversaries", metavar="T", type="int")
    parser.add_option("-x", "--transactions", dest="tx",
                      help="Number of transactions proposed by each party", metavar="TX", type="int", default=-1)
    (options, args) = parser.parse_args()
    if (options.ecdsa and options.threshold_keys and options.threshold_encs and options.n and options.t):
        # Default batch size: N log N transactions per proposal.
        if not options.B:
            options.B = int(math.ceil(options.n * math.log(options.n)))
        if options.tx < 0:
            options.tx = options.B
        client_test_freenet(options.n , options.t, options)
    else:
        parser.error('Please specify the arguments')
229 |
230 |
--------------------------------------------------------------------------------
/experiments/mmr13_expt.py:
--------------------------------------------------------------------------------
1 | from gevent import monkey
2 | monkey.patch_all()
3 |
4 | import gevent
5 | from gevent import Greenlet
6 | from gevent.queue import Queue
7 | import random
8 |
9 | from ..core.broadcasts import bv_broadcast, binary_consensus, bcolors, mylog, mv84consensus, globalState
10 |
11 |
12 | # Run the BV_broadcast protocol with no corruptions and uniform random message delays
def random_delay_broadcast1(inputs, t):
    """Run bv_broadcast for len(inputs) parties with message delays drawn
    uniformly from [0, maxdelay).

    :param inputs: per-party input values (one party per element)
    :param t: number of tolerated faulty parties
    """
    maxdelay = 0.01

    N = len(inputs)
    buffers = map(lambda _: Queue(1), inputs)

    # Instantiate the "broadcast" instruction
    def makeBroadcast(i):
        def _broadcast(v):
            def _deliver(j):
                buffers[j].put((i,v))
            for j in range(N):
                Greenlet(_deliver, j).start_later(random.random()*maxdelay)
        return _broadcast

    def makeOutput(i):
        def _output(v):
            print '[%d]' % i, 'output:', v
        return _output

    ts = []
    for i in range(N):
        bc = makeBroadcast(i)
        recv = buffers[i].get
        outp = makeOutput(i)
        inp = bv_broadcast(i, N, t, bc, recv, outp)
        th = Greenlet(inp, inputs[i])
        th.start_later(random.random()*maxdelay)
        ts.append(th)

    # LoopExit fires when every greenlet blocks with nothing left to deliver;
    # treat that as normal completion.
    try:
        gevent.joinall(ts)
    except gevent.hub.LoopExit: pass
46 |
47 |
48 | # Run the BV_broadcast protocol with no corruptions and uniform random message delays
49 | def random_delay_sharedcoin_dummy(N, t):
50 | maxdelay = 0.01
51 |
52 | buffers = map(lambda _: Queue(1), range(N))
53 |
54 | # Instantiate the "broadcast" instruction
55 | def makeBroadcast(i):
56 | def _broadcast(v):
57 | def _deliver(j):
58 | buffers[j].put((i,v))
59 | for j in range(N):
60 | Greenlet(_deliver, j).start_later(random.random()*maxdelay)
61 | return _broadcast
62 |
63 | def _run(i, coin):
64 | # Party i, continue to run the shared coin
65 | r = 0
66 | while r < 5:
67 | gevent.sleep(random.random() * maxdelay)
68 | print '[',i,'] at round ', r
69 | b = next(coin)
70 | print '[',i,'] bit[%d]:'%r, b
71 | r += 1
72 | print '[',i,'] done'
73 |
74 | ts = []
75 | for i in range(N):
76 | bc = makeBroadcast(i)
77 | recv = buffers[i].get
78 | coin = shared_coin_dummy(i, N, t, bc, recv)
79 | th = Greenlet(_run, i, coin)
80 | th.start_later(random.random() * maxdelay)
81 | ts.append(th)
82 |
83 | try:
84 | gevent.joinall(ts)
85 | except gevent.hub.LoopExit: pass
86 |
87 | # Run the BV_broadcast protocol with no corruptions and uniform random message delays
def random_delay_binary_consensus(N, t, inputs):
    """Run binary_consensus for N parties over randomly-delayed channels,
    then check that every recorded decision in globalState agrees.

    :param N: number of parties
    :param t: number of tolerated faulty parties
    :param inputs: per-party initial bit
    """
    maxdelay = 0.01

    buffers = map(lambda _: Queue(1), range(N))
    # Function attribute: shared running count of delivered messages.
    random_delay_binary_consensus.msgCount = 0
    # Instantiate the "broadcast" instruction
    def makeBroadcast(i):
        def _broadcast(v):
            def _deliver(j):
                random_delay_binary_consensus.msgCount += 1
                tmpCount = random_delay_binary_consensus.msgCount
                mylog(bcolors.OKGREEN + "MSG: [%d] -[%d]-> [%d]: %s" % (i, tmpCount, j, repr(v)) + bcolors.ENDC)
                buffers[j].put((i, v))
                mylog(bcolors.OKGREEN + "     [%d] -[%d]-> [%d]: Finish" % (i, tmpCount, j) + bcolors.ENDC)
            for j in range(N):
                Greenlet(_deliver, j).start_later(random.random()*maxdelay)
        return _broadcast

    ts = []
    for i in range(N):
        bc = makeBroadcast(i)
        recv = buffers[i].get
        vi = inputs[i] #random.randint(0, 1)
        decideChannel = Queue(1)
        th = Greenlet(binary_consensus, i, N, t, vi, decideChannel, bc, recv)
        th.start_later(random.random() * maxdelay)
        ts.append(th)



    gevent.joinall(ts)

    # NOTE(review): assumes party 0 has an entry in globalState; every other
    # recorded decision is compared against it.
    for key, item in globalState.items():
        if item != globalState[0]:
            mylog(bcolors.FAIL + 'Bad Concensus!' + bcolors.ENDC)

    print globalState
125 |
126 |
127 | # Run the BV_broadcast protocol with no corruptions and uniform random message delays
def random_delay_multivalue_consensus(N, t, inputs):
    """Run mv84consensus for N parties with uniformly random message delays;
    on early LoopExit, back-fill empty globalState entries and verify
    agreement.

    :param N: number of parties
    :param t: number of tolerated faulty parties
    :param inputs: per-party proposed value
    """
    maxdelay = 0.01

    msgThreads = []

    buffers = map(lambda _: Queue(1), range(N))

    random_delay_multivalue_consensus.msgCount = 0
    # Instantiate the "broadcast" instruction
    def makeBroadcast(i):
        def _broadcast(v):
            def _deliver(j):
                random_delay_multivalue_consensus.msgCount += 1
                tmpCount = random_delay_multivalue_consensus.msgCount
                mylog(bcolors.OKGREEN + "MSG: [%d] -[%d]-> [%d]: %s" % (i, tmpCount, j, repr(v)) + bcolors.ENDC)
                buffers[j].put((i,v))
                mylog(bcolors.OKGREEN + "     [%d] -[%d]-> [%d]: Finish" % (i, tmpCount, j) + bcolors.ENDC)

            for j in range(N):
                g = Greenlet(_deliver, j)
                g.start_later(random.random()*maxdelay)
                msgThreads.append(g) # Keep reference
        return _broadcast

    ts = []
    #cid = 1
    for i in range(N):
        bc = makeBroadcast(i)
        recv = buffers[i].get
        vi = inputs[i]
        th = Greenlet(mv84consensus, i, N, t, vi, bc, recv)
        th.start_later(random.random() * maxdelay)
        ts.append(th)

    try:
        gevent.joinall(ts)
    except gevent.hub.LoopExit: # Manual fix for early stop
        # Adopt any non-empty recorded decision as the agreed value, fill in
        # the missing entries, and flag parties that decided differently.
        agreed = ""
        for key, value in globalState.items():
            if globalState[key] != "":
                agreed = globalState[key]
        for key, value in globalState.items():
            if globalState[key] == "":
                globalState[key] = agreed
            if globalState[key] != agreed:
                print "Consensus Error"


    print globalState
177 |
if __name__=='__main__':
    print "[ =========== ]"
    print "Testing binary consensus..."
    # Five parties, one tolerated fault, random bit inputs.
    inputs = [random.randint(0, 1) for _ in range(5)]
    print "Inputs:", inputs
    random_delay_binary_consensus(5, 1, inputs)
184 |
185 |
--------------------------------------------------------------------------------
/experiments/mmr13_tor.py:
--------------------------------------------------------------------------------
1 | # Basic framework requirements
2 | from gevent import monkey
3 | monkey.patch_all()
4 |
5 | import gevent
6 | from gevent import Greenlet
7 | from gevent.server import StreamServer
8 | from gevent.queue import Queue
9 | import json
10 |
11 | # Import the algorithm
12 | from ..core.broadcasts import makeCallOnce, bv_broadcast, shared_coin_dummy
13 |
14 | # Sockets that route through Tor
15 | import socket
16 | import socks
17 | socks.setdefaultproxy(socks.PROXY_TYPE_SOCKS5, "127.0.0.1", 9050, True)
18 |
def listen_to_channel(port):
    """Start a StreamServer on localhost:*port* and return a Queue that
    yields one Python object per newline-delimited JSON message received.
    """
    incoming = Queue(1)

    def _on_connect(sock, _addr):
        # One JSON document per line; decode and hand off to the consumer.
        for raw in sock.makefile():
            incoming.put(json.loads(raw))

    listener = StreamServer(('127.0.0.1', port), _on_connect)
    listener.start()
    return incoming
29 |
def connect_to_channel(hostname, port):
    """Open a SOCKS (Tor) connection to hostname:port and return a Queue;
    every object put on the queue is sent as newline-terminated JSON.
    """
    s = socks.socksocket()
    s.connect((hostname, port))
    q = Queue(1)
    def _handle():
        # Dedicated writer greenlet: serialize and send queued objects forever.
        while True:
            obj = q.get()
            s.sendall(json.dumps(obj) + '\n')
    Greenlet(_handle).start()
    return q
40 |
41 | TOR_MAPPINGS = [
42 | ('t6wgydamj55qs7do.onion',49500),
43 | ('qk7v4tpkwnslwfvb.onion',49501),
44 | ('cs25ak52h4efslwp.onion',49502),
45 | ('7vcug2izpf5psowt.onion',49503)
46 | ]
47 |
48 | # Run the BV_broadcast protocol with no corruptions and uniform random message delays
def random_delay_broadcast_tor(inputs, t):
    """Run bv_broadcast for len(inputs) parties where every channel is a real
    Tor connection to the hard-coded TOR_MAPPINGS onion services.

    :param inputs: per-party input values
    :param t: number of tolerated faulty parties
    """

    N = len(inputs)

    # Instantiate the "broadcast" instruction
    def makeBroadcast(i):
        chans = []
        # First establish N connections (including a self connection)
        for j in range(N):
            host,port = TOR_MAPPINGS[j]
            chans.append(connect_to_channel(host,port))
        def _broadcast(v):
            for j in range(N):
                chans[j].put( (i,v) )
        return _broadcast

    # Get the servers ready
    def makeOutput(i):
        def _output(v):
            print '[%d]' % i, 'output:', v
        return _output

    # Create the servers
    servers = []
    for i in range(N):
        _,port = TOR_MAPPINGS[i]
        servers.append(listen_to_channel(port))
    # Give listeners a moment to come up before clients connect.
    gevent.sleep(2)
    print 'servers started'

    ts = []
    for i in range(N):
        bc = makeBroadcast(i)
        recv = servers[i].get
        outp = makeOutput(i)
        inp = bv_broadcast(i, N, t, bc, recv, outp)
        th = Greenlet(inp, inputs[i])
        th.start()
        ts.append(th)

    # LoopExit fires when all greenlets block forever; treat as completion.
    try:
        gevent.joinall(ts)
    except gevent.hub.LoopExit: pass
92 |
--------------------------------------------------------------------------------
/experiments/mmr13_tor_multipleCircuits.py:
--------------------------------------------------------------------------------
1 | from gevent import monkey
2 | monkey.patch_all()
3 |
4 | import gevent
5 | from gevent import Greenlet
6 | from gevent.server import StreamServer
7 | from gevent.queue import Queue
8 | import json
9 |
10 | import random
11 |
12 | from ..core.broadcasts import bv_broadcast, shared_coin_dummy, binary_consensus, bcolors, mylog, mv84consensus, globalState
13 |
14 | import socks
15 |
16 | TOR_SOCKSPORT = range(9050, 9055)
17 |
def listen_to_channel(port):
    """Announce and start a localhost server on *port*; return a Queue of
    decoded JSON objects, one per received line.
    """
    mylog('Preparing server on %d...' % port)
    incoming = Queue(1)

    def _on_connect(sock, _addr):
        # Each newline-delimited JSON message is decoded and queued.
        for raw in sock.makefile():
            incoming.put(json.loads(raw))

    listener = StreamServer(('127.0.0.1', port), _on_connect)
    listener.start()
    return incoming
30 |
def connect_to_channel(hostname, port, party):
    """Connect to hostname:port through *party*'s own Tor SOCKS proxy
    (TOR_SOCKSPORT[party]) so each party gets a distinct circuit; returns a
    Queue whose items are sent as newline-terminated JSON.
    """
    s = socks.socksocket()
    s.setproxy(socks.PROXY_TYPE_SOCKS5, "127.0.0.1", TOR_SOCKSPORT[party], True)
    s.connect((hostname, port))
    q = Queue(1)
    def _handle():
        # Dedicated writer greenlet for this connection.
        while True:
            obj = q.get()
            s.sendall(json.dumps(obj) + '\n')
    Greenlet(_handle).start()
    return q
42 |
43 | BASE_PORT = 49500
44 |
45 | TOR_MAPPING_LIST = """
46 | 3lejkcwieaamk2ea.onion
47 | l2y6c2tztpjbcjv5.onion
48 | cystbatihmcyj6nf.onion
49 | hhhegzzwem6v2rpx.onion
50 | za44dm5gbhkzif24.onion
51 | """.strip().split('\n') # hard-coded test
52 |
53 |
54 |
55 |
56 | TOR_MAPPINGS = [(host, BASE_PORT+i) for i, host in enumerate(TOR_MAPPING_LIST)]
57 | mylog("[INIT] TOR_MAPPINGS: %s" % repr(TOR_MAPPINGS))
58 |
59 | # Run the BV_broadcast protocol with no corruptions and uniform random message delays
def random_delay_broadcast1(inputs, t):
    """Run bv_broadcast locally (no Tor) for len(inputs) parties with message
    delays drawn uniformly from [0, maxdelay).

    :param inputs: per-party input values
    :param t: number of tolerated faulty parties
    """
    maxdelay = 0.01

    N = len(inputs)
    buffers = map(lambda _: Queue(1), inputs)

    # Instantiate the "broadcast" instruction
    def makeBroadcast(i):
        def _broadcast(v):
            def _deliver(j):
                buffers[j].put((i,v))
            for j in range(N):
                Greenlet(_deliver, j).start_later(random.random()*maxdelay)
        return _broadcast

    def makeOutput(i):
        def _output(v):
            print '[%d]' % i, 'output:', v
        return _output

    ts = []
    for i in range(N):
        bc = makeBroadcast(i)
        recv = buffers[i].get
        outp = makeOutput(i)
        inp = bv_broadcast(i, N, t, bc, recv, outp)
        th = Greenlet(inp, inputs[i])
        th.start_later(random.random()*maxdelay)
        ts.append(th)


    # LoopExit fires when all greenlets block forever; treat as completion.
    try:
        gevent.joinall(ts)
    except gevent.hub.LoopExit: pass
94 |
95 |
96 | # Run the BV_broadcast protocol with no corruptions and uniform random message delays
def random_delay_sharedcoin_dummy(N, t):
    """Run N parties sharing the dummy common coin over randomly-delayed local
    channels; each party draws 5 coin values and prints them.

    :param N: number of parties
    :param t: number of tolerated faulty parties
    """
    maxdelay = 0.01

    buffers = map(lambda _: Queue(1), range(N))

    # Instantiate the "broadcast" instruction
    def makeBroadcast(i):
        def _broadcast(v):
            def _deliver(j):
                buffers[j].put((i,v))
            for j in range(N):
                Greenlet(_deliver, j).start_later(random.random()*maxdelay)
        return _broadcast

    def _run(i, coin):
        # Party i, continue to run the shared coin
        r = 0
        while r < 5:
            gevent.sleep(random.random() * maxdelay)
            print '[',i,'] at round ', r
            b = next(coin)
            print '[',i,'] bit[%d]:'%r, b
            r += 1
        print '[',i,'] done'

    ts = []
    for i in range(N):
        bc = makeBroadcast(i)
        recv = buffers[i].get
        coin = shared_coin_dummy(i, N, t, bc, recv)
        th = Greenlet(_run, i, coin)
        th.start_later(random.random() * maxdelay)
        ts.append(th)

    # LoopExit fires when all greenlets block forever; treat as completion.
    try:
        gevent.joinall(ts)
    except gevent.hub.LoopExit: pass
134 |
135 | # Run the BV_broadcast protocol with no corruptions and uniform random message delays
def random_delay_binary_consensus(N, t):
    """Run binary_consensus for N parties (random bit inputs) over
    randomly-delayed local channels; reconcile globalState on early exit.

    :param N: number of parties
    :param t: number of tolerated faulty parties
    """
    maxdelay = 0.01

    buffers = map(lambda _: Queue(1), range(N))

    # Instantiate the "broadcast" instruction
    def makeBroadcast(i):
        def _broadcast(v):
            def _deliver(j):
                mylog(bcolors.OKGREEN + "MSG: [%d] -> [%d]: %s" % (i, j, repr(v)) + bcolors.ENDC)
                buffers[j].put((i,v))
                mylog(bcolors.OKGREEN + "     [%d] -> [%d]: Finish" % (i, j) + bcolors.ENDC)
            for j in range(N):
                Greenlet(_deliver, j).start_later(random.random()*maxdelay)
        return _broadcast

    ts = []
    for i in range(N):
        bc = makeBroadcast(i)
        recv = buffers[i].get
        vi = random.randint(0, 1)
        decideChannel = Queue(1)
        th = Greenlet(binary_consensus, i, N, t, vi, decideChannel, bc, recv)
        th.start_later(random.random() * maxdelay)
        ts.append(th)

    try:
        gevent.joinall(ts)
    except gevent.hub.LoopExit: # Manual fix for early stop
        # Treat any non-empty recorded decision as the agreed value,
        # back-fill missing entries, and flag disagreements.
        agreed = ""
        for key, value in globalState.items():
            if globalState[key] != "":
                agreed = globalState[key]
        for key, value in globalState.items():
            if globalState[key] == "":
                globalState[key] = agreed
            if globalState[key] != agreed:
                print "Consensus Error"

    print globalState
176 |
177 |
178 | # Run the BV_broadcast protocol with no corruptions and uniform random message delays
def random_delay_multivalue_consensus(N, t, inputs):
    """Run mv84consensus with real Tor channels: each party connects out
    through its own SOCKS port and listens on its TOR_MAPPINGS onion port.

    :param N: number of parties
    :param t: number of tolerated faulty parties
    :param inputs: per-party proposed value
    """

    mylog("[Tor] Making circuits...")

    # Now we don't use stem

    maxdelay = 0.01

    # NOTE(review): buffers is never used below -- messages flow over the Tor
    # channels, not these local queues.
    buffers = map(lambda _: Queue(1), range(N))

    # Instantiate the "broadcast" instruction
    def makeBroadcast(i):
        chans = []
        # First establish N connections (including a self connection)
        for j in range(N):
            host, port = TOR_MAPPINGS[j]
            chans.append(connect_to_channel(host, port, i))
        def _broadcast(v):
            mylog(bcolors.OKGREEN + "[%d] Broadcasted %s" % (i, repr(v)) + bcolors.ENDC)
            for j in range(N):
                chans[j].put( (i,v) )
        return _broadcast

    # Create the servers
    servers = []
    for i in range(N):
        _, port = TOR_MAPPINGS[i]
        servers.append(listen_to_channel(port))
    # Give the listeners a moment to come up before clients connect.
    gevent.sleep(2)
    print 'servers started'


    ts = []
    for i in range(N):
        bc = makeBroadcast(i)
        recv = servers[i].get
        vi = inputs[i]
        th = Greenlet(mv84consensus, i, N, t, vi, bc, recv)
        th.start() # start_later(random.random() * maxdelay)
        ts.append(th)


    try:
        gevent.joinall(ts)
    except gevent.hub.LoopExit: # Manual fix for early stop
        # Treat any non-empty recorded decision as the agreed value,
        # back-fill missing entries, and flag disagreements.
        agreed = ""
        for key, value in globalState.items():
            if globalState[key] != "":
                agreed = globalState[key]
        for key, value in globalState.items():
            if globalState[key] == "":
                globalState[key] = agreed
            if globalState[key] != agreed:
                print "Consensus Error"


    print globalState
236 |
if __name__=='__main__':
    print "[ =========== ]"
    # NOTE(review): the "binary consensus" banner is stale -- only the
    # multivalue consensus test below is actually run.
    print "Testing binary consensus..."
    print "Testing multivalue consensus with different inputs..."
    random_delay_multivalue_consensus(5, 1, [random.randint(0, 10) for x in range(5)])
242 |
243 |
--------------------------------------------------------------------------------
/experiments/multipleTorLauncher.sh:
--------------------------------------------------------------------------------
#!/bin/bash
# Launch 25 local Tor daemons (indices 0..24), each with its own SocksPort,
# ControlPort and DataDirectory under data/torN, for multi-circuit experiments.

base_socks_port=9050
base_control_port=8118

# Fixed hashed control-port password shared by all instances.
# BUG FIX: the echoed command previously claimed --HashedControlPassword ""
# while the executed command used this hash; factoring the value into one
# variable keeps the log and the actual invocation in sync.
control_password_hash="16:E35FB780CF3BF47E6054EDCF3C6B57FACE5E5EEB5151827F702BE3944D"

# Create data directory if it doesn't exist
if [ ! -d "data" ]; then
        mkdir "data"
fi

#for i in {0..10}
for i in {0..24}
do
        socks_port=$((base_socks_port+i))
        control_port=$((base_control_port+i))
        if [ ! -d "data/tor$i" ]; then
                echo "Creating directory data/tor$i"
                mkdir "data/tor$i"
        fi
        # The control port uses a fixed, known password hash. Must be used in secure and controlled environments

        echo "Running: tor --RunAsDaemon 1 --CookieAuthentication 0 --HashedControlPassword $control_password_hash --ControlPort $control_port --PidFile tor$i.pid --SocksPort $socks_port --DataDirectory data/tor$i"

        tor --RunAsDaemon 1 --CookieAuthentication 0 --HashedControlPassword "$control_password_hash" --ControlPort $control_port --PidFile tor$i.pid --SocksPort $socks_port --DataDirectory data/tor$i
done
--------------------------------------------------------------------------------
/experiments/names.txt:
--------------------------------------------------------------------------------
1 | JAMES
2 | JOHN
3 | ROBERT
4 | MICHAEL
5 | MARY
6 | WILLIAM
7 | DAVID
8 | RICHARD
9 | CHARLES
10 | JOSEPH
11 | THOMAS
12 | PATRICIA
13 | CHRISTOPHER
14 | LINDA
15 | BARBARA
16 | DANIEL
17 | PAUL
18 | MARK
19 | ELIZABETH
20 | DONALD
21 | JENNIFER
22 | GEORGE
23 | MARIA
24 | KENNETH
25 | SUSAN
26 | STEVEN
27 | EDWARD
28 | MARGARET
29 | BRIAN
30 | RONALD
31 | DOROTHY
32 | ANTHONY
33 | LISA
34 | KEVIN
35 | NANCY
36 | KAREN
37 | BETTY
38 | HELEN
39 | JASON
40 | MATTHEW
41 | GARY
42 | TIMOTHY
43 | SANDRA
44 | JOSE
45 | LARRY
46 | JEFFREY
47 | FRANK
48 | DONNA
49 | CAROL
50 | RUTH
51 | SCOTT
52 | ERIC
53 | STEPHEN
54 | ANDREW
55 | SHARON
56 | MICHELLE
57 | LAURA
58 | SARAH
59 | KIMBERLY
60 | DEBORAH
61 | JESSICA
62 | RAYMOND
63 | SHIRLEY
64 | CYNTHIA
65 | ANGELA
66 | MELISSA
67 | BRENDA
68 | AMY
69 | JERRY
70 | GREGORY
71 | ANNA
72 | JOSHUA
73 | VIRGINIA
74 | REBECCA
75 | KATHLEEN
76 | DENNIS
77 | PAMELA
78 | MARTHA
79 | DEBRA
80 | AMANDA
81 | WALTER
82 | STEPHANIE
83 | WILLIE
84 | PATRICK
85 | TERRY
86 | CAROLYN
87 | PETER
88 | CHRISTINE
89 | MARIE
90 | JANET
91 | FRANCES
92 | CATHERINE
93 | HAROLD
94 | HENRY
95 | DOUGLAS
96 | JOYCE
97 | ANN
98 | DIANE
99 | ALICE
100 | JEAN
101 | JULIE
102 | CARL
103 | KELLY
104 | HEATHER
105 | ARTHUR
106 | TERESA
107 | GLORIA
108 | DORIS
109 | RYAN
110 | JOE
111 | ROGER
112 | EVELYN
113 | JUAN
114 | ASHLEY
115 | JACK
116 | CHERYL
117 | ALBERT
118 | JOAN
119 | MILDRED
120 | KATHERINE
121 | JUSTIN
122 | JONATHAN
123 | GERALD
124 | KEITH
125 | SAMUEL
126 | JUDITH
127 | ROSE
128 | JANICE
129 | LAWRENCE
130 | RALPH
131 | NICOLE
132 | JUDY
133 | NICHOLAS
134 | CHRISTINA
135 | ROY
136 | KATHY
137 | THERESA
138 | BENJAMIN
139 | BEVERLY
140 | DENISE
141 | BRUCE
142 | BRANDON
143 | ADAM
144 | TAMMY
145 | IRENE
146 | FRED
147 | BILLY
148 | HARRY
149 | JANE
150 | WAYNE
151 | LOUIS
152 | LORI
153 | STEVE
154 | TRACY
155 | JEREMY
156 | RACHEL
157 | ANDREA
158 | AARON
159 | MARILYN
160 | ROBIN
161 | RANDY
162 | LESLIE
163 | KATHRYN
164 | EUGENE
165 | BOBBY
166 | HOWARD
167 | CARLOS
168 | SARA
169 | LOUISE
170 | JACQUELINE
171 | ANNE
172 | WANDA
173 | RUSSELL
174 | SHAWN
175 | VICTOR
176 | JULIA
177 | BONNIE
178 | RUBY
179 | CHRIS
180 | TINA
181 | LOIS
182 | PHYLLIS
183 | JAMIE
184 | NORMA
185 | MARTIN
186 | PAULA
187 | JESSE
188 | DIANA
189 | ANNIE
190 | SHANNON
191 | ERNEST
192 | TODD
193 | PHILLIP
194 | LEE
195 | LILLIAN
196 | PEGGY
197 | EMILY
198 | CRYSTAL
199 | KIM
200 | CRAIG
201 | CARMEN
202 | GLADYS
203 | CONNIE
204 | RITA
205 | ALAN
206 | DAWN
207 | FLORENCE
208 | DALE
209 | SEAN
210 | FRANCIS
211 | JOHNNY
212 | CLARENCE
213 | PHILIP
214 | EDNA
215 | TIFFANY
216 | TONY
217 | ROSA
218 | JIMMY
219 | EARL
220 | CINDY
221 | ANTONIO
222 | LUIS
223 | MIKE
224 | DANNY
225 | BRYAN
226 | GRACE
227 | STANLEY
228 | LEONARD
229 | WENDY
230 | NATHAN
231 | MANUEL
232 | CURTIS
233 | VICTORIA
234 | RODNEY
235 | NORMAN
236 | EDITH
237 | SHERRY
238 | SYLVIA
239 | JOSEPHINE
240 | ALLEN
241 | THELMA
242 | SHEILA
243 | ETHEL
244 | MARJORIE
245 | LYNN
246 | ELLEN
247 | ELAINE
248 | MARVIN
249 | CARRIE
250 | MARION
251 | CHARLOTTE
252 | VINCENT
253 | GLENN
254 | TRAVIS
255 | MONICA
256 | JEFFERY
257 |
--------------------------------------------------------------------------------
/experiments/plots/plot_latency.py:
--------------------------------------------------------------------------------
1 | import matplotlib.pyplot as plt
2 |
3 | # Measurements from the table
4 | expt = [
5 | (8,2,[
6 | (512,5.96925),
7 | (1024,6.41475),
8 | (2048,6.402),
9 | (4096,7.75725),
10 | (8192,10.99675),
11 | (16384,18.7975),
12 | (32768,36.5285),
13 | (65536,70.299),
14 | (131072, 138.395),
15 | (262144, 295.122)
16 | ]),
17 | (16,4,[
18 | (512, 5.6815),
19 | (1024, 6.40175),
20 | (2048, 8.33675),
21 | (4096, 12.37275),
22 | (8192, 20.29125),
23 | (16384, 39.1535),
24 | (32768, 73.4515),
25 | (65536, 140.214)
26 | ]),
27 | (32,8,[
28 | (512, 9.489),
29 | (1024, 11.5725),
30 | (2048, 15.89175),
31 | (4096, 24.24975),
32 | (8192, 43.201),
33 | (16384, 79.7425),
34 | (32768, 153.85975)
35 | ]),
36 | (64, 16,[
37 | (512, 24.3315),
38 | (1024, 32.17575),
39 | (2048, 39.399625),
40 | (4096, 58.59675),
41 | (8192, 96.832),
42 | (16384, 173.94425)
43 | ]),
44 | (64, 21, [
45 | (512, 26.027),
46 | (1024, 31.5315),
47 | (2048, 42.291),
48 | (4096, 63.3355),
49 | (8192, 105.0055),
50 | (16384, 190.3235)
51 | ]),
52 | (128, 32, [
53 | (256, 89.2895),
54 | (512, 94.507),
55 | (1024, 105.0365),
56 | (2048, 122.69),
57 | (4096, 162.205),
58 | (8192, 241.219),
59 | (16384, 414.118)
60 | ])]
61 |
62 | import os
63 |
def process(s, N=-1, t=-1):
    """Parse one experiment log slice and return the consensus latency: the
    time at which the (N-t)-th party finished, measured from the earliest
    recorded start.  Returns None when fewer than N-t parties finished.

    NOTE(review): log fragments are passed to eval() -- only feed trusted
    logs produced by these experiments.
    """
    endtime = dict()    # party id -> finish timestamp
    starttime = dict()  # party id -> start timestamp
    tList = []
    lines = s.split('\n')
    for line in lines:
        if 'timestampE' in line:
            info = eval(line.split('timestampE')[1])
            endtime[info[0]] = info[1]
        if 'timestampB' in line:
            info = eval(line.split('timestampB')[1])
            starttime[info[0]] = info[1]
    maxLatency = 0
    for key, value in endtime.items():
        # NOTE(review): assumes every finished party also logged a start;
        # a missing timestampB entry would raise KeyError here.
        print key, starttime[key], value, value - starttime[key]
        tList.append(value - starttime[key])
        if value - starttime[key] > maxLatency:
            maxLatency = value - starttime[key]
    # NOTE(review): the 3*t < N clause re-infers N, t even when valid
    # parameters were passed (3t < N is the normal BFT bound) -- confirm.
    if N < 0 or t < 0 or 3*t < N:
        # infer N, t
        N = len(starttime.keys())
        t = N/4 # follows the convention that 4t = N (Py2 integer division)
        print 'N', N, 't', t
    if len(endtime) < N - t:
        print "!!!!!!!!!!!!! Consensus Unfinished"
        return None
    return sorted(endtime.values())[N-t-1] - min(starttime.values())
91 |
92 | from collections import defaultdict
93 | def getPointsFromLog(d):
94 | resX = defaultdict(lambda: [])
95 | resY = defaultdict(lambda: [])
96 | for file in os.listdir(d):
97 | if file[-4:] == '.log':
98 | print 'processing', file
99 | N = int(file[:-4].split('_')[0])
100 | t = int(file[:-4].split('_')[1])
101 | Tx = int(file[:-4].split('_')[2])
102 | content = open(d+'/'+file).read().split('servers')[1:]
103 | for s in content:
104 | latency = process(s)
105 | if latency:
106 | resX[(N, t)].append(Tx * N)
107 | resY[(N, t)].append(latency)
108 | return resX, resY
109 |
110 | import matplotlib.cm as cm
111 | import numpy as np
112 |
def do_plot():
    """Plot latency vs. total request batch size (log-log): table data from
    *expt* as lines, per-run measurements from ec2/timing logs as scatter.
    Saves the figure to plot_latency.pdf.
    """
    f = plt.figure(1, figsize=(7,5));
    plt.clf()
    ax = f.add_subplot(1, 1, 1)
    resX, resY = getPointsFromLog('ec2/timing')
    colors = cm.get_cmap('terrain')(np.linspace(0, 0.3, len(resX)))
    colorCounter = 0 # we cannot use *next*, because an ndarray is not an iterator
    for N, t, entries in expt:
        throughput = []
        batch = []
        for ToverN, latency in entries:
            # x-axis: total requests in the batch across all parties.
            batch.append(ToverN * N)
            throughput.append(latency)
        ax.plot(batch, throughput, label='%d/%d' % (N,t))
        ax.scatter(resX[(N, t)], resY[(N, t)],
                   label='%d/%d' % (N,t), alpha=0.5, s=1.5, color=colors[colorCounter])
        colorCounter += 1

    ax.set_xscale("log")
    ax.set_yscale("log")
    plt.ylim([10**0.2, 10**2.6])
    plt.xlim([10**2.2, 10**6.3])
    plt.legend(title='Nodes / Tolerance', loc='best')
    plt.ylabel('Latency')
    plt.xlabel('Requests (Tx)')
    plt.tight_layout()
    # plt.show()
    plt.savefig('plot_latency.pdf', format='pdf', dpi=1000)

do_plot()
143 |
--------------------------------------------------------------------------------
/experiments/plots/plot_latency_throughput.py:
--------------------------------------------------------------------------------
1 | import matplotlib.pyplot as plt
2 |
3 | # Measurements from the table
# Each tuple is (N, t, entries, linestyle) where:
#   N         - number of nodes in the experiment
#   t         - fault tolerance (number of tolerated faulty nodes)
#   entries   - list of (batch size per node, latency) pairs;
#               latency is presumably in seconds -- TODO confirm against the raw logs
#   linestyle - matplotlib format string used when plotting this series
expt = [
    (8,2,[
        # (512,5.96925),
        (1024,6.41475),
        (2048,6.402),
        (4096,7.75725),
        (8192,10.99675),
        (16384,18.7975),
        (32768,36.5285),
        (65536,70.299),
        (131072, 138.395),
        # (262144, 295.122)
    ], '-o'),
    (16,4,[
        (512, 5.6815),
        (1024, 6.40175),
        (2048, 8.33675),
        (4096, 12.37275),
        (8192, 20.29125),
        (16384, 39.1535),
        (32768, 73.4515),
        (65536, 140.214)
    ], '--+'),
    (32,8,[
        (512, 9.489),
        (1024, 11.5725),
        (2048, 15.89175),
        (4096, 24.24975),
        (8192, 43.201),
        (16384, 79.7425),
        (32768, 153.85975)
    ], '-*'),
    (64, 16,[
        (512, 24.3315),
        (1024, 32.17575),
        (2048, 39.399625),
        (4096, 58.59675),
        (8192, 96.832),
        (16384, 173.94425)
    ], '--^'),
    (64, 21, [
        (512, 26.027),
        (1024, 31.5315),
        (2048, 42.291),
        (4096, 63.3355),
        (8192, 105.0055),
        (16384, 190.3235)
    ], '-d'),
    (128, 32, [
        (256, 89.2895),
        (512, 94.507),
        (1024, 105.0365),
        (2048, 122.69),
        (4096, 162.205),
        (8192, 241.219),
        # (16384, 414.118)
    ], '--s')]
61 |
62 |
63 |
def do_plot():
    """Plot latency against committed throughput for each (N, t) series in ``expt``."""
    fig = plt.figure(1, figsize=(7, 5))
    plt.clf()
    axes = fig.add_subplot(1, 1, 1)
    for num_nodes, tolerance, entries, style in expt:
        # x: committed transactions per second; y: observed latency
        xs = [per_node * (num_nodes - tolerance) / lat for per_node, lat in entries]
        ys = [lat for _, lat in entries]
        axes.plot(xs, ys, style, label='%d/%d' % (num_nodes, tolerance))
    plt.ylim([0, 400])
    plt.legend(title='Nodes / Tolerance', loc='best')
    plt.ylabel('Latency')
    plt.xlabel('Throughput')
    plt.savefig('plot_latency_throughput.pdf', format='pdf', dpi=1000)
89 |
# Guard the entry point so importing this module does not trigger plotting.
if __name__ == '__main__':
    do_plot()
91 |
--------------------------------------------------------------------------------
/experiments/plots/plot_latency_tor.py:
--------------------------------------------------------------------------------
1 | import matplotlib.pyplot as plt
2 |
3 | # Measurements from the table
# Each tuple is (N, t, entries, linestyle); each entry maps a per-node batch
# size to a list of raw sample rows.  Judging by do_plot() below, only
# row[0] (end-to-end latency, presumably seconds) is consumed here; the
# meaning of the remaining five columns is not shown in this file -- TODO
# confirm against experiments/benchmark/process.py.
expt = [
    (4,1,[
        (128, [[5.940000057, 293, 1396480, 0.647645058, 0.23413336, 3.25],[7.2099998, 306, 1541178, 0.806241821, 0.750284879, 5.149999857],[5.620000124, 310, 1556392, 0.752677412, 0.294059595, 2.670000076],[10.33999991, 425, 1567290, 0.767552938, 0.722960109, 4.75999999]]),
        (256, [[19.1500001, 276, 2833410, 1.421521749, 5.718475913, 16.06999993],[21.70000005, 263, 2928778, 1.825893544, 11.81559673, 18.67000008],[11.3499999, 224, 2447490, 0.940714284, 1.58701201, 6.690000057],[11.68000007, 243, 2639845, 1.010452671, 1.774796935, 10.30999994],[7.899999857, 296, 3090702, 1.117500002, 1.337155898, 6.069999933],[10.36999989, 314, 3093448, 1.181082799, 2.096159356, 7.279999971]]),
        (512, [[19.06999993, 226, 5712312, 2.412964605, 13.40428096, 17.77999997],[87.38000011, 482, 6184016, 5.820580907, 295.0460774, 82.98999977],[29.67000008, 294, 5714713, 1.941870746, 13.56463148, 28.17999983],[40.53999996, 252, 5584736, 2.493888873, 38.60236743, 34.68000007],[36.72000003, 268, 5649080, 2.364328353, 28.42150058, 31.20000005]]),
        (1024, [[27.11999989, 258, 11793487, 2.097054248, 15.23283466, 20.56999993],[29.98000002, 282, 12308096, 3.841666675, 39.74736357, 25.87999988],[16.97000003, 248, 10128923, 1.322782259, 3.864306318, 13.64999986],[24.73000002, 249, 11409228, 2.415060244, 15.76277366, 20.0999999],[33.74000001, 297, 11666541, 2.900168342, 27.40797012, 25.61000013]]),
        (2048, [[54.5, 282, 21010283, 4.64677305, 73.69933616, 37.94999981],[73.30999994, 242, 23568893, 9.918966927, 239.5914324, 63.45000005],[70.8499999, 227, 23312093, 9.411982361, 231.4646233, 64.12000012]])
    ], '--o'),
    (8,2,[
        (128, [[5.230000019, 2255, 6157583, 0.682984484, 0.22252799, 4.069999933],[5.99000001, 2437, 6134364, 0.701973739, 0.331131886, 5.059999943],[5.450000048, 2371, 6093145, 0.706111342, 0.309728953, 3.910000086],[6.150000095, 2660, 6211287, 0.726812037, 0.394393744, 4.810000181],[6.859999895, 2900, 6281059, 0.716162067, 0.339368136, 5.330000162]]),
        (256, [[12.31999993, 3102, 12437395, 1.123684721, 2.238323991, 9.789999962],[9.649999857, 2487, 12196387, 1.074238039, 1.507334197, 8.650000095],[12.06999993, 2767, 12261469, 1.104803034, 2.011254519, 10.18000007],[9.109999895, 2484, 12253370, 0.981489531, 1.284007355, 7]]),
        (512, [[17.39999986, 2381, 23719388, 1.799202015, 7.454828266, 14.56999993],[20.87000012, 3738, 24853988, 1.500125735, 6.202361069, 15.48000002],[19.25, 2924, 24739350, 1.750058136, 8.150292917, 15.70000005]]),
        (1024, [[38.37999988, 3523, 49092077, 2.734007947, 25.16038619, 29.52000022],[38.36999989, 3286, 47280429, 2.590833839, 23.67362793, 32.25999999],[36.27999997, 2895, 48386472, 3.051243521, 29.00638734, 31.6099999]]),
        (8192, [[55.75999999, 5313, 95182622, 2.727918312, 43.78314273, 45.75999999],[52.33999991, 4985, 94242230, 2.402098303, 33.93348783, 46.01999998],[319.03, 4915, 94097996, 12.152765, 2208.017538, 316.1799998],[48.97000003, 5606, 98027752, 2.663995716, 34.43750126, 43.26999998]])
    ], '--^'),
    (16, 4,[
        (128, [[26.99000001, 56932, 31694811, 0.761393947, 0.879322338, 24.30999994]]),
        (256, [[46.6500001, 57836, 59040154, 1.166456537, 2.762598404, 18.12999988]]),
        (512, [[59.63000011, 57301, 111335920, 1.438934224, 6.138471836, 48.33000016]]),
    ], '--*')
]
25 |
26 |
27 |
def do_plot():
    """Plot mean latency (with min/max error bars) against total requests, log-log."""
    fig = plt.figure(1, figsize=(7, 5))
    plt.clf()
    axes = fig.add_subplot(1, 1, 1)
    for num_nodes, tolerance, entries, style in expt:
        requests = []
        means = []
        err_low = []
        err_high = []
        for per_node, samples in entries:
            # Column 0 of each sample row is the observed latency.
            lats = [row[0] for row in samples]
            mean = sum(lats) / float(len(lats))
            requests.append(num_nodes * per_node)
            means.append(mean)
            # Asymmetric error bars span from the minimum to the maximum sample.
            err_low.append(mean - min(lats))
            err_high.append(max(lats) - mean)
        axes.errorbar(requests, means, yerr=[err_low, err_high], label='%d/%d' % (num_nodes, tolerance), fmt=style)
    plt.legend(title='Nodes / Tolerance', loc='best')
    plt.ylabel('Latency (sec)')
    axes.set_xscale("log")
    axes.set_yscale("log")
    plt.xlabel('Requests (Tx)')
    plt.savefig('plot_latency_tor.pdf', format='pdf', dpi=1000)
58 |
# Guard the entry point so importing this module does not trigger plotting.
if __name__ == '__main__':
    do_plot()
60 |
--------------------------------------------------------------------------------
/experiments/plots/plot_throughput.py:
--------------------------------------------------------------------------------
1 | import matplotlib.pyplot as plt
2 |
3 | # Measurements from the table
# Each tuple is (N, t, entries, linestyle) where:
#   N         - number of nodes in the experiment
#   t         - fault tolerance (number of tolerated faulty nodes)
#   entries   - list of (batch size per node, latency) pairs;
#               latency is presumably in seconds -- TODO confirm against the raw logs
#   linestyle - matplotlib format string used when plotting this series
expt = [
    (8,2,[
        # (512,5.96925),
        (1024,6.41475),
        (2048,6.402),
        (4096,7.75725),
        (8192,10.99675),
        (16384,18.7975),
        (32768,36.5285),
        (65536,70.299),
        (131072, 138.395),
        # (262144, 295.122)
    ], '-o'),
    (16,4,[
        (512, 5.6815),
        (1024, 6.40175),
        (2048, 8.33675),
        (4096, 12.37275),
        (8192, 20.29125),
        (16384, 39.1535),
        (32768, 73.4515),
        (65536, 140.214)
    ], '--+'),
    (32,8,[
        (512, 9.489),
        (1024, 11.5725),
        (2048, 15.89175),
        (4096, 24.24975),
        (8192, 43.201),
        (16384, 79.7425),
        (32768, 153.85975)
    ], '-*'),
    (64, 16,[
        (512, 24.3315),
        (1024, 32.17575),
        (2048, 39.399625),
        (4096, 58.59675),
        (8192, 96.832),
        (16384, 173.94425)
    ], '--^'),
    (64, 21, [
        (512, 26.027),
        (1024, 31.5315),
        (2048, 42.291),
        (4096, 63.3355),
        (8192, 105.0055),
        (16384, 190.3235)
    ], '-d'),
    (128, 32, [
        (256, 89.2895),
        (512, 94.507),
        (1024, 105.0365),
        (2048, 122.69),
        (4096, 162.205),
        (8192, 241.219),
        # (16384, 414.118)
    ], '--s')]
61 |
62 |
63 |
def do_plot():
    """Plot committed throughput against total requests on a log-log scale."""
    fig = plt.figure(1, figsize=(7, 5))
    plt.clf()
    axes = fig.add_subplot(1, 1, 1)
    for num_nodes, tolerance, entries, style in expt:
        # x: total requests across all nodes; y: committed transactions per second
        xs = [num_nodes * per_node for per_node, _ in entries]
        ys = [per_node * (num_nodes - tolerance) / lat for per_node, lat in entries]
        axes.plot(xs, ys, style, label='%d/%d' % (num_nodes, tolerance))
    axes.set_xscale("log")
    axes.set_yscale("log")
    plt.ylim([10**2.1, 10**3.8])
    plt.xlim([10**3.8, 10**6.4])
    plt.legend(title='Nodes / Tolerance', loc='best')
    plt.ylabel('Throughput (Tx per second) in log scale')
    plt.xlabel('Requests (Tx) in log scale')
    plt.savefig('plot_throughput.pdf', format='pdf', dpi=1000)
89 |
# Guard the entry point so importing this module does not trigger plotting.
if __name__ == '__main__':
    do_plot()
91 |
--------------------------------------------------------------------------------
/experiments/run_local.py:
--------------------------------------------------------------------------------
1 | import subprocess, sys, signal
2 |
def runOnTransaction(N, t, Tx):
    """Run one honest-party experiment and return the reported message size.

    Launches the test harness as a subprocess with batch size ``Tx`` and
    parses the figure following 'Total Message size ' from its captured
    output.

    :param N: number of parties
    :param t: fault tolerance
    :param Tx: batch size (number of transactions per party)
    :return: the text following 'Total Message size ' in the output
    """
    # NOTE: an unreachable legacy loop that polled ``p.stdout`` (from an
    # earlier Popen-based version) used to sit after the return statement;
    # it was dead code and has been removed.
    p = subprocess.check_output(
        ['python', '-m', 'HoneyBadgerBFT.test.honest_party_test',
         '-k', '%d_%d.key' % (N, t), '-e', 'ecdsa.keys', '-b', '%d' % Tx,
         '-n', str(N), '-t', str(t), '-c', 'th_%d_%d.keys' % (N, t)],
        shell=False,
    )
    return p.split('Total Message size ')[1].strip()
25 |
26 |
27 | import sys
def main(N, t, start_i=0, end_i=11, start_j=0):
    """Sweep batch sizes 2**start_i .. 2**(end_i-1), repeating each run.

    Writes one line per batch size: the size followed by the result of
    each repetition, space separated.
    """
    write = sys.stdout.write
    for exponent in range(start_i, end_i):
        batch = 2 ** exponent
        write(str(batch))
        for _repeat in range(start_j, 4):
            write(' ' + str(runOnTransaction(N, t, batch)))
        write('\n')
34 |
35 |
if __name__=='__main__':
    # Either 2 positional args (N t) or all 5 (N t start_i end_i start_j).
    # The original test was ``> 3``, which raised IndexError at sys.argv[5]
    # when only 3 or 4 extra arguments were supplied; require all five.
    if len(sys.argv) > 5:
        main(int(sys.argv[1]), int(sys.argv[2]), int(sys.argv[3]), int(sys.argv[4]), int(sys.argv[5]))
    else:
        main(int(sys.argv[1]), int(sys.argv[2]))
41 |
--------------------------------------------------------------------------------
/experiments/run_local_tor.py:
--------------------------------------------------------------------------------
1 | __author__ = 'aluex'
2 |
3 | import subprocess32 as subprocess
4 | import time
def runOnTransaction(N, t, Tx):
    """Run one honest-party experiment (Python 2 script), retrying on timeout.

    Re-launches the test harness until it completes within 30 seconds, then
    post-processes 'msglog.TorMultiple' with process.py and prints a
    one-line summary: N t Tx <process.py output>.

    :param N: number of parties
    :param t: fault tolerance
    :param Tx: batch size (number of transactions per party)
    """
    retry = True
    while retry:
        try:
            p = subprocess.check_output(
                ['python', '-m', 'HoneyBadgerBFT.test.honest_party_test',
                '-k', '%d_%d.key' % (N, t), '-e', 'ecdsa.keys', '-b', '%d' % Tx,
                '-n', str(N), '-t', str(t), '-c', 'th_%d_%d.keys' % (N, t)],
                timeout = 30
            )
            retry = False
        except subprocess.TimeoutExpired:
            # The run hung (Tor circuits can stall); back off briefly and retry.
            retry = True
            time.sleep(2)
    q = subprocess.check_output(['python', 'process.py', 'msglog.TorMultiple'])
    print N, t, Tx, q.replace('\n', ' ')
21 |
22 | import sys
def main(N, t, start_i=0, end_i=11, start_j=0):
    """Sweep batch sizes 2**start_i .. 2**(end_i-1), repeating each 4 - start_j times.

    Each repetition prints its own summary line (see runOnTransaction);
    a blank line separates batch sizes.
    """
    for i in range(start_i, end_i):
        for j in range(start_j, 4):
            runOnTransaction(N, t, 2**i)
        print
28 |
29 |
if __name__=='__main__':
    # Either 2 positional args (N t) or all 5 (N t start_i end_i start_j).
    # The original test was ``> 3``, which raised IndexError at sys.argv[5]
    # when only 3 or 4 extra arguments were supplied; require all five.
    if len(sys.argv) > 5:
        main(int(sys.argv[1]), int(sys.argv[2]), int(sys.argv[3]), int(sys.argv[4]), int(sys.argv[5]))
    else:
        main(int(sys.argv[1]), int(sys.argv[2]))
35 |
--------------------------------------------------------------------------------
/honeybadgerbft/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/initc3/HoneyBadgerBFT-Python/e8bcbc081dfb5d1e7298039d47bbebf7048b8e62/honeybadgerbft/__init__.py
--------------------------------------------------------------------------------
/honeybadgerbft/core/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/initc3/HoneyBadgerBFT-Python/e8bcbc081dfb5d1e7298039d47bbebf7048b8e62/honeybadgerbft/core/__init__.py
--------------------------------------------------------------------------------
/honeybadgerbft/core/commoncoin.py:
--------------------------------------------------------------------------------
1 | import logging
2 |
3 | from honeybadgerbft.crypto.threshsig.boldyreva import serialize
4 | from collections import defaultdict
5 | from gevent import Greenlet
6 | from gevent.queue import Queue
7 | import hashlib
8 |
9 | logger = logging.getLogger(__name__)
10 |
11 |
class CommonCoinFailureException(Exception):
    """Raised for common coin failures."""
15 |
16 |
def hash(x):
    """Return the 32-byte SHA-256 digest of ``x``."""
    digest = hashlib.sha256(x)
    return digest.digest()
19 |
20 |
def shared_coin(sid, pid, N, f, PK, SK, broadcast, receive):
    """A shared coin based on threshold signatures

    :param sid: a unique instance id
    :param pid: my id number
    :param N: number of parties
    :param f: fault tolerance, :math:`f+1` shares needed to get the coin
    :param PK: ``boldyreva.TBLSPublicKey``
    :param SK: ``boldyreva.TBLSPrivateKey``
    :param broadcast: broadcast channel
    :param receive: receive channel
    :return: a function ``getCoin()``, where ``getCoin(r)`` blocks
    """
    # The scheme must be (f+1)-of-N so the coin value is unpredictable until
    # at least one honest party reveals its share.
    assert PK.k == f+1
    assert PK.l == N  # noqa: E741
    received = defaultdict(dict)  # round -> {sender: signature share}
    outputQueue = defaultdict(lambda: Queue(1))  # round -> 1-slot queue holding the coin bit

    def _recv():
        while True:  # main receive loop
            logger.debug(f'entering loop',
                         extra={'nodeid': pid, 'epoch': '?'})
            # New shares for some round r, from sender i
            (i, (_, r, sig)) = receive()
            logger.debug(f'received i, _, r, sig: {i, _, r, sig}',
                         extra={'nodeid': pid, 'epoch': r})
            assert i in range(N)
            assert r >= 0
            if i in received[r]:
                print("redundant coin sig received", (sid, pid, i, r))
                continue

            # All parties sign the same message for round r of this instance.
            h = PK.hash_message(str((sid, r)))

            # TODO: Accountability: Optimistically skip verifying
            # each share, knowing evidence available later
            try:
                PK.verify_share(sig, i, h)
            except AssertionError:
                print("Signature share failed!", (sid, pid, i, r))
                continue

            received[r][i] = sig

            # After reaching the threshold, compute the output and
            # make it available locally
            logger.debug(
                f'if len(received[r]) == f + 1: {len(received[r]) == f + 1}',
                extra={'nodeid': pid, 'epoch': r},
            )
            if len(received[r]) == f + 1:

                # Verify and get the combined signature
                sigs = dict(list(received[r].items())[:f+1])
                sig = PK.combine_shares(sigs)
                assert PK.verify_signature(sig, h)

                # Compute the bit from the least bit of the hash
                bit = hash(serialize(sig))[0] % 2
                logger.debug(f'put bit {bit} in output queue',
                             extra={'nodeid': pid, 'epoch': r})
                outputQueue[r].put_nowait(bit)

    # greenletPacker(Greenlet(_recv), 'shared_coin', (pid, N, f, broadcast, receive)).start()
    Greenlet(_recv).start()

    def getCoin(round):
        """Gets a coin.

        :param round: the epoch/round.
        :returns: a coin.

        """
        # I have to do mapping to 1..l
        h = PK.hash_message(str((sid, round)))
        logger.debug(f"broadcast {('COIN', round, SK.sign(h))}",
                     extra={'nodeid': pid, 'epoch': round})
        broadcast(('COIN', round, SK.sign(h)))
        return outputQueue[round].get()

    return getCoin
102 |
--------------------------------------------------------------------------------
/honeybadgerbft/core/commonsubset.py:
--------------------------------------------------------------------------------
1 | import gevent
2 |
3 |
def commonsubset(pid, N, f, rbc_out, aba_in, aba_out):
    """The BKR93 algorithm for asynchronous common subset.

    :param pid: my identifier
    :param N: number of nodes
    :param f: fault tolerance
    :param rbc_out: an array of :math:`N` (blocking) output functions,
        returning a string
    :param aba_in: an array of :math:`N` (non-blocking) functions that
        accept an input bit
    :param aba_out: an array of :math:`N` (blocking) output functions,
        returning a bit
    :return: an :math:`N`-element array, each element either ``None`` or a
        string
    """
    assert len(rbc_out) == N
    assert len(aba_in) == N
    assert len(aba_out) == N

    aba_inputted = [False] * N  # whether we have already fed a bit to ABA[j]
    aba_values = [0] * N  # decided bit of each ABA instance
    rbc_values = [None] * N  # payload delivered by each RBC instance

    def _recv_rbc(j):
        # Receive output from reliable broadcast
        rbc_values[j] = rbc_out[j]()

        if not aba_inputted[j]:
            # Provide 1 as input to the corresponding bin agreement
            aba_inputted[j] = True
            aba_in[j](1)

    r_threads = [gevent.spawn(_recv_rbc, j) for j in range(N)]

    def _recv_aba(j):
        # Receive output from binary agreement
        aba_values[j] = aba_out[j]()  # May block
        # print pid, j, 'ENTERING CRITICAL'
        # Once N-f instances decided 1, vote 0 on every remaining instance so
        # the protocol terminates.
        if sum(aba_values) >= N - f:
            # Provide 0 to all other aba
            for k in range(N):
                if not aba_inputted[k]:
                    aba_inputted[k] = True
                    aba_in[k](0)
                    # print pid, 'ABA[%d] input -> %d' % (k, 0)
        # print pid, j, 'EXITING CRITICAL'

    # Wait for all binary agreements
    a_threads = [gevent.spawn(_recv_aba, j) for j in range(N)]
    gevent.joinall(a_threads)

    assert sum(aba_values) >= N - f  # Must have at least N-f committed

    # Wait for the corresponding broadcasts
    for j in range(N):
        if aba_values[j]:
            r_threads[j].join()
            assert rbc_values[j] is not None
        else:
            # This instance was voted out; abandon its RBC and exclude it.
            r_threads[j].kill()
            rbc_values[j] = None

    return tuple(rbc_values)
67 |
--------------------------------------------------------------------------------
/honeybadgerbft/core/honeybadger.py:
--------------------------------------------------------------------------------
1 | from collections import namedtuple
2 | from enum import Enum
3 |
4 | import gevent
5 | from gevent.queue import Queue
6 |
7 | from honeybadgerbft.core.commoncoin import shared_coin
8 | from honeybadgerbft.core.binaryagreement import binaryagreement
9 | from honeybadgerbft.core.reliablebroadcast import reliablebroadcast
10 | from honeybadgerbft.core.commonsubset import commonsubset
11 | from honeybadgerbft.core.honeybadger_block import honeybadger_block
12 | from honeybadgerbft.exceptions import UnknownTagError
13 |
14 |
class BroadcastTag(Enum):
    """Message tags used to multiplex sub-protocol traffic over one channel."""
    ACS_COIN = 'ACS_COIN'
    ACS_RBC = 'ACS_RBC'
    ACS_ABA = 'ACS_ABA'
    TPKE = 'TPKE'
20 |
21 |
# One receive queue (or, for the per-instance tags, a list of N queues) for
# each message tag; field names must match the BroadcastTag member names.
BroadcastReceiverQueues = namedtuple(
    'BroadcastReceiverQueues', ('ACS_COIN', 'ACS_ABA', 'ACS_RBC', 'TPKE'))
24 |
25 |
def broadcast_receiver(recv_func, recv_queues):
    """Pull one message from ``recv_func`` and route it to the queue for its tag.

    :raises UnknownTagError: if the message tag is not a BroadcastTag member.
    """
    sender, (tag, j, msg) = recv_func()
    if tag not in BroadcastTag.__members__:
        # TODO Post python 3 port: Add exception chaining.
        # See https://www.python.org/dev/peps/pep-3134/
        raise UnknownTagError('Unknown tag: {}! Must be one of {}.'.format(
            tag, BroadcastTag.__members__.keys()))
    target = recv_queues._asdict()[tag]
    # Every tag except TPKE runs N parallel instances, so index by j.
    queue = target if tag == BroadcastTag.TPKE.value else target[j]
    queue.put_nowait((sender, msg))
39 |
40 |
def broadcast_receiver_loop(recv_func, recv_queues):
    # Dispatch incoming messages forever; intended to run as its own greenlet.
    while True:
        broadcast_receiver(recv_func, recv_queues)
44 |
45 |
class HoneyBadgerBFT():
    r"""HoneyBadgerBFT object used to run the protocol.

    :param str sid: The base name of the common coin that will be used to
        derive a nonce to uniquely identify the coin.
    :param int pid: Node id.
    :param int B: Batch size of transactions.
    :param int N: Number of nodes in the network.
    :param int f: Number of faulty nodes that can be tolerated.
    :param str sPK: Public key of the threshold signature
        (:math:`\mathsf{TSIG}`) scheme.
    :param str sSK: Signing key of the threshold signature
        (:math:`\mathsf{TSIG}`) scheme.
    :param str ePK: Public key of the threshold encryption
        (:math:`\mathsf{TPKE}`) scheme.
    :param str eSK: Signing key of the threshold encryption
        (:math:`\mathsf{TPKE}`) scheme.
    :param send:
    :param recv:
    """

    def __init__(self, sid, pid, B, N, f, sPK, sSK, ePK, eSK, send, recv):
        self.sid = sid
        self.pid = pid
        self.B = B
        self.N = N
        self.f = f
        self.sPK = sPK
        self.sSK = sSK
        self.ePK = ePK
        self.eSK = eSK
        self._send = send
        self._recv = recv

        self.round = 0  # Current block number
        self.transaction_buffer = []
        self._per_round_recv = {}  # Buffer of incoming messages

    def submit_tx(self, tx):
        """Appends the given transaction to the transaction buffer.

        :param tx: Transaction to append to the buffer.
        """
        print('submit_tx', self.pid, tx)
        self.transaction_buffer.append(tx)

    def run(self):
        """Run the HoneyBadgerBFT protocol."""

        def _recv():
            """Receive messages."""
            # Demultiplexes the single incoming channel into one queue per
            # round; runs forever in its own greenlet.
            while True:
                (sender, (r, msg)) = self._recv()

                # Maintain an *unbounded* recv queue for each epoch
                if r not in self._per_round_recv:
                    # Buffer this message
                    assert r >= self.round  # pragma: no cover
                    self._per_round_recv[r] = Queue()

                _recv = self._per_round_recv[r]
                if _recv is not None:
                    # Queue it
                    _recv.put((sender, msg))

                # else:
                # We have already closed this
                # round and will stop participating!

        self._recv_thread = gevent.spawn(_recv)

        while True:
            # For each round...
            r = self.round
            if r not in self._per_round_recv:
                self._per_round_recv[r] = Queue()

            # Select all the transactions (TODO: actual random selection)
            tx_to_send = self.transaction_buffer[:self.B]

            # TODO: Wait a bit if transaction buffer is not full

            # Run the round
            def _make_send(r):
                # Bind r now so each round's messages carry their round number.
                def _send(j, o):
                    self._send(j, (r, o))
                return _send
            send_r = _make_send(r)
            recv_r = self._per_round_recv[r].get
            # NOTE(review): only the first selected transaction is proposed
            # for this round -- confirm this is intended for the demo loop.
            new_tx = self._run_round(r, tx_to_send[0], send_r, recv_r)
            print('new_tx:', new_tx)

            # Remove all of the new transactions from the buffer
            self.transaction_buffer = [_tx for _tx in self.transaction_buffer if _tx not in new_tx]

            self.round += 1  # Increment the round
            if self.round >= 3:
                break  # Stop after three rounds for now

    def _run_round(self, r, tx_to_send, send, recv):
        """Run one protocol round.

        :param int r: round id
        :param tx_to_send: Transaction(s) to process.
        :param send:
        :param recv:
        """
        # Unique sid for each round
        sid = self.sid + ':' + str(r)
        pid = self.pid
        N = self.N
        f = self.f

        def broadcast(o):
            """Multicast the given input ``o``.

            :param o: Input to multicast.
            """
            for j in range(N):
                send(j, o)

        # Launch ACS, ABA, instances
        coin_recvs = [None] * N
        aba_recvs = [None] * N  # noqa: E221
        rbc_recvs = [None] * N  # noqa: E221

        aba_inputs = [Queue(1) for _ in range(N)]  # noqa: E221
        aba_outputs = [Queue(1) for _ in range(N)]
        rbc_outputs = [Queue(1) for _ in range(N)]

        my_rbc_input = Queue(1)
        print(pid, r, 'tx_to_send:', tx_to_send)

        def _setup(j):
            """Setup the sub protocols RBC, BA and common coin.

            :param int j: Node index for which the setup is being done.
            """
            def coin_bcast(o):
                """Common coin multicast operation.

                :param o: Value to multicast.
                """
                broadcast(('ACS_COIN', j, o))

            coin_recvs[j] = Queue()
            coin = shared_coin(sid + 'COIN' + str(j), pid, N, f,
                               self.sPK, self.sSK,
                               coin_bcast, coin_recvs[j].get)

            def aba_bcast(o):
                """Binary Byzantine Agreement multicast operation.

                :param o: Value to multicast.
                """
                broadcast(('ACS_ABA', j, o))

            aba_recvs[j] = Queue()
            gevent.spawn(binaryagreement, sid+'ABA'+str(j), pid, N, f, coin,
                         aba_inputs[j].get, aba_outputs[j].put_nowait,
                         aba_bcast, aba_recvs[j].get)

            def rbc_send(k, o):
                """Reliable broadcast operation.

                :param o: Value to broadcast.
                """
                send(k, ('ACS_RBC', j, o))

            # Only leader gets input
            rbc_input = my_rbc_input.get if j == pid else None
            rbc_recvs[j] = Queue()
            rbc = gevent.spawn(reliablebroadcast, sid+'RBC'+str(j), pid, N, f, j,
                               rbc_input, rbc_recvs[j].get, rbc_send)
            rbc_outputs[j] = rbc.get  # block for output from rbc

        # N instances of ABA, RBC
        for j in range(N):
            _setup(j)

        # One instance of TPKE
        def tpke_bcast(o):
            """Threshold encryption broadcast."""
            broadcast(('TPKE', 0, o))

        tpke_recv = Queue()

        # One instance of ACS
        acs = gevent.spawn(commonsubset, pid, N, f, rbc_outputs,
                           [_.put_nowait for _ in aba_inputs],
                           [_.get for _ in aba_outputs])

        recv_queues = BroadcastReceiverQueues(
            ACS_COIN=coin_recvs,
            ACS_ABA=aba_recvs,
            ACS_RBC=rbc_recvs,
            TPKE=tpke_recv,
        )
        gevent.spawn(broadcast_receiver_loop, recv, recv_queues)

        _input = Queue(1)
        _input.put(tx_to_send)
        return honeybadger_block(pid, self.N, self.f, self.ePK, self.eSK,
                                 _input.get,
                                 acs_in=my_rbc_input.put_nowait, acs_out=acs.get,
                                 tpke_bcast=tpke_bcast, tpke_recv=tpke_recv.get)
252 |
--------------------------------------------------------------------------------
/honeybadgerbft/core/honeybadger_block.py:
--------------------------------------------------------------------------------
1 | from ..crypto.threshenc import tpke
2 | import os
3 |
4 |
def serialize_UVW(U, V, W):
    """Serialize a TPKE ciphertext triple ``(U, V, W)`` for transmission.

    U and W are group elements (65 bytes each when serialized for SS512);
    V is already a 32-byte string and passes through unchanged.
    """
    u_bytes = tpke.serialize(U)
    assert len(u_bytes) == 65
    assert len(V) == 32
    w_bytes = tpke.serialize(W)
    assert len(w_bytes) == 65
    return u_bytes, V, w_bytes
15 |
16 |
def deserialize_UVW(U, V, W):
    """Inverse of ``serialize_UVW``: decode U and W back into group elements."""
    for blob, size in ((U, 65), (V, 32), (W, 65)):
        assert len(blob) == size
    return tpke.deserialize1(U), V, tpke.deserialize2(W)
24 |
25 |
def honeybadger_block(pid, N, f, PK, SK, propose_in, acs_in, acs_out, tpke_bcast, tpke_recv):
    """The HoneyBadgerBFT algorithm for a single block

    :param pid: my identifier
    :param N: number of nodes
    :param f: fault tolerance
    :param PK: threshold encryption public key
    :param SK: threshold encryption secret key
    :param propose_in: a function returning a sequence of transactions
    :param acs_in: a function to provide input to acs routine
    :param acs_out: a blocking function that returns an array of ciphertexts
    :param tpke_bcast:
    :param tpke_recv:
    :return:
    """

    # Broadcast inputs are of the form (tenc(key), enc(key, transactions))

    # Threshold encrypt
    # TODO: check that propose_in is the correct length, not too large
    prop = propose_in()
    key = os.urandom(32)  # random 256-bit key
    ciphertext = tpke.encrypt(key, prop)
    tkey = PK.encrypt(key)

    import pickle
    to_acs = pickle.dumps((serialize_UVW(*tkey), ciphertext))
    acs_in(to_acs)

    # Wait for the corresponding ACS to finish
    vall = acs_out()
    assert len(vall) == N
    assert len([_ for _ in vall if _ is not None]) >= N - f  # This many must succeed

    # print pid, 'Received from acs:', vall

    # Broadcast all our decryption shares
    my_shares = []
    for i, v in enumerate(vall):
        if v is None:
            my_shares.append(None)
            continue
        # NOTE(security): pickle.loads on data supplied by other parties is
        # unsafe against malicious peers; a safe serialization format should
        # be used instead.
        (tkey, ciph) = pickle.loads(v)
        tkey = deserialize_UVW(*tkey)
        share = SK.decrypt_share(*tkey)
        # share is of the form: U_i, an element of group1
        my_shares.append(share)

    tpke_bcast(my_shares)

    # Receive everyone's shares
    shares_received = {}
    while len(shares_received) < f+1:
        (j, shares) = tpke_recv()
        if j in shares_received:
            # TODO: alert that we received a duplicate
            print('Received a duplicate decryption share from', j)
            continue
        shares_received[j] = shares

    assert len(shares_received) >= f+1
    # TODO: Accountability
    # If decryption fails at this point, we will have evidence of misbehavior,
    # but then we should wait for more decryption shares and try again
    decryptions = []
    for i, v in enumerate(vall):
        if v is None:
            continue
        svec = {}
        for j, shares in shares_received.items():
            svec[j] = shares[i]  # Party j's share of broadcast i
        (tkey, ciph) = pickle.loads(v)
        tkey = deserialize_UVW(*tkey)
        # Combine f+1 decryption shares to recover the symmetric key, then
        # decrypt party i's transaction batch with it.
        key = PK.combine_shares(*tkey, svec)
        plain = tpke.decrypt(key, ciph)
        decryptions.append(plain)
    # print 'Done!', decryptions

    return tuple(decryptions)
105 |
--------------------------------------------------------------------------------
/honeybadgerbft/core/reliablebroadcast.py:
--------------------------------------------------------------------------------
1 | # coding=utf-8
2 | from collections import defaultdict
3 | import zfec
4 | import hashlib
5 | import math
6 |
7 |
8 | #####################
9 | # zfec encode #
10 | #####################
def encode(K, N, m):
    """Erasure encodes string ``m`` into ``N`` blocks, such that any ``K``
    can reconstruct.

    :param int K: K
    :param int N: number of blocks to encode string ``m`` into.
    :param bytes m: bytestring to encode.

    :return list: Erasure codes resulting from encoding ``m`` into
        ``N`` blocks using ``zfec`` lib.

    """
    if isinstance(m, str):
        m = m.encode()
    encoder = zfec.Encoder(K, N)
    assert K <= 256  # TODO: Record this assumption!
    # Pad so the length is a multiple of K; the pad byte stores K - padlen so
    # decode() can strip it again.
    padlen = K - (len(m) % K)
    m += padlen * chr(K - padlen).encode()
    step = len(m) // K
    blocks = [m[i * step:(i + 1) * step] for i in range(K)]
    return encoder.encode(blocks)
36 |
37 |
def decode(K, N, stripes):
    """Decodes an erasure-encoded string from a subset of stripes

    :param list stripes: a container of :math:`N` elements,
        each of which is either a string or ``None``
        at least :math:`K` elements are strings
        all string elements are the same length

    """
    assert len(stripes) == N
    # Collect the first K present stripes together with their positions.
    present = [(i, s) for i, s in enumerate(stripes) if s is not None]
    if len(present) < K:
        raise ValueError("Too few to recover")
    blocknums = [i for i, _ in present[:K]]
    blocks = [s for _, s in present[:K]]
    rec = zfec.Decoder(K, N).decode(blocks, blocknums)
    m = b''.join(rec)
    # Strip the padding added by encode(); the final byte stores K - padlen.
    padlen = K - m[-1]
    return m[:-padlen]
65 |
66 |
67 | #####################
68 | # Merkle tree #
69 | #####################
def hash(x):
    """Return the 32-byte SHA-256 digest of ``x`` (str input is UTF-8 encoded)."""
    assert isinstance(x, (str, bytes))
    if isinstance(x, str):
        x = x.encode()
    return hashlib.sha256(x).digest()
77 |
78 |
def ceil(x):
    """Smallest integer >= ``x``, returned as a plain int."""
    return int(math.ceil(x))
80 |
81 |
def merkleTree(strList):
    """Builds a merkle tree from a list of :math:`N` strings (:math:`N`
    at least 1)

    :return list: Merkle tree, a list of ``2*ceil(N)`` strings. The root
        digest is at ``tree[1]``, ``tree[0]`` is blank.

    """
    N = len(strList)
    assert N >= 1
    # Round the leaf count up to the next power of two; missing leaves stay b''.
    bottomrow = 2 ** ceil(math.log(N, 2))
    mt = [b''] * (2 * bottomrow)
    for i, leaf in enumerate(strList):
        mt[bottomrow + i] = hash(leaf)
    # Fill internal nodes bottom-up: parent = H(left || right).
    for node in range(bottomrow - 1, 0, -1):
        mt[node] = hash(mt[2 * node] + mt[2 * node + 1])
    return mt
99 |
100 |
def getMerkleBranch(index, mt):
    """Return the authentication path for leaf ``index``: the sibling hash at
    each level, ordered from leaf to root."""
    branch = []
    node = index + (len(mt) >> 1)  # position of the leaf in the flat tree
    while node > 1:
        branch.append(mt[node ^ 1])  # sibling at the current level
        node >>= 1
    return branch
110 |
111 |
def merkleVerify(N, val, roothash, branch, index):
    """Verify a merkle tree branch proof
    """
    assert 0 <= index < N
    # XXX Python 3 related issue, for now let's tolerate both bytes and
    # strings
    assert isinstance(val, (str, bytes))
    assert len(branch) == ceil(math.log(N, 2))
    # Walk from leaf to root; the low bit of the running index says which
    # side the sibling sits on at each level.
    tmp = hash(val)
    tindex = index
    for sibling in branch:
        tmp = hash(sibling + tmp if tindex & 1 else tmp + sibling)
        tindex >>= 1
    if tmp != roothash:
        print("Verification failed with", hash(val), roothash, branch, tmp == roothash)
        return False
    return True
130 |
131 |
def reliablebroadcast(sid, pid, N, f, leader, input, receive, send):
    """Reliable broadcast (Bracha-style, with erasure coding and Merkle proofs)

    :param sid: session identifier (part of the common protocol signature;
        not used inside this function)
    :param int pid: ``0 <= pid < N``
    :param int N: at least 3
    :param int f: fault tolerance, ``N >= 3f + 1``
    :param int leader: ``0 <= leader < N``
    :param input: if ``pid == leader``, then :func:`input()` is called
        to wait for the input value
    :param receive: :func:`receive()` blocks until a message is
        received; message is of the form::

            (i, (tag, ...)) = receive()

        where ``tag`` is one of ``{"VAL", "ECHO", "READY"}``
    :param send: sends (without blocking) a message to a designed
        recipient ``send(i, (tag, ...))``

    :return str: ``m`` after receiving :math:`2f+1` ``READY`` messages
        and :math:`N-2f` ``ECHO`` messages

    .. important:: **Messages**

        ``VAL( roothash, branch[i], stripe[i] )``
            sent from ``leader`` to each other party
        ``ECHO( roothash, branch[i], stripe[i] )``
            sent after receiving ``VAL`` message
        ``READY( roothash )``
            sent after receiving :math:`N-f` ``ECHO`` messages
            or after receiving :math:`f+1` ``READY`` messages

    .. todo::
        **Accountability**

        A large computational expense occurs when attempting to
        decode the value from erasure codes, and recomputing to check it
        is formed correctly. By transmitting a signature along with
        ``VAL`` and ``ECHO``, we can ensure that if the value is decoded
        but not necessarily reconstructed, then evidence incriminates
        the leader.

    """
    assert N >= 3*f + 1
    assert f >= 0
    assert 0 <= leader < N
    assert 0 <= pid < N

    K = N - 2 * f  # Need this many to reconstruct. (# noqa: E221)
    EchoThreshold = N - f  # Wait for this many ECHO to send READY. (# noqa: E221)
    ReadyThreshold = f + 1  # Wait for this many READY to amplify READY. (# noqa: E221)
    OutputThreshold = 2 * f + 1  # Wait for this many READY to output
    # NOTE: The above thresholds are chosen to minimize the size
    # of the erasure coding stripes, i.e. to maximize K.
    # The following alternative thresholds are more canonical
    # (e.g., in Bracha '86) and require larger stripes, but must wait
    # for fewer nodes to respond
    #   EchoThreshold = ceil((N + f + 1.)/2)
    #   K = EchoThreshold - f

    def broadcast(o):
        # Send o to every party, including ourselves.
        for i in range(N):
            send(i, o)

    if pid == leader:
        # The leader erasure encodes the input, sending one strip to each participant
        m = input()  # block until an input is received
        # XXX Python 3 related issue, for now let's tolerate both bytes and
        # strings
        # (with Python 2 it used to be: assert type(m) is str)
        assert isinstance(m, (str, bytes))
        # print('Input received: %d bytes' % (len(m),))

        stripes = encode(K, N, m)
        mt = merkleTree(stripes)  # full binary tree
        roothash = mt[1]

        for i in range(N):
            branch = getMerkleBranch(i, mt)
            send(i, ('VAL', roothash, branch, stripes[i]))

    # TODO: filter policy: if leader, discard all messages until sending VAL

    # Merkle root accepted from the leader's VAL; stays None until one
    # verifies, and at most one VAL is ever accepted (see guard below).
    fromLeader = None
    # roothash -> N-slot list of stripes collected from ECHO messages
    stripes = defaultdict(lambda: [None for _ in range(N)])
    echoCounter = defaultdict(lambda: 0)
    echoSenders = set()  # Peers that have sent us ECHO messages
    ready = defaultdict(set)  # roothash -> peers that sent READY for it
    readySent = False  # guarantees we broadcast READY at most once
    readySenders = set()  # Peers that have sent us READY messages

    def decode_output(roothash):
        # Rebuild the merkle tree to guarantee decoding is correct
        m = decode(K, N, stripes[roothash])
        _stripes = encode(K, N, m)
        _mt = merkleTree(_stripes)
        _roothash = _mt[1]
        # TODO: Accountability: If this fails, incriminate leader
        assert _roothash == roothash
        return m

    while True:  # main receive loop
        sender, msg = receive()
        if msg[0] == 'VAL' and fromLeader is None:
            # Validation
            (_, roothash, branch, stripe) = msg
            if sender != leader:
                print("VAL message from other than leader:", sender)
                continue
            try:
                assert merkleVerify(N, stripe, roothash, branch, pid)
            except Exception as e:
                print("Failed to validate VAL message:", e)
                continue

            # Update
            fromLeader = roothash
            broadcast(('ECHO', roothash, branch, stripe))

        elif msg[0] == 'ECHO':
            (_, roothash, branch, stripe) = msg
            # Validation
            # NB: operator precedence makes this read as
            # (roothash seen AND stripe already stored) OR (sender echoed before)
            if roothash in stripes and stripes[roothash][sender] is not None \
               or sender in echoSenders:
                print("Redundant ECHO")
                continue
            try:
                assert merkleVerify(N, stripe, roothash, branch, sender)
            except AssertionError as e:
                print("Failed to validate ECHO message:", e)
                continue

            # Update
            stripes[roothash][sender] = stripe
            echoSenders.add(sender)
            echoCounter[roothash] += 1

            if echoCounter[roothash] >= EchoThreshold and not readySent:
                readySent = True
                broadcast(('READY', roothash))

            if len(ready[roothash]) >= OutputThreshold and echoCounter[roothash] >= K:
                return decode_output(roothash)

        elif msg[0] == 'READY':
            (_, roothash) = msg
            # Validation
            if sender in ready[roothash] or sender in readySenders:
                print("Redundant READY")
                continue

            # Update
            ready[roothash].add(sender)
            readySenders.add(sender)

            # Amplify ready messages
            if len(ready[roothash]) >= ReadyThreshold and not readySent:
                readySent = True
                broadcast(('READY', roothash))

            if len(ready[roothash]) >= OutputThreshold and echoCounter[roothash] >= K:
                return decode_output(roothash)
293 |
--------------------------------------------------------------------------------
/honeybadgerbft/crypto/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/initc3/HoneyBadgerBFT-Python/e8bcbc081dfb5d1e7298039d47bbebf7048b8e62/honeybadgerbft/crypto/__init__.py
--------------------------------------------------------------------------------
/honeybadgerbft/crypto/ecdsa/__init__.py:
--------------------------------------------------------------------------------
1 | """ecdsa modules"""
2 |
3 | __author__ = 'aluex'
4 |
--------------------------------------------------------------------------------
/honeybadgerbft/crypto/ecdsa/generate_keys_ecdsa.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import pickle
3 |
4 | from coincurve import PrivateKey
5 |
6 |
def generate_key_list(players):
    """Return *players* freshly generated ECDSA private keys (raw secret bytes)."""
    keys = []
    for _ in range(players):
        keys.append(PrivateKey().secret)
    return keys
9 |
10 |
def main():
    """CLI entry point: generate ``players`` ECDSA keys and write the
    pickled list to stdout."""
    parser = argparse.ArgumentParser()
    parser.add_argument('players', help='The number of players')
    args = parser.parse_args()
    players = int(args.players)
    keylist = generate_key_list(players)
    # pickle.dumps returns bytes; print() would emit their repr (b'...'),
    # which cannot be unpickled after a shell redirect.  Write the raw
    # bytes to the binary stdout buffer instead.
    import sys
    sys.stdout.buffer.write(pickle.dumps(keylist))
19 |
20 |
21 | if __name__ == '__main__':
22 | main()
23 |
--------------------------------------------------------------------------------
/honeybadgerbft/crypto/threshenc/__init__.py:
--------------------------------------------------------------------------------
1 | """Threshold Encryption modules"""
2 |
3 | __author__ = 'aluex'
4 |
--------------------------------------------------------------------------------
/honeybadgerbft/crypto/threshenc/generate_keys.py:
--------------------------------------------------------------------------------
1 | from .tpke import dealer, serialize
2 | import argparse
3 | import pickle
4 |
5 |
def _generate_keys(players, k):
    """Run the TPKE dealer and return the key material in picklable
    (serialized) form: ``(l, k, VK, [VKs], [(i, SK)])``."""
    if k:
        threshold = int(k)
    else:
        threshold = players // 2  # N - 2 * t
    PK, SKs = dealer(players=players, k=threshold)
    serialized_vks = [serialize(vk) for vk in PK.VKs]
    serialized_sks = [(sk.i, serialize(sk.SK)) for sk in SKs]
    return (PK.l, PK.k, serialize(PK.VK), serialized_vks, serialized_sks)
14 |
15 |
def main():
    """CLI entry point: deal threshold-encryption keys for ``players``
    parties with threshold ``k`` and write the pickled material to stdout."""
    parser = argparse.ArgumentParser()
    parser.add_argument('players', help='The number of players')
    parser.add_argument('k', help='k')
    args = parser.parse_args()
    keys = _generate_keys(int(args.players), args.k)
    # pickle.dumps returns bytes; print() would emit their repr (b'...'),
    # which cannot be unpickled after a shell redirect.  Write the raw
    # bytes to the binary stdout buffer instead.
    import sys
    sys.stdout.buffer.write(pickle.dumps(keys))
24 |
25 |
26 | if __name__ == '__main__':
27 | main()
28 |
--------------------------------------------------------------------------------
/honeybadgerbft/crypto/threshenc/tpke.py:
--------------------------------------------------------------------------------
1 | from charm.toolbox.pairinggroup import PairingGroup, ZR, G1, G2, pair
2 |
3 | from functools import reduce
4 | from base64 import encodestring, decodestring
5 | from operator import mul
6 |
7 | from Crypto.Hash import SHA256
8 | from Crypto import Random
9 | from Crypto.Cipher import AES
10 |
11 | # Threshold encryption based on Gap-Diffie-Hellman
12 | # - Only encrypts messages that are 32-byte strings
13 | # - For use in hybrid encryption schemes - first encrypt
14 | # a random key, use the key for symmetric AES
15 |
16 | # Baek and Zheng
17 | # http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.119.1717&rep=rep1&type=pdf
18 |
19 |
20 | # Dependencies: Charm, http://jhuisi.github.io/charm/
21 | # a wrapper for PBC (Pairing based crypto)
22 |
23 |
24 | group = PairingGroup('SS512')
25 | # group = PairingGroup('MNT224')
26 |
27 |
def serialize(g):
    """Serialize a group element to raw bytes (the 2-byte type tag,
    e.g. ``b'1:'``, is stripped before base64 decoding)."""
    # base64.decodestring was deprecated since 3.1 and removed in 3.9;
    # decodebytes is the same codec under its modern name.
    from base64 import decodebytes
    return decodebytes(group.serialize(g)[2:])
32 |
33 |
def deserialize0(g):
    """Deserialize raw bytes as a group element with type tag ``0``."""
    # base64.encodestring was removed in Python 3.9; encodebytes is the
    # modern name for the same function.
    from base64 import encodebytes
    return group.deserialize(b'0:'+encodebytes(g))
38 |
39 |
def deserialize1(g):
    """Deserialize raw bytes as a group element with type tag ``1`` (G1)."""
    # base64.encodestring was removed in Python 3.9; encodebytes is the
    # modern name for the same function.
    from base64 import encodebytes
    return group.deserialize(b'1:'+encodebytes(g))
44 |
45 |
def deserialize2(g):
    """Deserialize raw bytes as a group element with type tag ``2``."""
    # base64.encodestring was removed in Python 3.9; encodebytes is the
    # modern name for the same function.
    from base64 import encodebytes
    return group.deserialize(b'2:'+encodebytes(g))
50 |
51 |
def xor(x, y):
    """Byte-wise XOR of two 32-byte strings."""
    assert len(x) == len(y) == 32
    return bytes(a ^ b for a, b in zip(x, y))
56 |
57 |
# Fixed generator of G1, derived by hashing a known string so every party
# computes the same value; initPP() enables charm's pre-processing to
# speed up later exponentiations.
g1 = group.hash('geng1', G1)
g1.initPP()
# The same generator is reused as g2 (SS512 above is a symmetric pairing
# setting; the asymmetric alternative is commented out).
g2 = g1
# g2 = group.hash('geng2', G2)
# g2.initPP()
# Additive and multiplicative identities of ZR, built from random elements.
ZERO = group.random(ZR)*0
ONE = group.random(ZR)*0+1
65 |
66 |
def hashG(g):
    """Hash a group element to a 32-byte string: SHA-256 of its serialization."""
    return SHA256.new(serialize(g)).digest()
70 |
71 |
def hashH(g, x):
    """Hash a group element *g* together with a 32-byte string *x* onto G2."""
    assert len(x) == 32
    return group.hash(serialize(g) + x, G2)
76 |
77 |
class TPKEPublicKey(object):
    """Public key of the l-player, k-threshold encryption scheme
    (Baek-Zheng style, see module header): the master verification key
    plus one verification key per player."""
    def __init__(self, l, k, VK, VKs):
        """
        :param l: total number of players
        :param k: number of decryption shares required
        :param VK: master verification key
        :param VKs: per-player verification keys
        """
        self.l = l  # noqa: E741
        self.k = k
        self.VK = VK
        self.VKs = VKs

    def lagrange(self, S, j):
        """Lagrange coefficient for share *j* over the k-subset *S*,
        interpolating at 0; share j corresponds to evaluation point j+1
        (see :func:`dealer`)."""
        # Assert S is a subset of range(0,self.l)
        assert len(S) == self.k
        assert type(S) is set
        assert S.issubset(range(0, self.l))
        S = sorted(S)

        assert j in S
        assert 0 <= j < self.l
        num = reduce(mul, [0 - jj - 1 for jj in S if jj != j], ONE)
        den = reduce(mul, [j - jj for jj in S if jj != j], ONE)  # noqa: E272
        return num / den

    def encrypt(self, m):
        """Encrypt a 32-byte message: ``U = g1**r``,
        ``V = m XOR hashG(VK**r)``, ``W = hashH(U, V)**r``."""
        # Only encrypt 32 byte strings
        assert len(m) == 32
        # print '1'
        r = group.random(ZR)
        # print '2'
        U = g1 ** r
        # print '3'
        # V = xor(m, hashG(pair(g1, self.VK ** r)))
        # V = xor(m, hashG(pair(g1, self.VK ** r)))
        V = xor(m, hashG(self.VK ** r))
        # print '4'
        W = hashH(U, V) ** r
        # print '5'
        C = (U, V, W)
        return C

    def verify_ciphertext(self, U, V, W):
        """Check the ciphertext is well formed: e(g1, W) == e(U, hashH(U, V))."""
        # Check correctness of ciphertext
        H = hashH(U, V)
        assert pair(g1, W) == pair(U, H)
        return True

    def verify_share(self, i, U_i, U, V, W):
        """Check decryption share *U_i* from player *i*: e(U_i, g2) == e(U, VK_i)."""
        assert 0 <= i < self.l
        Y_i = self.VKs[i]
        assert pair(U_i, g2) == pair(U, Y_i)
        return True

    def combine_shares(self, U, V, W, shares):
        """Interpolate k decryption shares (mapping index -> share) and
        recover the 32-byte plaintext."""
        # sigs: a mapping from idx -> sig
        S = set(shares.keys())
        assert S.issubset(range(self.l))

        # ASSUMPTION
        # assert self.verify_ciphertext((U,V,W))

        # ASSUMPTION
        for j, share in shares.items():
            self.verify_share(j, share, U, V, W)

        res = reduce(mul,
                     [share ** self.lagrange(S, j)
                      for j, share in shares.items()], ONE)
        return xor(hashG(res), V)
150 |
151 |
class TPKEPrivateKey(TPKEPublicKey):
    """Private key share for player *i*: the public key plus this
    player's secret exponent."""
    def __init__(self, l, k, VK, VKs, SK, i):
        """
        :param SK: this player's secret-key share
        :param i: this player's index, ``0 <= i < l``
        """
        super(TPKEPrivateKey, self).__init__(l, k, VK, VKs)
        assert 0 <= i < self.l
        self.i = i
        self.SK = SK

    def decrypt_share(self, U, V, W):
        """Return this player's decryption share ``U ** SK`` after
        checking the ciphertext is well formed."""
        # ASSUMPTION
        assert self.verify_ciphertext(U, V, W)

        # print U, V, W
        # print U
        # print self.SK
        U_i = U ** self.SK

        return U_i
172 |
173 |
def dealer(players=10, k=5):
    """Generate a fresh master key and split it k-of-*players*.

    Samples a random degree-(k-1) polynomial f with f(0) = master
    secret; player i (0-based) receives the share f(i+1).

    :return: ``(TPKEPublicKey, [TPKEPrivateKey])``
    """
    # Random polynomial coefficients
    secret = group.random(ZR)
    a = [secret]
    for i in range(1, k):
        a.append(group.random(ZR))
    assert len(a) == k

    # Polynomial evaluation
    def f(x):
        """Evaluate the polynomial with coefficients ``a`` at *x*."""
        y = ZERO
        xx = ONE
        for coeff in a:
            y += coeff * xx
            xx *= x
        return y

    # Shares of master secret key
    SKs = [f(i) for i in range(1, players+1)]
    assert f(0) == secret

    # Verification keys
    VK = g2 ** secret
    VKs = [g2 ** xx for xx in SKs]

    public_key = TPKEPublicKey(players, k, VK, VKs)
    private_keys = [TPKEPrivateKey(players, k, VK, VKs, SK, i)
                    for i, SK in enumerate(SKs)]

    # Check reconstruction of 0
    S = set(range(0, k))
    lhs = f(0)
    rhs = sum(public_key.lagrange(S, j) * f(j+1) for j in S)
    assert lhs == rhs
    # print i, 'ok'

    return public_key, private_keys
213 |
214 |
215 | # Symmetric cryptography. Use AES with a 32-byte key
216 |
BS = 16  # AES block size in bytes; CBC padding granularity below
218 |
219 |
def pad(s):
    """Pad *s* up to a multiple of ``BS`` bytes, PKCS#7 style: the pad
    length (1..BS) is repeated as the pad byte itself.

    Accepts ``bytes`` as well as ``str``: the original ``chr``-based
    padding raised ``TypeError`` under Python 3 whenever ``encrypt`` was
    given a ``bytes`` plaintext, since ``str`` cannot be concatenated to
    ``bytes``.
    """
    padlen = BS - len(s) % BS
    if isinstance(s, bytes):
        return s + bytes([padlen]) * padlen
    return s + padlen * chr(padlen)
222 |
223 |
def unpad(s):
    """Strip PKCS#7-style padding: the last byte/char encodes the pad length."""
    padlen = ord(s[-1:])
    return s[:-padlen]
226 |
227 |
def encrypt(key, raw):
    """Encrypt *raw* with AES-CBC under the 32-byte *key*.

    A fresh random IV is prepended to the ciphertext so that
    :func:`decrypt` can recover it from the first 16 bytes.
    """
    assert len(key) == 32
    raw = pad(raw)
    iv = Random.new().read(AES.block_size)
    cipher = AES.new(key, AES.MODE_CBC, iv)
    return (iv + cipher.encrypt(raw))
235 |
236 |
def decrypt(key, enc):
    """AES-CBC decrypt *enc* (IV-prefixed, as produced by :func:`encrypt`)
    and strip the padding."""
    iv, body = enc[:16], enc[16:]
    cipher = AES.new(key, AES.MODE_CBC, iv)
    return unpad(cipher.decrypt(body))
243 |
--------------------------------------------------------------------------------
/honeybadgerbft/crypto/threshsig/README.md:
--------------------------------------------------------------------------------
1 | Implementation of Shoup's RSA-based threshold signature scheme.
2 |
3 |
4 |
--------------------------------------------------------------------------------
/honeybadgerbft/crypto/threshsig/__init__.py:
--------------------------------------------------------------------------------
1 | from . import boldyreva
2 | from . import boldyreva_gipc
3 | from . import generate_keys
4 | from . import millerrabin
5 |
6 |
7 | __all__ = ["millerrabin", "boldyreva", "boldyreva_gipc", "generate_keys"]
8 |
--------------------------------------------------------------------------------
/honeybadgerbft/crypto/threshsig/boldyreva.py:
--------------------------------------------------------------------------------
1 | """An implementation of (unique) threshold signatures based on
2 | Gap-Diffie-Hellman Boldyreva, 2002 https://eprint.iacr.org/2002/118.pdf
3 |
4 | Dependencies:
5 | Charm, http://jhuisi.github.io/charm/ a wrapper for PBC (Pairing
6 | based crypto)
7 |
8 | """
9 | from charm.toolbox.pairinggroup import PairingGroup, ZR, G1, G2, pair
10 | from base64 import encodestring, decodestring
11 | from operator import mul
12 | from functools import reduce
13 |
14 | # group = PairingGroup('SS512')
15 | # group = PairingGroup('MNT159')
16 | group = PairingGroup('MNT224')
17 |
18 |
def serialize(g):
    """Serialize a group element to raw bytes (the 2-byte type tag,
    e.g. ``b'1:'``, is stripped before base64 decoding)."""
    # base64.decodestring was deprecated since 3.1 and removed in 3.9;
    # decodebytes is the same codec under its modern name.
    from base64 import decodebytes
    return decodebytes(group.serialize(g)[2:])
23 |
24 |
def deserialize0(g):
    """Deserialize raw bytes as a group element with type tag ``0``."""
    # base64.encodestring was removed in Python 3.9; encodebytes is the
    # modern name for the same function.
    from base64 import encodebytes
    return group.deserialize(b'0:'+encodebytes(g))
29 |
30 |
def deserialize1(g):
    """Deserialize raw bytes as a group element with type tag ``1`` (G1)."""
    # base64.encodestring was removed in Python 3.9; encodebytes is the
    # modern name for the same function.
    from base64 import encodebytes
    return group.deserialize(b'1:'+encodebytes(g))
35 |
36 |
def deserialize2(g):
    """Deserialize raw bytes as a group element with type tag ``2`` (G2)."""
    # base64.encodestring was removed in Python 3.9; encodebytes is the
    # modern name for the same function.
    from base64 import encodebytes
    return group.deserialize(b'2:'+encodebytes(g))
41 |
42 |
# Fixed generators of G1 and G2, derived by hashing known strings so every
# party computes the same values; initPP() enables charm's pre-processing
# to speed up later exponentiations.
g1 = group.hash('geng1', G1)
g1.initPP()
# g2 = g1
g2 = group.hash('geng2', G2)
g2.initPP()
# Deterministic (seeded) additive and multiplicative identities of ZR.
ZERO = group.random(ZR, seed=59)*0
ONE = group.random(ZR, seed=60)*0+1
50 |
51 |
def polynom_eval(x, coefficients):
    """Evaluate the polynomial given by *coefficients* (lowest degree
    first) at the point *x*, over ZR."""
    result = ZERO
    power = ONE  # x ** 0
    for coefficient in coefficients:
        result = result + coefficient * power
        power = power * x
    return result
60 |
61 |
class TBLSPublicKey(object):
    """Threshold BLS public key: master verification key plus one
    verification key per player, with helpers to verify shares and
    combine k of them into the unique master signature."""
    def __init__(self, l, k, VK, VKs):
        """
        :param l: total number of players
        :param k: number of signature shares required
        :param VK: master verification key
        :param VKs: per-player verification keys
        """
        self.l = l  # noqa: E741
        self.k = k
        self.VK = VK
        self.VKs = VKs

    def __getstate__(self):
        """Pickle support: group elements are serialized to raw bytes."""
        d = dict(self.__dict__)
        d['VK'] = serialize(self.VK)
        d['VKs'] = list(map(serialize, self.VKs))
        return d

    def __setstate__(self, d):
        """Unpickle support: restore group elements from raw bytes."""
        self.__dict__ = d
        self.VK = deserialize2(self.VK)
        self.VKs = list(map(deserialize2, self.VKs))
        # NOTE(review): leftover debug output; consider removing or
        # routing through logging.
        print("I'm being depickled")

    def lagrange(self, S, j):
        """Lagrange coefficient for share *j* over the k-subset *S*,
        interpolating at 0; share j corresponds to evaluation point j+1
        (see :func:`dealer`)."""
        # Assert S is a subset of range(0,self.l)
        assert len(S) == self.k
        assert type(S) is set
        assert S.issubset(range(0, self.l))
        S = sorted(S)

        assert j in S
        assert 0 <= j < self.l
        num = reduce(mul, [0 - jj - 1 for jj in S if jj != j], ONE)
        den = reduce(mul, [j - jj for jj in S if jj != j], ONE)  # noqa: E272
        # assert num % den == 0
        return num / den

    def hash_message(self, m):
        """Full-domain hash of message *m* onto G1."""
        return group.hash(m, G1)

    def verify_share(self, sig, i, h):
        """Check share *sig* from player *i* on hashed message *h*:
        e(sig, g2) == e(h, VK_i)."""
        assert 0 <= i < self.l
        B = self.VKs[i]
        assert pair(sig, g2) == pair(h, B)
        return True

    def verify_signature(self, sig, h):
        """Check a combined signature against the master verification
        key: e(sig, g2) == e(h, VK)."""
        assert pair(sig, g2) == pair(h, self.VK)
        return True

    def combine_shares(self, sigs):
        """Interpolate a mapping ``{player_index: share}`` into the
        unique master signature."""
        # sigs: a mapping from idx -> sig
        S = set(sigs.keys())
        assert S.issubset(range(self.l))

        res = reduce(mul,
                     [sig ** self.lagrange(S, j)
                      for j, sig in sigs.items()], 1)
        return res
126 |
127 |
class TBLSPrivateKey(TBLSPublicKey):
    """Private key share for player *i*: the public key plus this
    player's secret-key share."""

    def __init__(self, l, k, VK, VKs, SK, i):
        """
        :param SK: this player's secret-key share
        :param i: this player's index, ``0 <= i < l``
        """
        super(TBLSPrivateKey, self).__init__(l, k, VK, VKs)
        assert 0 <= i < self.l
        self.i = i
        self.SK = SK

    def sign(self, h):
        """Produce this player's signature share on hashed message *h*."""
        return h ** self.SK
141 |
142 |
def dealer(players=10, k=5, seed=None):
    """Generate a threshold BLS key set: a k-of-*players* sharing of a
    fresh master secret.

    :param players: number of key shares to deal
    :param k: signing threshold
    :param seed: optional seed for reproducible key generation
    :return: ``(TBLSPublicKey, [TBLSPrivateKey])``
    """
    # Random polynomial coefficients
    a = group.random(ZR, count=k, seed=seed)
    assert len(a) == k
    secret = a[0]

    # Shares of master secret key
    SKs = [polynom_eval(i, a) for i in range(1, players+1)]
    assert polynom_eval(0, a) == secret

    # Verification keys
    VK = g2 ** secret
    VKs = [g2 ** xx for xx in SKs]

    public_key = TBLSPublicKey(players, k, VK, VKs)
    private_keys = [TBLSPrivateKey(players, k, VK, VKs, SK, i)
                    for i, SK in enumerate(SKs)]

    # Check reconstruction of 0
    S = set(range(0, k))
    lhs = polynom_eval(0, a)
    rhs = sum(public_key.lagrange(S, j) * polynom_eval(j+1, a) for j in S)
    assert lhs == rhs
    # print i, 'ok'

    return public_key, private_keys
170 |
--------------------------------------------------------------------------------
/honeybadgerbft/crypto/threshsig/boldyreva_gipc.py:
--------------------------------------------------------------------------------
1 | from .boldyreva import serialize, deserialize1
2 | import gipc
3 | import random
4 |
# If this module object is re-executed with leftover state (e.g. a reload
# during experiments), terminate any worker processes from the previous
# run before resetting the pool list.  On a fresh import `_procs` is not
# in globals() and the guard is a no-op.
if '_procs' in globals():
    for p, pipe in _procs:
        p.terminate()
        p.join()
    del _procs
_procs = []  # list of (gipc process, duplex pipe end) pairs; see initialize()
11 |
12 |
def _worker(PK, pipe):
    """Serve one combine-and-verify request arriving on *pipe*.

    Receives ``(h, sigs)`` with boldyreva-serialized values, deserializes
    them, combines the shares into a signature, verifies it against *h*,
    and replies with ``(verified, serialized_signature)``.
    """
    (h, sigs) = pipe.get()
    sigs = dict(sigs)
    for s in sigs:
        sigs[s] = deserialize1(sigs[s])
    h = deserialize1(h)
    sig = PK.combine_shares(sigs)
    res = PK.verify_signature(sig, h)
    pipe.put((res, serialize(sig)))
22 |
23 |
def worker_loop(PK, pipe):
    """Worker-process entry point: serve combine-and-verify requests on
    *pipe* forever (terminated externally via p.terminate())."""
    while True:
        _worker(PK, pipe)
28 |
29 |
myPK = None  # public key captured by initialize(); read by combine_and_verify()
31 |
32 |
def initialize(PK, size=1):
    """Spawn *size* gipc worker processes for combine_and_verify,
    remembering *PK* and each worker's pipe end in module globals."""
    global _procs, myPK
    myPK = PK
    _procs = []
    for s in range(size):
        (r, w) = gipc.pipe(duplex=True)
        p = gipc.start_process(worker_loop, args=(PK, r,))
        _procs.append((p, w))
42 |
43 |
def combine_and_verify(h, sigs):
    """Combine exactly k signature shares and verify them against hashed
    message *h*, off-loading the pairing work to a random worker process.

    :return: ``(serialized_signature, worker_process)``; raises
        AssertionError if the worker reports verification failure.
    """
    # return True  # we are skipping the verification
    assert len(sigs) == myPK.k
    sigs = dict((s, serialize(v)) for s, v in sigs.items())
    h = serialize(h)
    # Pick a random process
    gipc_process, pipe = _procs[random.choice(range(len(_procs)))]  # random.choice(_procs)
    pipe.put((h, sigs))
    (r, s) = pipe.get()
    assert r is True
    return s, gipc_process
56 |
--------------------------------------------------------------------------------
/honeybadgerbft/crypto/threshsig/boldyreva_pool.py:
--------------------------------------------------------------------------------
1 | from .boldyreva import dealer, serialize, deserialize1
2 |
3 |
_pool_PK = None  # public key captured by initialize()
_pool = None  # multiprocessing.Pool created by initialize()
6 |
7 |
def initialize(PK):
    """Start the module-level multiprocessing pool and remember *PK*
    for later combine_and_verify() calls."""
    from multiprocessing import Pool
    global _pool
    _pool = Pool()
    print('Pool started')

    global _pool_PK
    _pool_PK = PK
17 |
18 |
def _combine_and_verify(h, sigs, pk=None):
    """Pool-worker side: deserialize *h* and the share mapping *sigs*,
    combine the shares, and verify the result.

    NOTE(review): the verification outcome is only printed; the function
    returns True unconditionally.  The `global _pool_PK` declaration is
    unused here (pk is passed explicitly by combine_and_verify).
    """
    global _pool_PK
    if pk is None:
        pk = PK  # XXX PK is a global set in pool_test
    sigs = dict(sigs)
    for s in sigs:
        sigs[s] = deserialize1(sigs[s])
    h = deserialize1(h)
    sig = pk.combine_shares(sigs)
    print(pk.verify_signature(sig, h))
    return True
31 |
32 |
def combine_and_verify(h, sigs):
    """Serialize *h* and exactly k shares, then run _combine_and_verify
    on the pool, blocking until it completes."""
    assert len(sigs) == _pool_PK.k
    sigs = dict((s, serialize(v)) for s, v in sigs.items())
    h = serialize(h)
    promise = _pool.apply_async(
        _combine_and_verify, (h, sigs), {'pk': _pool_PK})
    assert promise.get() is True
41 |
42 |
def pool_test():
    """Benchmark: deal 64 keys (k=17), sign a message with every share,
    then combine+verify 100 times on a process pool and 100 times inline,
    printing wall-clock timestamps for comparison."""
    global PK, SKs
    PK, SKs = dealer(players=64, k=17)

    global sigs, h
    sigs = {}
    h = PK.hash_message('hi')
    h.initPP()
    for SK in SKs:
        sigs[SK.i] = SK.sign(h)

    from multiprocessing import Pool
    pool = Pool()
    print('Pool started')
    import time
    # Serialize the first k shares once, outside the timed loops.
    sigs2 = dict((s, serialize(sigs[s])) for s in range(PK.k))
    _h = serialize(h)

    # Combine 100 times (in parallel on the pool)
    if 1:
        promises = [pool.apply_async(_combine_and_verify,
                                     (_h, sigs2))
                    for i in range(100)]
        print('launched', time.time())
        for p in promises:
            assert p.get() is True
        print('done', time.time())

    # Combine 100 times (inline baseline, no pool)
    if 1:
        print('launched', time.time())
        for i in range(100):
            _combine_and_verify(_h, sigs2)
        print('done', time.time())

    print('work done')
    pool.terminate()
    pool.join()
    print('ok')
83 |
--------------------------------------------------------------------------------
/honeybadgerbft/crypto/threshsig/generate_keys.py:
--------------------------------------------------------------------------------
1 | from .boldyreva import dealer, serialize
2 | import argparse
3 | import pickle
4 |
5 |
def _generate_keys(players, k):
    """Run the threshold-signature dealer and return the key material in
    picklable (serialized) form: ``(l, k, VK, [VKs], [(i, SK)])``."""
    if k:
        threshold = int(k)
    else:
        threshold = players // 2  # N - 2 * t
    PK, SKs = dealer(players=players, k=threshold)
    serialized_vks = [serialize(vk) for vk in PK.VKs]
    serialized_sks = [(sk.i, serialize(sk.SK)) for sk in SKs]
    return (PK.l, PK.k, serialize(PK.VK), serialized_vks, serialized_sks)
14 |
15 |
def main():
    """CLI entry point: deal threshold-signature keys for ``players``
    parties with threshold ``k`` and write the pickled material to stdout."""
    parser = argparse.ArgumentParser()
    parser.add_argument('players', help='The number of players')
    parser.add_argument('k', help='k')
    args = parser.parse_args()
    keys = _generate_keys(int(args.players), args.k)
    # pickle.dumps returns bytes; print() would emit their repr (b'...'),
    # which cannot be unpickled after a shell redirect.  Write the raw
    # bytes to the binary stdout buffer instead.
    import sys
    sys.stdout.buffer.write(pickle.dumps(keys))
24 |
25 |
26 | if __name__ == '__main__':
27 | main()
28 |
--------------------------------------------------------------------------------
/honeybadgerbft/crypto/threshsig/millerrabin.py:
--------------------------------------------------------------------------------
1 | import random
2 | import math
3 |
4 |
def generateLargePrime(k):
    """Return a random probable prime of bit length exactly *k*.

    NOTE(review): on exhausting its attempt budget this returns an error
    *string* rather than raising; callers (e.g. misc/shoup_tsig/shoup.py)
    type-check the result for ``str``, so changing this would break them.
    """
    # k is the desired bit length
    r = 100*(math.log(k, 2)+1)  # number of attempts max
    r_ = r
    while r > 0:
        # randrange is mersenne twister and is completely deterministic
        # unusable for serious crypto purposes
        n = random.randrange(2**(k-1), 2**(k))
        r -= 1
        if is_probable_prime(n) is True:
            return n
    return "Failure after {} tries.".format(r_)
17 |
18 |
_mrpt_num_trials = 50  # number of random bases tested by is_probable_prime
20 |
21 |
def is_probable_prime(n):
    """
    Miller-Rabin primality test.

    A return value of False means n is certainly not prime. A return value of
    True means n is very likely a prime.

    >>> is_probable_prime(1)
    Traceback (most recent call last):
    ...
    AssertionError
    >>> is_probable_prime(2)
    True
    >>> is_probable_prime(3)
    True
    >>> is_probable_prime(4)
    False
    >>> is_probable_prime(5)
    True
    >>> is_probable_prime(123456789)
    False

    >>> primes_under_1000 = [i for i in range(2, 1000) if is_probable_prime(i)]
    >>> len(primes_under_1000)
    168
    >>> primes_under_1000[-10:]
    [937, 941, 947, 953, 967, 971, 977, 983, 991, 997]

    >>> is_probable_prime(6438080068035544392301298549614926991513861075340134\
3291807343952413826484237063006136971539473913409092293733259038472039\
7133335969549256322620979036686633213903952966175107096769180017646161\
851573147596390153)
    True

    >>> is_probable_prime(7438080068035544392301298549614926991513861075340134\
3291807343952413826484237063006136971539473913409092293733259038472039\
7133335969549256322620979036686633213903952966175107096769180017646161\
851573147596390153)
    False
    """
    assert n >= 2
    # special case 2
    if n == 2:
        return True
    # ensure n is odd
    if n % 2 == 0:
        return False
    # write n-1 as 2**s * d
    # repeatedly try to divide n-1 by 2
    s = 0
    d = n-1
    while True:
        quotient, remainder = divmod(d, 2)
        if remainder == 1:
            break
        s += 1
        d = quotient
    assert(2**s * d == n-1)

    # test the base a to see whether it is a witness for the compositeness of n
    def try_composite(a):
        if pow(a, d, n) == 1:
            return False
        for i in range(s):
            if pow(a, 2**i * d, n) == n-1:
                return False
        return True  # n is definitely composite

    # Each random base that fails to witness compositeness leaves at most
    # a 1/4 chance of error, so 50 trials bound the error by 4**-50.
    for i in range(_mrpt_num_trials):
        a = random.randrange(2, n)
        if try_composite(a):
            return False

    return True  # no base tested showed n as composite
96 |
--------------------------------------------------------------------------------
/honeybadgerbft/exceptions.py:
--------------------------------------------------------------------------------
class HoneybadgerbftError(Exception):
    """Base class for all honeybadgerbft exceptions."""
3 |
4 |
class BroadcastError(HoneybadgerbftError):
    """Base class for broadcast errors."""
7 |
8 |
class UnknownTagError(BroadcastError):
    """Raised when a message carries an unknown broadcast tag."""
11 |
12 |
class RedundantMessageError(BroadcastError):
    """Raised when a redundant message is received."""
15 |
16 |
class AbandonedNodeError(HoneybadgerbftError):
    """Raised when a node does not have enough peers to carry on a distributed task."""
19 |
--------------------------------------------------------------------------------
/misc/README:
--------------------------------------------------------------------------------
1 | Miscellaneous scripts and tools. Possibly snippets of unfinished ideas.
--------------------------------------------------------------------------------
/misc/shoup_tsig/generate_keys_shoup.py:
--------------------------------------------------------------------------------
1 | from shoup import *
2 | import argparse
3 | import cPickle
4 |
5 |
def main():
    # Python 2 script: parse the player count and threshold k, run the
    # Shoup dealer, and dump the (public key, secret key shares) pair
    # with cPickle to stdout.
    parser = argparse.ArgumentParser()
    parser.add_argument('players', help='The number of players')
    parser.add_argument('k', help='k')
    args = parser.parse_args()
    players = int(args.players)
    k = int(args.k)
    PK, SKs = dealer(players=players, k=k)
    print cPickle.dumps((PK, SKs))
15 |
16 | if __name__ == '__main__':
17 | main()
18 |
--------------------------------------------------------------------------------
/misc/shoup_tsig/shoup.py:
--------------------------------------------------------------------------------
1 | # Practical Threshold Signatures [Shoup2000]
2 | # http://eprint.iacr.org/1999/011
3 | #
4 | # l: total number of parties
5 | # k: number of shares required to obtain signature
6 | # t: number of corruptible parties
7 | #
8 | # We only care about the case when k = t+1.
9 |
10 | import random
11 | import millerrabin
12 | import gmpy2
13 | import math
14 | from fractions import gcd
15 |
16 | # To generate safe primes:
17 | # $ openssl gendh 1024 | openssl dh -noout -text
18 |
19 | # https://tools.ietf.org/html/draft-ietf-tls-srp-07#ref-MODP
# 1024-bit safe prime from the SRP MODP groups draft referenced above.
safe_prime_1 = int(''.join("EEAF0AB9 ADB38DD6 9C33F80A FA8FC5E8 60726187 75FF3C0B 9EA2314C 9C256576 D674DF74 96EA81D3 383B4813 D692C6E0 E0D5D8E2 50B98BE4 8E495C1D 6089DAD1 5DC7D7B4 6154D6B6 CE8EF4AD 69B15D49 82559B29 7BCF1885 C529F566 660E57EC 68EDBC3C 05726CC0 2FD4CBF4 976EAA9A FD5138FE 8376435B 9FC61D2F C0EB06E3".split()),16)

from Crypto.Hash import SHA256
# Full-domain hash to an integer; `long` makes this module Python-2-only.
hash = lambda x: long(SHA256.new(x).hexdigest(),16)

# 1024-bit safe prime in the colon-separated hex layout emitted by
# `openssl dh -noout -text` (see the note near the top of this file).
safe_prime_2 = int(''.join("""
    00:d7:3a:cf:a2:50:7d:13:45:56:5c:cb:7a:8b:55:
    9d:3d:59:86:d0:01:58:e7:77:1b:11:6e:a8:a9:0f:
    6e:cc:46:d6:c2:e8:b6:1d:78:0d:4d:62:78:1d:3f:
    a2:2f:52:fc:6b:1b:47:61:68:4a:39:da:28:fd:d6:
    bb:fb:72:a5:ea:c9:a1:aa:8f:74:83:6d:97:71:11:
    82:ec:13:b8:28:b9:ea:ca:40:29:3c:7c:90:3e:a2:
    91:c9:59:05:c9:a4:fc:1f:b7:57:03:67:c4:28:e7:
    0c:7d:2c:d9:bb:bc:cf:1a:e3:4f:7f:05:95:5e:79:
    7c:ac:63:6a:16:31:c5:01:2b""".replace(':','').split()),16)
35 |
def egcd(a, b):
    # Iterative extended Euclid: returns (g, x, y) with a*x + b*y == g,
    # where g = gcd(a, b).
    x, y = 0, 1
    u, v = 1, 0
    while a != 0:
        q, r = divmod(b, a)
        b, a = a, r
        x, y, u, v = u, v, x - u*q, y - v*q
    return b, x, y
44 |
def generate_safe_prime(bits):
    # Search for a safe prime p = 2*q + 1, where q is a (bits-1)-bit
    # probable prime, retrying until one is found.
    while True:
        print('trying prime')
        p_ = millerrabin.generateLargePrime(bits-1)
        # generateLargePrime signals failure by returning a message string
        if type(p_) is str:
            print('failed to find prime, trying again')
            continue
        p = 2*p_ + 1
        if millerrabin.is_probable_prime(p):
            return p
        else:
            # BUG FIX: this message was a bare string expression (a no-op),
            # so the "not a safe prime" retry happened silently.
            print('not a safe prime, trying again')
57 |
def random_Qn(n):
    # Draw a uniformly random element and square it mod n, yielding a
    # random quadratic residue.
    r = random.randrange(0, n)
    return (r * r) % n
62 |
def dealer(bits=2048, players=10, k=5):
    # Deal a k-of-players Shoup threshold RSA signing key.
    # NOTE: Python 2 module -- `/` below is floor division on ints.
    #random.seed(1203103)
    global n, m, p, q, e, d, shares
    assert bits == 2048, 'need different parameters'
    # Fixed safe primes (see module constants) instead of fresh generation.
    p = safe_prime_1
    q = safe_prime_2
    assert p.bit_length() == q.bit_length() == 1024

    n = p*q  # RSA modulus
    # m = p'q' with p = 2p'+1, q = 2q'+1; the division is exact since
    # p-1 and q-1 are even.
    m = (p-1)/2 * (q-1)/2

    # NOTE(review): `trapdoors` (and `trapdoor` below) are computed but
    # never used or returned.
    trapdoors = dict(p=p, q=q)

    # Public exponent
    e = millerrabin.generateLargePrime(players.bit_length()+1)

    # Compute d such that de == 1 mod m
    d = gmpy2.divm(1, e, m)
    assert (d*e) % m == 1

    public_key = (n,e)
    #print 'public_key', public_key

    trapdoor = dict(d=d, p=p, q=q)

    # Random polynomial coefficients (constant term is the secret d)
    a = [d]
    for i in range(1,k):
        a.append(random.randrange(0,m))
    assert len(a) == k

    # Polynomial evaluation
    def f(x):
        y = 0
        xx = 1
        for coeff in a:
            y += coeff * xx
            xx *= x
        return y

    # Shares of master secret key: player i (1-based) gets f(i)
    SKs = []
    for i in range(1,players+1):
        SKs.append(f(i))

    # Random quadratic residue
    VK = v = random_Qn(n)

    # Verification keys
    VKs = []
    for i in range(players):
        VKs.append(gmpy2.powmod(v, SKs[i], n))

    public_key = ShoupPublicKey(n, e, players, k, VK, VKs)
    secret_keys = [ShoupPrivateKey(n, e, players, k, VK, VKs, SK, i)
                   for i, SK in enumerate(SKs,start=1)]

    # Sanity-check reconstruction of Delta * f(0) from the first k shares
    for i in [0]:
        S = set(range(1,k+1))
        lhs = (public_key.Delta() * f(i)) % m
        rhs = sum(public_key.lambdaS(S,i,j) * f(j) for j in S) % m
        assert lhs == rhs
        #print i, 'ok'

    return public_key, secret_keys
128 |
class ShoupPublicKey(object):
    """Public key for Shoup's threshold RSA signature scheme.

    Holds modulus n, public exponent e, player count l, threshold k, and
    verification values VK/VKs (carried but not used by the code below).
    Python 2 only: relies on the `1L` literal, dict.iteritems, builtin
    reduce, and integer `/` floor division.
    """

    def __init__(self, n, e, l, k, VK, VKs):
        self.n = n
        self.e = e
        self.l = l   # number of players
        self.k = k   # threshold: number of shares needed to combine
        self.VK = VK
        self.VKs = VKs

    def lambdaS(self, S, i, j):
        """Integer Lagrange coefficient Delta * lambda^S_{i,j}.

        Scaled by Delta = l! so the value is an integer (Shoup's trick);
        the exactness of the division is asserted before returning.
        """
        # S must be exactly k distinct player indices drawn from 1..l
        assert len(S) == self.k
        assert type(S) is set
        assert S.issubset(range(1,self.l+1))
        S = sorted(S)

        assert i not in S
        assert 0 <= i <= self.l
        assert j in S
        assert 1 <= j <= self.l
        mul = lambda a,b: a*b
        num = reduce(mul, [i - jj for jj in S if jj != j], 1)
        den = reduce(mul, [j - jj for jj in S if jj != j], 1)
        assert (self.Delta()*num) % den == 0
        # Python 2 `/` on ints is floor division; exact per the assert above.
        return self.Delta() * num / den

    def Delta(self):
        """l! -- the clearing factor making Lagrange coefficients integral."""
        return math.factorial(self.l)

    def combine_shares(self, m, sigs):
        """Combine k signature shares on message m into one RSA signature.

        sigs: mapping from 1-based player index -> signature share.
        Interpolation yields w = x^(ep*d) mod n with ep = 4*Delta^2; the
        Bezout coefficients of (ep, e) then strip the extra exponent to
        produce y with y^e == x (mod n).
        """
        S = set(sigs.keys())
        assert S.issubset(range(1, self.l+1))

        x = hash(m)

        # powmod handles negative exponents by inverting mod n
        def ppow(x, e, n): return gmpy2.powmod(x,e,n)

        w = 1L
        for i,sig in sigs.iteritems():
            w = (w * ppow(sig, 2*self.lambdaS(S,0,i), self.n)) % self.n

        ep = 4*self.Delta()**2  # at this point w == x^(ep*d) mod n

        #assert pow(w, e, self.n) == pow(x, ep, self.n)
        assert gcd(ep, self.e) == 1

        # a*ep + b*e == 1, hence y = w^a * x^b satisfies y^e == x (mod n)
        _, a, b = egcd(ep, self.e)
        y = (ppow(w, a, self.n) *
             ppow(x, b, self.n)) % self.n

        #assert self.verify_signature(y, m)
        return y

    def verify_signature(self, sig, m):
        """Standard RSA check: sig^e mod n must equal hash(m).

        Raises AssertionError on an invalid signature; returns True otherwise.
        """
        y = sig
        x = hash(m)
        assert x == gmpy2.powmod(y, self.e, self.n)
        return True
194 |
class ShoupPrivateKey(ShoupPublicKey):
    """Player i's key share: the public parameters plus SK = f(i)."""

    def __init__(self, n, e, l, k, VK, VKs, SK, i):
        super(ShoupPrivateKey,self).__init__(n, e, l, k, VK, VKs)
        assert 1 <= i <= self.l
        self.i = i    # 1-based player index
        self.SK = SK  # polynomial share f(i) of the secret exponent d

    def sign(self, m):
        """Generate this player's signature share on message m:
        hash(m)^(2*Delta*SK) mod n."""
        # Generates a signature share on m
        x = hash(m)
        return gmpy2.powmod(x, 2*self.Delta()*self.SK, self.n)
206 |
def test():
    """Smoke test: deal 100 shares (k=35), sign, combine, and verify."""
    global PK, SKs
    PK, SKs = dealer(players=100,k=35)

    global sigs
    sigs = {}
    for SK in SKs:
        sigs[SK.i] = SK.sign('hi')

    # list(range(...)) so random.shuffle also works on Python 3
    # (range() returns a lazy object there); identical behaviour on Python 2
    # and consistent with test/crypto/threshsig/test_boldyreva.py.
    SS = list(range(1, PK.l+1))
    for i in range(20):
        random.shuffle(SS)
        S = set(SS[:PK.k])
        sig = PK.combine_shares('hi', dict((s,sigs[s]) for s in S))
        assert PK.verify_signature(sig, 'hi')
222 |
--------------------------------------------------------------------------------
/misc/zfec_benchmark.py:
--------------------------------------------------------------------------------
1 | ##########
2 |
3 | import zfec
4 | import time
5 | import os
6 | import math
7 |
def ceil(x):
    """Smallest integer >= x, as an int (math.ceil yields a float on Python 2)."""
    rounded_up = math.ceil(x)
    return int(rounded_up)
10 |
11 | def testEncoder(N, t, buf, Threshold, enc):
12 | step = len(buf) % Threshold == 0 and len(buf) / Threshold or (len(buf) / Threshold + 1)
13 | buf = buf.ljust(step * Threshold, '\xFF')
14 | fragList = [buf[i*step:(i+1)*step] for i in range(Threshold)]
15 | return enc.encode(fragList)
16 |
def main():
    """Benchmark zfec encode/decode over a parameter sweep.

    N doubles from 4 to 128 with t = N/4 faults; Threshold =
    ceil((N-t+1)/2) fragments suffice to reconstruct.  Payloads are
    (2**j)*250 random bytes for j in 9..11.  Timings are printed
    (Python 2 print-statement syntax), nothing is returned.
    """
    for i in range(2, 8):
        N = 2**i
        t = 2**(i-2)
        Threshold = ceil((N-t+1)/2.0)
        zfecEncoder = zfec.Encoder(Threshold, N)
        zfecDecoder = zfec.Decoder(Threshold, N)
        for j in range(9, 12):
            Tx = os.urandom((2**j) * 250)
            start = time.time()
            fragList = testEncoder(N, t, Tx, Threshold, zfecEncoder)
            print N, t, 2**j, 'encode', time.time() - start
            start = time.time()
            # decode the first Threshold fragments, telling zfec their indices
            zfecDecoder.decode(fragList[:Threshold], range(Threshold))
            print N, t, 2**j, 'decode', time.time() - start
32 |
if __name__ == '__main__':
    # Run the benchmark only when executed as a script, not on import.
    main()
--------------------------------------------------------------------------------
/pytest.ini:
--------------------------------------------------------------------------------
1 | [pytest]
2 | testpaths = test/
3 |
--------------------------------------------------------------------------------
/setup.cfg:
--------------------------------------------------------------------------------
1 | [flake8]
2 | max_line_length = 99
3 |
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # -*- coding: utf-8 -*-
3 | """
4 | Most fault tolerant protocols (including RAFT, PBFT, Zyzzyva, Q/U)
5 | don't guarantee good performance when there are Byzantine faults. Even
6 | the so-called "robust" BFT protocols (like UpRight, RBFT, Prime,
7 | Spinning, and Stellar) have various hard-coded timeout parameters, and
8 | can only guarantee performance when the network behaves approximately as
9 | expected - hence they are best suited to well-controlled settings like
10 | corporate data centers.
11 |
12 | HoneyBadgerBFT is fault tolerance for the wild wild wide-area-network.
13 | HoneyBadger nodes can even stay hidden behind anonymizing relays like
14 | Tor, and the purely-asynchronous protocol will make progress at whatever
15 | rate the network supports.
16 |
17 | """
from setuptools import find_packages, setup
19 |
20 |
# Runtime dependencies installed with the package.
install_requires = [
    'enum34',  # TODO post python3 port: remove
    'gevent',
    'gmpy2',
    'pysocks',
    'pycrypto',
    'ecdsa',
    'zfec>=1.5.0',
    'gipc',
    'coincurve',
]

# Extras for running the test suite (`pip install .[test]`).
tests_require = [
    'coverage',
    'flake8',
    'logutils',
    'pytest',
    'pytest-cov',
    'pytest-mock',
    'pytest-sugar',
    'nose2',
]

# Extras for interactive development (`pip install .[dev]`).
dev_require = [
    'ipdb',
    'ipython',
]

# Extras for building the Sphinx documentation (`pip install .[docs]`).
docs_require = [
    'Sphinx',
    'sphinx-autobuild',
    'sphinx_rtd_theme',
]
54 |
setup(
    name='honeybadgerbft',
    version='0.1.0',
    description='The Honey Badger of BFT Protocols',
    long_description=__doc__,
    author="Andrew Miller et al.",
    url='https://github.com/amiller/HoneyBadgerBFT',
    # find_packages() picks up the sub-packages (honeybadgerbft.core,
    # honeybadgerbft.crypto.*, ...); listing only the top-level package
    # shipped an install with no core modules.  The redundant package_dir
    # mapping (package name == directory name) is dropped.
    packages=find_packages(exclude=['test', 'test.*',
                                    'experiments', 'experiments.*']),
    include_package_data=True,
    install_requires=install_requires,
    license='CRAPL',
    zip_safe=False,
    keywords='distributed systems, cryptography, byzantine fault tolerance',
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Developers',
        'Natural Language :: English',
        'Programming Language :: Python :: 3 :: Only',
        'Programming Language :: Python :: 3.6',
    ],
    python_requires='>=3.6',
    # The suite lives in 'test/' (see pytest.ini testpaths), not 'tests/'.
    test_suite='test',
    extras_require={
        'test': tests_require,
        'dev': dev_require + tests_require + docs_require,
        'docs': docs_require,
    },
)
84 |
--------------------------------------------------------------------------------
/start.sh:
--------------------------------------------------------------------------------
#!/bin/bash

# Expects the following environment variables to be set by the caller:
#   N - total number of parties
#   t - fault tolerance (number of tolerated Byzantine parties)
#   B - batch size for the experiment
# Generates threshold-signature, ECDSA, and threshold-encryption key files,
# then launches the honest-party experiment against them.
python -m honeybadgerbft.crypto.threshsig.generate_keys $N $(( t+1 )) > thsig$((N))_$((t)).keys
python -m honeybadgerbft.crypto.ecdsa.generate_keys_ecdsa $N >ecdsa.keys
python -m honeybadgerbft.crypto.threshenc.generate_keys $N $(( N-2*t )) > thenc$((N))_$((t)).keys

# FIXME: this is outdated:
python -m experiments.honest_party_test -k thsig$((N))_$((t)).keys -e ecdsa.keys -b $B -n $N -t $t -c thenc$((N))_$((t)).keys
9 |
--------------------------------------------------------------------------------
/test.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 |
3 | nose2
4 |
--------------------------------------------------------------------------------
/test/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/initc3/HoneyBadgerBFT-Python/e8bcbc081dfb5d1e7298039d47bbebf7048b8e62/test/__init__.py
--------------------------------------------------------------------------------
/test/conftest.py:
--------------------------------------------------------------------------------
1 | import logging
2 |
3 | from logutils.colorize import ColorizingStreamHandler
4 |
5 | import pytest
6 |
7 |
def pytest_collection_modifyitems(config, items):
    """Auto-skip tests marked ``demo`` unless pytest was run with ``-m demo``."""
    if config.getoption('-m') == 'demo':
        # Demo tests were explicitly requested; leave everything enabled.
        return
    demo_skip_marker = pytest.mark.skip(reason='need "-m demo" option to run')
    for test_item in items:
        if 'demo' in test_item.keywords:
            test_item.add_marker(demo_skip_marker)
16 |
17 |
class BadgerColoredLogs(ColorizingStreamHandler):
    """Stream handler that colors each log line by the emitting node's id."""

    # nodeid -> (background, foreground, bold); only nodes 0-3 are mapped.
    nodeid_map = {
        0: (None, 'green', False),
        1: (None, 'cyan', False),
        2: (None, 'blue', False),
        3: (None, 'magenta', False),
    }

    def colorize(self, message, record):
        """
        Colorize a message for a logging event.

        This implementation uses the ``nodeid_map`` class attribute to
        map the LogRecord's ``nodeid`` extra to a colour/intensity
        setting, which is then applied to the whole message.  Records
        without a mapped nodeid pass through unchanged.

        :param message: The message to colorize.
        :param record: The ``LogRecord`` for the message.
        """
        if record.nodeid in self.nodeid_map:
            bg, fg, bold = self.nodeid_map[record.nodeid]
            params = []
            if bg in self.color_map:
                params.append(str(self.color_map[bg] + 40))
            if fg in self.color_map:
                params.append(str(self.color_map[fg] + 30))
            if bold:
                params.append('1')
            if params:
                # Wrap the message in an ANSI escape sequence.
                message = ''.join((self.csi, ';'.join(params),
                                   'm', message, self.reset))
        return message
51 |
52 |
# Configure root logging once at conftest import time: records are expected
# to carry `nodeid` and `epoch` extras (see the format string) and are
# colorized per node by BadgerColoredLogs.
logging.basicConfig(
    format='node %(nodeid)s|round %(epoch)s> %(module)s:%(funcName)s (%(lineno)d) %(message)s',
    level=logging.DEBUG,
    handlers=[BadgerColoredLogs()],
)
58 |
--------------------------------------------------------------------------------
/test/crypto/conftest.py:
--------------------------------------------------------------------------------
1 | from charm.toolbox.pairinggroup import PairingGroup, ZR, G2
2 | from pytest import fixture
3 |
4 |
@fixture
def pairing_group_MNT224():
    """Charm pairing group over the MNT224 curve."""
    return PairingGroup('MNT224')
8 |
@fixture
def pairing_group_SS512():
    """Charm pairing group over the SS512 curve."""
    return PairingGroup('SS512')
12 |
13 |
@fixture
def pairing_group(request):
    """Pairing group whose curve name is supplied via indirect parametrization."""
    curve = request.param
    return PairingGroup(curve)
18 |
19 |
@fixture
def g():
    """Fixed serialized group-element payload used as deserialization input."""
    return (
        b'm\n\x9f\xcc\xb8\xd9(4\x07\xee\xcd\xdeF\xf9\x14\x1c^\\&\x02\xff'
        b'\xbd\x8e\xaa\x99\xe9b{\xb7\xa1\xa2\x90&hA\xe7\xf0\xc69\x139\xcc'
        b'\xd4\xfbz\xcd\xd1\x14 {\x88w\x11\xae\x04&k2\xeea\x8f\xbe\x91W\x00'
    )
27 |
28 |
@fixture
def g2_mnt224(pairing_group_MNT224):
    """G2 generator for MNT224, with pairing precomputation enabled."""
    g2 = pairing_group_MNT224.hash('geng2', G2)
    g2.initPP()
    return g2
34 |
35 |
@fixture(params=({'count': 5, 'seed': None},))
def polynomial_coefficients(request, pairing_group_MNT224):
    """Five random ZR coefficients for the dealer polynomial (threshold k=5)."""
    return pairing_group_MNT224.random(ZR, **request.param)
39 |
40 |
@fixture(params=(10,))
def sks(request, polynomial_coefficients):
    """Secret-key shares: the polynomial evaluated at 1..players."""
    from honeybadgerbft.crypto.threshsig.boldyreva import polynom_eval
    players = request.param
    return [polynom_eval(i, polynomial_coefficients)
            for i in range(1, players+1)]
47 |
48 |
@fixture
def vk(g2_mnt224, polynomial_coefficients):
    """Master verification key: g2 ** f(0), the polynomial's constant term."""
    return g2_mnt224 ** polynomial_coefficients[0]
52 |
53 |
@fixture
def vks(g2_mnt224, sks):
    """Per-player verification keys: g2 ** sk for each secret-key share."""
    return [g2_mnt224 ** sk for sk in sks]
57 |
58 |
@fixture
def tbls_public_key(vk, vks):
    """Threshold-BLS public key built from the vk/vks fixtures."""
    from honeybadgerbft.crypto.threshsig.boldyreva import TBLSPublicKey
    players = 10  # TODO bind to fixtures
    count = 5     # TODO bind to fixtures
    return TBLSPublicKey(players, count, vk, vks)
65 |
66 |
@fixture
def tbls_private_keys(vk, vks, sks):
    """One TBLSPrivateKey per player (0-based indices, matching boldyreva)."""
    from honeybadgerbft.crypto.threshsig.boldyreva import TBLSPrivateKey
    players = 10  # TODO bind to fixtures
    count = 5     # TODO bind to fixtures
    return [TBLSPrivateKey(players, count, vk, vks, sk, i)
            for i, sk in enumerate(sks)]
74 |
75 |
@fixture
def serialized_tbls_public_key_dict(tbls_public_key):
    """Expected __getstate__ payload: VK/VKs serialized, l and k as-is."""
    from honeybadgerbft.crypto.threshsig.boldyreva import serialize
    return {
        'l': tbls_public_key.l,
        'k': tbls_public_key.k,
        'VK': serialize(tbls_public_key.VK),
        'VKs': [serialize(vk) for vk in tbls_public_key.VKs],
    }
85 |
--------------------------------------------------------------------------------
/test/crypto/ecdsa/test_generate_keys_ecdsa.py:
--------------------------------------------------------------------------------
1 | from collections import namedtuple
2 |
3 | from pytest import mark
4 |
5 |
def test_generate_key_list():
    """generate_key_list(n) should return one ECDSA key per player."""
    from honeybadgerbft.crypto.ecdsa.generate_keys_ecdsa import generate_key_list
    keylist = generate_key_list(10)
    assert len(keylist) == 10
10 |
11 |
def test_main(monkeypatch):
    """main() should run end-to-end with a stubbed argument parser."""
    from honeybadgerbft.crypto.ecdsa.generate_keys_ecdsa import main

    def mock_parse_args(players):
        Args = namedtuple('Args', ('players',))
        args = Args(players)
        return args

    # Bypass sys.argv parsing; pretend the user requested 10 players.
    monkeypatch.setattr('argparse.ArgumentParser.parse_args', lambda s: mock_parse_args(10))
    main()
22 |
--------------------------------------------------------------------------------
/test/crypto/threshenc/test_tpke.py:
--------------------------------------------------------------------------------
1 | from base64 import encodestring, decodestring
2 | from random import shuffle
3 |
4 | from Crypto.Hash import SHA256
5 | from charm.toolbox.pairinggroup import PairingGroup, ZR, G1, G2, pair
6 | from pytest import mark
7 |
8 |
def test_tpke():
    """End-to-end threshold encryption: deal, encrypt, share-decrypt, combine."""
    from honeybadgerbft.crypto.threshenc.tpke import dealer
    PK, SKs = dealer(players=100, k=35)

    m = SHA256.new(b'how').digest()
    ciphertext = PK.encrypt(m)
    U, V, W = ciphertext
    assert PK.verify_ciphertext(U, V, W)

    # Every player's decryption share must verify against the ciphertext.
    shares = [sk.decrypt_share(U, V, W) for sk in SKs]
    for i, share in enumerate(shares):
        assert PK.verify_share(i, share, U, V, W)

    # Any k shares (random subset) must recombine to the plaintext.
    SS = list(range(PK.l))
    for i in range(1):
        shuffle(SS)
        S = set(SS[:PK.k])
        m_ = PK.combine_shares(U, V, W, dict((s, shares[s]) for s in S))
        assert m_ == m
28 |
29 |
def test_ciphertext_generation():
    """Build a TPKEPublicKey by hand and check the ciphertext pairing relation."""
    from honeybadgerbft.crypto.threshenc.tpke import TPKEPublicKey
    players = 10
    k = 5
    group = PairingGroup('SS512')
    # Additive/multiplicative identities of ZR, derived from a random element.
    ZERO = group.random(ZR)*0
    ONE = group.random(ZR)*0 + 1
    g1 = group.hash('geng1', G1)
    g1.initPP()
    g2 = g1  # SS512 is symmetric, so G1 serves for G2 as well

    coefficients = [group.random(ZR) for _ in range(k)]
    secret = coefficients[0]

    # Polynomial evaluation
    def f(x):
        y = ZERO
        xx = ONE
        for coeff in coefficients:
            y += coeff * xx
            xx *= x
        return y

    # Shares of master secret key
    SKs = [f(i) for i in range(1, players+1)]
    assert f(0) == secret

    # Verification keys
    VK = g2 ** secret
    VKs = [g2 ** xx for xx in SKs]

    public_key = TPKEPublicKey(players, k, VK, VKs)
    message_digest = SHA256.new(b'abc123').digest()
    ciphertext = public_key.encrypt(message_digest)
    U, V, W = ciphertext

    assert len(V) == 32
    # Ciphertext validity: e(g1, W) == e(U, H(U||V)).
    # NOTE(review): base64.decodestring was removed in Python 3.9 --
    # should migrate to decodebytes (import at top of file).
    UV = decodestring(group.serialize(U)[2:]) + V
    H = group.hash(UV, G2)
    assert pair(g1, W) == pair(U, H)
70 |
71 |
def test_xor():
    """xor() should bytewise-XOR two equal-length 32-byte strings."""
    from honeybadgerbft.crypto.threshenc.tpke import xor
    x = (b'l\xa1=R\xcap\xc8\x83\xe0\xf0\xbb\x10\x1eBZ\x89'
         b'\xe8bM\xe5\x1d\xb2\xd29%\x93\xafj\x84\x11\x80\x90')
    y = (b'\xb2\xdf\xfeQ3 J7H\xe8yU6S\x05zU\x85\xd3'
         b'\xc1o\xa8E\xa9\xef\x02\x98\x05\xe46\xbf\x9c')
    expected_result = (b"\xde~\xc3\x03\xf9P\x82\xb4\xa8\x18\xc2E(\x11_"
                       b"\xf3\xbd\xe7\x9e$r\x1a\x97\x90\xca\x917o`'?\x0c")
    assert xor(x, y) == expected_result
81 |
82 |
@mark.parametrize('n', (0, 1, 2))
@mark.parametrize('pairing_group', ('SS512',), indirect=('pairing_group',))
def test_deserialize(pairing_group, n, g):
    """deserialize{0,1,2}(raw) should equal deserializing the 'n:<base64>' form.

    NOTE(review): base64.encodestring was removed in Python 3.9 -- should
    migrate to encodebytes (import at top of file).
    """
    from honeybadgerbft.crypto.threshenc import tpke
    deserialize_func = getattr(tpke, 'deserialize{}'.format(n))
    base64_encoded_data = '{}:{}'.format(n, encodestring(g).decode())
    assert (deserialize_func(g) ==
            pairing_group.deserialize(base64_encoded_data.encode()))
91 |
--------------------------------------------------------------------------------
/test/crypto/threshsig/test_boldyreva.py:
--------------------------------------------------------------------------------
1 | import pickle
2 | import random
3 | from base64 import encodestring
4 |
5 | from charm.core.math.pairing import pc_element
6 | from pytest import mark
7 |
8 | from honeybadgerbft.crypto.threshsig.boldyreva import dealer
9 |
10 |
class TestTBLSPublicKey:
    """Unit tests for TBLSPublicKey construction and (un)pickling."""

    def test_init(self, vk, vks):
        """Constructor should store players, threshold, VK, and VKs verbatim."""
        from honeybadgerbft.crypto.threshsig.boldyreva import TBLSPublicKey
        players = 10  # TODO bind to fixtures
        count = 5  # TODO bind to fixtures
        public_key = TBLSPublicKey(players, count, vk, vks)
        assert public_key.l == players
        assert public_key.k == count
        assert public_key.VK == vk
        assert public_key.VKs == vks

    def test_getstate(self, tbls_public_key, serialized_tbls_public_key_dict):
        """__getstate__ should serialize VK/VKs without mutating the object."""
        original_dict = tbls_public_key.__dict__.copy()
        state_dict = tbls_public_key.__getstate__()
        assert len(state_dict) == len(serialized_tbls_public_key_dict)
        assert state_dict['k'] == serialized_tbls_public_key_dict['k']
        assert state_dict['l'] == serialized_tbls_public_key_dict['l']
        assert state_dict['VK'] == serialized_tbls_public_key_dict['VK']
        assert state_dict['VKs'] == serialized_tbls_public_key_dict['VKs']
        assert tbls_public_key.__dict__ == original_dict

    def test_setstate(self, tbls_public_key, serialized_tbls_public_key_dict):
        """__setstate__ should reconstruct an equivalent key from the dict."""
        from honeybadgerbft.crypto.threshsig.boldyreva import TBLSPublicKey
        unset_public_key = TBLSPublicKey(None, None, None, None)
        unset_public_key.__setstate__(serialized_tbls_public_key_dict)
        assert len(unset_public_key.__dict__) == len(tbls_public_key.__dict__)
        assert unset_public_key.__dict__['k'] == tbls_public_key.__dict__['k']
        assert unset_public_key.__dict__['l'] == tbls_public_key.__dict__['l']
        assert unset_public_key.__dict__['VK'] == tbls_public_key.__dict__['VK']
        assert unset_public_key.__dict__['VKs'] == tbls_public_key.__dict__['VKs']

    def test_pickling_and_unpickling(self, tbls_public_key):
        """A pickle round-trip should preserve the full instance state."""
        pickled_obj = pickle.dumps(tbls_public_key)
        unpickled_obj = pickle.loads(pickled_obj)
        assert unpickled_obj.__dict__ == tbls_public_key.__dict__
47 |
48 |
def test_boldyreva():
    """End-to-end threshold BLS: deal, sign shares, combine random subsets."""
    global PK, SKs
    PK, SKs = dealer(players=16,k=5)

    global sigs,h
    sigs = {}
    h = PK.hash_message('hi')
    h.initPP()

    for SK in SKs:
        sigs[SK.i] = SK.sign(h)

    # Any k of the 16 shares (0-based indices) must combine to a valid signature.
    SS = list(range(PK.l))
    for i in range(10):
        random.shuffle(SS)
        S = set(SS[:PK.k])
        sig = PK.combine_shares(dict((s,sigs[s]) for s in S))
        assert PK.verify_signature(sig, h)
67 |
68 |
@mark.parametrize('n', (0, 1, 2))
def test_deserialize_arg(n, g, mocker):
    """deserialize{n} should prefix raw data with 'n:' before group.deserialize.

    NOTE(review): base64.encodestring was removed in Python 3.9 -- should
    migrate to encodebytes (import at top of file).
    """
    from honeybadgerbft.crypto.threshsig import boldyreva
    mocked_deserialize = mocker.patch.object(
        boldyreva.group, 'deserialize', autospec=True)
    deserialize_func = getattr(boldyreva, 'deserialize{}'.format(n))
    base64_encoded_data = '{}:{}'.format(n, encodestring(g).decode())
    deserialize_func(g)
    mocked_deserialize.assert_called_once_with(base64_encoded_data.encode())
78 |
--------------------------------------------------------------------------------
/test/crypto/threshsig/test_boldyreva_gipc.py:
--------------------------------------------------------------------------------
1 | import time
2 | from importlib import reload
3 |
4 | import gevent
5 | import gipc
6 |
7 | from pytest import raises
8 |
9 |
def test_worker(tbls_public_key, tbls_private_keys):
    """_worker should read (h, shares) from its pipe and reply with
    (verification result, combined signature)."""
    from honeybadgerbft.crypto.threshsig.boldyreva_gipc import _worker
    from honeybadgerbft.crypto.threshsig.boldyreva import serialize, deserialize1
    r_pipe, w_pipe = gipc.pipe(duplex=True)
    h = tbls_public_key.hash_message('hi')
    h.initPP()
    signature_shares = {sk.i: sk.sign(h) for sk in tbls_private_keys}
    serialized_h = serialize(h)
    # Keep only the first k shares -- the minimum needed to combine.
    serialized_signature_shares = {
        k: serialize(v) for k, v in signature_shares.items()
        if k in list(signature_shares.keys())[:tbls_public_key.k]
    }
    w_pipe.put((serialized_h, serialized_signature_shares))
    _worker(tbls_public_key, r_pipe)
    siganture_verification_result, serialized_signature = w_pipe.get()
    assert siganture_verification_result is True
    # The worker's combined signature must match combining the same shares here.
    deserialized_signature_shares = {
        k: deserialize1(v) for k, v in serialized_signature_shares.items()}
    expected_serialized_signature = serialize(
        tbls_public_key.combine_shares(deserialized_signature_shares))
    assert serialized_signature == expected_serialized_signature
31 |
32 |
def test_worker_loop(mocker, tbls_public_key):
    """worker_loop should keep calling _worker until it raises.

    _worker is mocked with an ErrorAfter so the otherwise-infinite loop
    terminates after max_calls + 1 invocations.
    """
    from honeybadgerbft.crypto.threshsig import boldyreva_gipc
    mocked_worker = mocker.patch.object(
        boldyreva_gipc, '_worker', autospec=True)
    max_calls = 3
    mocked_worker.side_effect = ErrorAfter(max_calls)
    r_pipe, _ = gipc.pipe(duplex=True)
    with raises(CallableExhausted):
        boldyreva_gipc.worker_loop(tbls_public_key, r_pipe)
    # BUG FIX: this comparison was a bare (unasserted) expression before.
    assert mocked_worker.call_count == max_calls + 1
    mocked_worker.assert_called_with(tbls_public_key, r_pipe)
44 |
45 |
def test_pool():
    """Exercise the gipc-based worker pool: initialize, combine-and-verify
    concurrently via gevent, then reload the module to reset its state."""
    from honeybadgerbft.crypto.threshsig.boldyreva import dealer
    from honeybadgerbft.crypto.threshsig import boldyreva_gipc
    from honeybadgerbft.crypto.threshsig.boldyreva_gipc import (
        initialize, combine_and_verify)
    global PK, SKs
    PK, SKs = dealer(players=64, k=17)

    global sigs,h
    sigs = {}
    h = PK.hash_message('hi')
    h.initPP()
    for SK in SKs:
        sigs[SK.i] = SK.sign(h)

    # initialize() must spawn the worker processes exactly once.
    assert not boldyreva_gipc._procs
    initialize(PK)
    assert boldyreva_gipc._procs

    # Keep only the first k shares (minimum needed to combine).
    sigs = dict(list(sigs.items())[:PK.k])

    # Combine concurrently via gevent greenlets.
    if 1:
        #promises = [pool.apply_async(_combine_and_verify, (_h, sigs2))
        #            for i in range(100)]
        threads = []
        for i in range(3):
            threads.append(gevent.spawn(combine_and_verify, h, sigs))
        print('launched', time.time())
        greenlets = gevent.joinall(threads, timeout=3)
        #for p in promises: assert p.get() == True
        for greenlet in greenlets:
            assert greenlet.value[0]  # TODO check the value
            process = greenlet.value[1]
            process.terminate()
            process.join()
        print('done', time.time())

    # Dead branch kept from the original; `_h` and `sigs2` are undefined,
    # so enabling it as-is would raise NameError.
    if 0:
        print('launched', time.time())
        for i in range(10):
            # XXX Since _combine_and_verify is not defined, use
            # combine_and_verify instead, although not sure if that was the
            # initial intention.
            #_combine_and_verify(_h, sigs2)
            combine_and_verify(_h, sigs2)
        print('done', time.time())

    print('work done')
    # reload() clears the module-level process registry.
    assert boldyreva_gipc._procs
    reload(boldyreva_gipc)
    assert not boldyreva_gipc._procs
100 |
101 |
class ErrorAfter(object):
    """Callable raising ``CallableExhausted`` once invoked more than
    ``limit`` times; used to break otherwise-infinite loops under test.

    credit: Igor Sobreira
    http://igorsobreira.com/2013/03/17/testing-infinite-loops.html
    """

    def __init__(self, limit):
        self.limit = limit
        self.calls = 0

    def __call__(self, x, y):
        self.calls = self.calls + 1
        if self.calls <= self.limit:
            return
        raise CallableExhausted
117 |
118 |
class CallableExhausted(Exception):
    """Raised by ``ErrorAfter`` once its call limit has been exceeded."""
121 |
--------------------------------------------------------------------------------
/test/crypto/threshsig/test_boldyreva_pool.py:
--------------------------------------------------------------------------------
1 | from multiprocessing.pool import Pool
2 |
3 | from pytest import mark
4 |
5 |
def test_initialize(tbls_public_key):
    """initialize() should create the module-level Pool and record the PK."""
    from honeybadgerbft.crypto.threshsig.boldyreva_pool import (
        initialize, _pool, _pool_PK)
    assert _pool is None
    assert _pool_PK is None
    initialize(tbls_public_key)
    # Re-import to observe the rebound module-level globals.
    from honeybadgerbft.crypto.threshsig.boldyreva_pool import _pool, _pool_PK
    assert isinstance(_pool, Pool)
    assert _pool_PK == tbls_public_key
    _pool.terminate()
16 |
17 |
def test_combine_and_verify(tbls_public_key, tbls_private_keys):
    """combine_and_verify should succeed with exactly k signature shares."""
    from honeybadgerbft.crypto.threshsig.boldyreva_pool import (
        initialize, combine_and_verify)
    h = tbls_public_key.hash_message('hi')
    h.initPP()
    signature_shares = {sk.i: sk.sign(h) for sk in tbls_private_keys}
    # Keep only the first k shares -- the minimum needed to combine.
    signature_shares = {
        k: v for k, v in signature_shares.items()
        if k in list(signature_shares.keys())[:tbls_public_key.k]
    }
    initialize(tbls_public_key)
    from honeybadgerbft.crypto.threshsig.boldyreva_pool import _pool
    combine_and_verify(h, signature_shares)
    _pool.terminate()
32 |
33 |
def test__combine_and_verify(tbls_public_key, tbls_private_keys):
    """The serialized worker entry point should combine k serialized shares."""
    from honeybadgerbft.crypto.threshsig.boldyreva import serialize
    from honeybadgerbft.crypto.threshsig.boldyreva_pool import _combine_and_verify
    h = tbls_public_key.hash_message('hi')
    h.initPP()
    serialized_h = serialize(h)
    signature_shares = {sk.i: sk.sign(h) for sk in tbls_private_keys}
    serialized_signature_shares = {
        k: serialize(v) for k, v in signature_shares.items()
        if k in list(signature_shares.keys())[:tbls_public_key.k]
    }
    _combine_and_verify(
        serialized_h, serialized_signature_shares, pk=tbls_public_key)
47 |
48 |
def test_pool():
    """Smoke test delegating to the module's own pool_test()."""
    from honeybadgerbft.crypto.threshsig.boldyreva_pool import pool_test
    pool_test()
52 |
--------------------------------------------------------------------------------
/test/crypto/threshsig/test_generate_enckeys.py:
--------------------------------------------------------------------------------
1 | from collections import namedtuple
2 |
3 | from pytest import mark
4 |
5 |
@mark.parametrize('k', (None, 3))
def test_generate_keys(k):
    """_generate_keys(10, k) should work with and without an explicit k.

    NOTE(review): the expected length 5 presumably reflects a 5-element
    result tuple from _generate_keys -- verify against the implementation.
    """
    from honeybadgerbft.crypto.threshenc.generate_keys import _generate_keys
    keys = _generate_keys(10, k)
    assert len(keys) == 5
11 |
12 |
def test_main(monkeypatch):
    """main() should run end-to-end with a stubbed argument parser."""
    from honeybadgerbft.crypto.threshenc.generate_keys import main

    def mock_parse_args(players, k):
        Args = namedtuple('Args', ('players', 'k'))
        args = Args(players, k)
        return args

    # Bypass sys.argv parsing; pretend the user asked for 10 players, k=4.
    monkeypatch.setattr('argparse.ArgumentParser.parse_args', lambda s: mock_parse_args(10, 4))
    main()
23 |
--------------------------------------------------------------------------------
/test/crypto/threshsig/test_generate_sigkeys.py:
--------------------------------------------------------------------------------
1 | from collections import namedtuple
2 |
3 | from pytest import mark
4 |
5 |
@mark.parametrize('k', (None, 3))
def test_generate_keys(k):
    """_generate_keys(10, k) should work with and without an explicit k.

    NOTE(review): the expected length 5 presumably reflects a 5-element
    result tuple from _generate_keys -- verify against the implementation.
    """
    from honeybadgerbft.crypto.threshsig.generate_keys import _generate_keys
    keys = _generate_keys(10, k)
    assert len(keys) == 5
11 |
12 |
def test_main(monkeypatch):
    """main() should run end-to-end with a stubbed argument parser."""
    from honeybadgerbft.crypto.threshsig.generate_keys import main

    def mock_parse_args(players, k):
        Args = namedtuple('Args', ('players', 'k'))
        args = Args(players, k)
        return args

    # Bypass sys.argv parsing; pretend the user asked for 10 players, k=4.
    monkeypatch.setattr('argparse.ArgumentParser.parse_args', lambda s: mock_parse_args(10, 4))
    main()
23 |
--------------------------------------------------------------------------------
/test/crypto/threshsig/test_millerrabin.py:
--------------------------------------------------------------------------------
1 | from pytest import mark, raises
2 |
3 |
@mark.parametrize('n', (-1, 0, 1))
def test_is_probable_prime_raises(n):
    """Inputs below 2 are rejected via AssertionError."""
    from honeybadgerbft.crypto.threshsig.millerrabin import is_probable_prime
    with raises(AssertionError):
        is_probable_prime(n)
9 |
10 |
# Known primes/composites, including two ~760-bit values differing only in
# the leading digit, to exercise the large-number path.
@mark.parametrize('n,is_prime', (
    (2, True),
    (3, True),
    (4, False),
    (5, True),
    (123456789, False),
    (int('64380800680355443923012985496149269915138610753401343'
         '29180734395241382648423706300613697153947391340909229'
         '37332590384720397133335969549256322620979036686633213'
         '903952966175107096769180017646161851573147596390153'), True),
    (int('74380800680355443923012985496149269915138610753401343'
         '29180734395241382648423706300613697153947391340909229'
         '37332590384720397133335969549256322620979036686633213'
         '903952966175107096769180017646161851573147596390153'), False),
))
def test_is_probable_prime(n, is_prime):
    """is_probable_prime should classify the known values correctly."""
    from honeybadgerbft.crypto.threshsig.millerrabin import is_probable_prime
    assert is_probable_prime(n) is is_prime
29 |
30 |
def test_is_probable_prime_under_1000():
    """Exhaustive check below 1000: 168 primes, ending at the known tail."""
    from honeybadgerbft.crypto.threshsig.millerrabin import is_probable_prime
    primes_under_1000 = [i for i in range(2, 1000) if is_probable_prime(i)]
    assert len(primes_under_1000) == 168
    assert primes_under_1000[-10:] == [937, 941, 947, 953, 967,
                                       971, 977, 983, 991, 997]
37 |
38 |
@mark.parametrize('bit_length', range(12, 120, 12))
def test_generate_large_prime(bit_length):
    """generateLargePrime should return a truthy value for modest bit sizes."""
    from honeybadgerbft.crypto.threshsig.millerrabin import generateLargePrime
    assert generateLargePrime(bit_length)
43 |
44 |
def test_generate_large_prime_fails(monkeypatch):
    """With the primality test forced to False, the failure *string* is returned."""
    from honeybadgerbft.crypto.threshsig import millerrabin
    monkeypatch.setattr(millerrabin, 'is_probable_prime', lambda k: False)
    assert millerrabin.generateLargePrime(1) == 'Failure after 100.0 tries.'
49 |
--------------------------------------------------------------------------------
/test/demo_attack_issue59.py:
--------------------------------------------------------------------------------
1 | import random
2 |
3 | import gevent
4 | from gevent.queue import Queue
5 | from pytest import mark
6 |
7 |
@mark.demo
def test_issue59_attack_demo(mocker, monkeypatch):
    """Demo of the issue-59 liveness attack on binary agreement.

    Runs three honest BA instances plus one Byzantine party
    (byz_ba_issue_59), with the CONF-phase handlers stubbed out so the
    protocol matches the vulnerable (pre-fix) variant.  The test "passes"
    by all four output queues eventually yielding a value; under the
    attack this may hang, which is the point of the demo (skipped unless
    run with "-m demo").
    """
    from .byzantine import byz_ba_issue_59, broadcast_router
    from .test_binaryagreement import _make_coins
    from honeybadgerbft.core import binaryagreement

    # Disable the CONF phase: messages are dropped ...
    def mocked_conf_message_receiver(**kwargs):
        pass

    # ... and the CONF wait trivially returns the proposed values.
    def mocked_conf_phase_handler(**kwargs):
        return kwargs['values']

    monkeypatch.setattr(
        binaryagreement, 'handle_conf_messages', mocked_conf_message_receiver)
    monkeypatch.setattr(
        binaryagreement, 'wait_for_conf_values', mocked_conf_phase_handler)

    N = 4
    f = 1
    seed = None
    sid = 'sidA'
    rnd = random.Random(seed)
    sends, recvs = broadcast_router(N)
    threads = []
    inputs = []
    outputs = []

    coins_seed = rnd.random()
    coins = _make_coins(sid+'COIN', N, f, coins_seed)

    for i in range(4):
        inputs.append(Queue())
        outputs.append(Queue())

    # Party 3 is the Byzantine adversary.
    byz_thread = gevent.spawn(byz_ba_issue_59, sid, 3, N, f, coins[3],
                              inputs[3].get, outputs[3].put_nowait, sends[3], recvs[3])
    threads.append(byz_thread)

    for i in (2, 0, 1):
        t = gevent.spawn(binaryagreement.binaryagreement, sid, i, N, f, coins[i],
                         inputs[i].get, outputs[i].put_nowait, sends[i], recvs[i])
        threads.append(t)

    inputs[0].put(0)  # A_0
    inputs[1].put(0)  # A_1
    inputs[2].put(1)  # B
    inputs[3].put(0)  # F (x)

    # Block until every party (including the adversary) outputs something.
    for i in range(N):
        outputs[i].get()
58 |
--------------------------------------------------------------------------------
/test/test_commoncoin.py:
--------------------------------------------------------------------------------
1 | import unittest
2 | import gevent
3 | import random
4 | from gevent.queue import Queue
5 | from honeybadgerbft.core.commoncoin import shared_coin
6 | from honeybadgerbft.crypto.threshsig.boldyreva import dealer
7 |
def simple_router(N, maxdelay=0.01, seed=None):
    """Build N connected broadcast channels with random delivery delay.

    @return (receives, sends)
    """
    rnd = random.Random(seed)
    queues = [Queue() for _ in range(N)]

    def makeBroadcast(sender):
        def _unicast(dest, msg):
            # Deliver asynchronously after a random network-like delay.
            delay = rnd.random() * maxdelay
            gevent.spawn_later(delay, queues[dest].put, (sender, msg))

        def _bc(msg):
            for dest in range(N):
                _unicast(dest, msg)
        return _bc

    def makeRecv(dest):
        def _recv():
            sender, msg = queues[dest].get()
            return (sender, msg)
        return _recv

    broadcasts = [makeBroadcast(i) for i in range(N)]
    receives = [makeRecv(j) for j in range(N)]
    return (broadcasts, receives)
36 |
37 |
def byzantine_router(N, maxdelay=0.01, seed=None, **byzargs):
    """Build connected channels with random delay, optionally tampering with
    one node's receive endpoint to simulate Byzantine message corruption.

    Supported ``byzargs``:
      * ``node``: index of the node whose receiver is tampered with.
      * ``sig_redundant``: relabel node 3's share index downward so the
        receiver sees a duplicated share index.
      * ``sig_err``: relabel node 3's share index upward so share
        verification fails at the receiver.

    :return: (receives, sends) endpoints.
    """
    rnd = random.Random(seed)
    queues = [Queue() for _ in range(N)]

    def makeBroadcast(i):
        def _send(j, o):
            gevent.spawn_later(rnd.random() * maxdelay, queues[j].put, (i, o))

        def _bc(o):
            for j in range(N):
                _send(j, o)
        return _bc

    def makeRecv(j):
        def _recv():
            return queues[j].get()

        def _recv_redundant():
            sender, payload = queues[j].get()
            if sender == 3 and payload[1] == 3:
                # Decrement the index so the share collides with another one.
                payload = payload[:1] + (payload[1] - 1,) + payload[2:]
            return (sender, payload)

        def _recv_fail_pk_verify_share():
            sender, payload = queues[j].get()
            if sender == 3 and payload[1] == 3:
                # Increment the index so share verification fails.
                payload = payload[:1] + (payload[1] + 1,) + payload[2:]
            return (sender, payload)

        if j == byzargs.get('node') and byzargs.get('sig_redundant'):
            return _recv_redundant
        if j == byzargs.get('node') and byzargs.get('sig_err'):
            return _recv_fail_pk_verify_share
        return _recv

    return ([makeBroadcast(i) for i in range(N)],
            [makeRecv(j) for j in range(N)])
84 |
85 |
### Test
def _test_commoncoin(N=4, f=1, seed=None):
    """Run N shared-coin instances over a random-delay network and check that
    every round yields the same coin value at every node.

    Fix: ``router_seed`` was computed but never used (the router got the raw
    ``seed``); pass the derived seed, matching the other test modules.
    """
    # Generate threshold signature keys with threshold f+1.
    PK, SKs = dealer(N, f+1)
    sid = 'sidA'
    rnd = random.Random(seed)
    router_seed = rnd.random()
    sends, recvs = simple_router(N, seed=router_seed)
    coins = [shared_coin(sid, i, N, f, PK, SKs[i], sends[i], recvs[i])
             for i in range(N)]

    for round_ in range(10):
        threads = [gevent.spawn(coin, round_) for coin in coins]
        gevent.joinall(threads)
        # All nodes must agree on the coin value for this round.
        assert len(set(t.value for t in threads)) == 1
    return True
103 |
104 |
def test_commoncoin():
    """Smoke test: run the shared coin with default parameters (N=4, f=1)."""
    _test_commoncoin()
107 |
108 |
def test_when_signature_share_verify_fails():
    """The coin must still reach agreement at every node when node 2 receives
    a share corrupted so that share verification fails.

    Fix: ``router_seed`` was computed but never used; pass it to the router.
    """
    N = 4
    f = 1
    seed = None
    PK, SKs = dealer(N, f+1)
    sid = 'sidA'
    rnd = random.Random(seed)
    router_seed = rnd.random()
    sends, recvs = byzantine_router(N, seed=router_seed, node=2, sig_err=True)
    coins = [shared_coin(sid, i, N, f, PK, SKs[i], sends[i], recvs[i])
             for i in range(N)]
    for round_ in range(10):
        threads = [gevent.spawn(c, round_) for c in coins]
        gevent.joinall(threads)
        # All nodes must agree despite the corrupted share.
        assert len(set(t.value for t in threads)) == 1
123 |
124 |
def test_when_redundant_signature_share_is_received():
    """The coin must still reach agreement at every node when node 2 receives
    the same share index twice (relabeled by the Byzantine router).

    Fix: ``router_seed`` was computed but never used; pass it to the router.
    """
    N = 4
    f = 1
    seed = None
    PK, SKs = dealer(N, f+1)
    sid = 'sidA'
    rnd = random.Random(seed)
    router_seed = rnd.random()
    sends, recvs = byzantine_router(N, seed=router_seed, node=2, sig_redundant=True)
    coins = [shared_coin(sid, i, N, f, PK, SKs[i], sends[i], recvs[i])
             for i in range(N)]
    for round_ in range(10):
        threads = [gevent.spawn(c, round_) for c in coins]
        gevent.joinall(threads)
        # All nodes must agree despite the duplicated share.
        assert len(set(t.value for t in threads)) == 1
139 |
--------------------------------------------------------------------------------
/test/test_commonsubset.py:
--------------------------------------------------------------------------------
1 | import unittest
2 | import gevent
3 | import random
4 | from gevent.event import Event
5 | from gevent.queue import Queue
6 | from honeybadgerbft.core.commoncoin import shared_coin
7 | from honeybadgerbft.core.binaryagreement import binaryagreement
8 | from honeybadgerbft.core.reliablebroadcast import reliablebroadcast
9 | from honeybadgerbft.core.commonsubset import commonsubset
10 | from honeybadgerbft.crypto.threshsig.boldyreva import dealer
11 | from collections import defaultdict
12 |
13 | from pytest import mark
14 |
15 |
def simple_router(N, maxdelay=0.005, seed=None):
    """Builds a set of N point-to-point channels with random delivery delay.

    Fix: removed the unused local ``_threads``.

    :param N: number of nodes.
    :param maxdelay: maximum delivery delay in seconds.
    :param seed: optional seed for the delay RNG, for reproducible runs.
    :return: (sends, receives) endpoint lists.
    """
    rnd = random.Random(seed)
    queues = [Queue() for _ in range(N)]

    def makeSend(i):
        def _send(j, o):
            # Deliver asynchronously after a random network-like delay.
            delay = rnd.random() * maxdelay
            gevent.spawn_later(delay, queues[j].put_nowait, (i, o))
        return _send

    def makeRecv(j):
        def _recv():
            (i, o) = queues[j].get()
            return (i, o)
        return _recv

    return ([makeSend(i) for i in range(N)],
            [makeRecv(j) for j in range(N)])
43 |
44 |
45 |
### Make the threshold signature common coins
def _make_commonsubset(sid, pid, N, f, PK, SK, input, send, recv):
    """Wire up one node's asynchronous common subset (ACS) instance.

    Spawns N reliable broadcasts and N binary agreements (each ABA with its
    own shared coin), plus a loop that demultiplexes incoming messages by
    tag, then returns the commonsubset coroutine result.

    Fix: the unknown-tag branch used a bare ``raise`` with no active
    exception, which would surface as a confusing
    ``RuntimeError: No active exception to re-raise``; raise an explicit
    ValueError instead.

    :param sid: session identifier used to namespace sub-protocol instances.
    :param pid: this node's index.
    :param input: blocking callable yielding this node's ACS input.
    :param send: ``send(j, o)`` point-to-point channel.
    :param recv: blocking callable returning ``(sender, message)``.
    """

    def broadcast(o):
        for j in range(N): send(j, o)

    coin_recvs = [None] * N
    aba_recvs = [None] * N
    rbc_recvs = [None] * N

    aba_inputs = [Queue(1) for _ in range(N)]
    aba_outputs = [Queue(1) for _ in range(N)]
    rbc_outputs = [Queue(1) for _ in range(N)]

    def _setup(j):
        # One coin, one ABA, and one RBC instance per node index j.
        def coin_bcast(o):
            broadcast(('ACS_COIN', j, o))

        coin_recvs[j] = Queue()
        coin = shared_coin(sid + 'COIN' + str(j), pid, N, f, PK, SK,
                           coin_bcast, coin_recvs[j].get)

        def aba_bcast(o):
            broadcast(('ACS_ABA', j, o))

        aba_recvs[j] = Queue()
        aba = gevent.spawn(binaryagreement, sid+'ABA'+str(j), pid, N, f, coin,
                           aba_inputs[j].get, aba_outputs[j].put_nowait,
                           aba_bcast, aba_recvs[j].get)

        def rbc_send(k, o):
            send(k, ('ACS_RBC', j, o))

        # Only leader gets input
        rbc_input = input if j == pid else None
        rbc_recvs[j] = Queue()
        rbc = gevent.spawn(reliablebroadcast, sid+'RBC'+str(j), pid, N, f, j,
                           rbc_input, rbc_recvs[j].get, rbc_send)
        rbc_outputs[j] = rbc.get  # block for output from rbc

    for j in range(N): _setup(j)

    def _recv():
        # Demultiplex incoming messages onto the right sub-protocol queue.
        while True:
            (sender, (tag, j, msg)) = recv()
            if tag == 'ACS_COIN': coin_recvs[j].put_nowait((sender,msg))
            elif tag == 'ACS_RBC' : rbc_recvs [j].put_nowait((sender,msg))
            elif tag == 'ACS_ABA' : aba_recvs [j].put_nowait((sender,msg))
            else:
                raise ValueError('Unknown tag: {}'.format(tag))
    gevent.spawn(_recv)

    return commonsubset(pid, N, f, rbc_outputs,
                        [_.put_nowait for _ in aba_inputs],
                        [_.get for _ in aba_outputs])
102 |
### Test asynchronous common subset
def _test_commonsubset(N=4, f=1, seed=None):
    """Run N ACS instances; node 1 gets no input, yet all nodes must agree.

    :param N: number of nodes.
    :param f: fault tolerance.
    :param seed: optional seed for keys and routing, for reproducible runs.
    """
    # Generate keys
    sid = 'sidA'
    PK, SKs = dealer(N, f+1, seed=seed)
    rnd = random.Random(seed)
    #print('SEED:', seed)
    router_seed = rnd.random()
    sends, recvs = simple_router(N, seed=router_seed)

    inputs = [None] * N
    threads = [None] * N
    for i in range(N):
        inputs[i] = Queue(1)

        threads[i] = gevent.spawn(_make_commonsubset, sid, i, N, f,
                                  PK, SKs[i],
                                  inputs[i].get, sends[i], recvs[i])

    # Withhold node 1's input: ACS must still terminate with N-1 inputs.
    for i in range(N):
        if i == 1: continue
        inputs[i].put('<[ACS Input %d]>' % i)

    #gevent.killall(threads[N-f:])
    #gevent.sleep(3)
    #for i in range(N-f, N):
    #    inputs[i].put(0)
    try:
        outs = [threads[i].get() for i in range(N)]

        # Consistency check: every node must output the identical subset.
        assert len(set(outs)) == 1

    except KeyboardInterrupt:
        gevent.killall(threads)
        raise
139 |
140 | from nose2.tools import params
141 |
142 | #@params(*range(100))
143 | #def test_commonsubset(i):
144 | # _test_commonsubset(seed=i)
145 | #_test_commonsubset(seed=1)
146 |
147 |
#@mark.skip('python 3 problem with gevent')
def test_commonsubset():
    """End-to-end ACS test with default parameters (N=4, f=1)."""
    _test_commonsubset()
151 |
152 |
--------------------------------------------------------------------------------
/test/test_honeybadger.py:
--------------------------------------------------------------------------------
1 | import random
2 | from collections import defaultdict
3 |
4 | import gevent
5 | from gevent.event import Event
6 | from gevent.queue import Queue
7 | from pytest import fixture, mark, raises
8 |
9 | import honeybadgerbft.core.honeybadger
10 | #reload(honeybadgerbft.core.honeybadger)
11 | from honeybadgerbft.core.honeybadger import HoneyBadgerBFT
12 | from honeybadgerbft.crypto.threshsig.boldyreva import dealer
13 | from honeybadgerbft.crypto.threshenc import tpke
14 | from honeybadgerbft.core.honeybadger import BroadcastTag
15 |
16 |
@fixture
def recv_queues(request):
    """Fixture: a BroadcastReceiverQueues with one queue per node for each
    tag, except TPKE which gets a single shared queue."""
    from honeybadgerbft.core.honeybadger import BroadcastReceiverQueues
    number_of_nodes = getattr(request, 'N', 4)
    queues = {}
    for tag in BroadcastTag:
        if tag == BroadcastTag.TPKE:
            continue
        queues[tag.value] = [Queue() for _ in range(number_of_nodes)]
    queues[BroadcastTag.TPKE.value] = Queue()
    return BroadcastReceiverQueues(**queues)
27 |
28 |
29 | from pytest import mark
30 |
31 |
def simple_router(N, maxdelay=0.005, seed=None):
    """Builds a set of N point-to-point channels with random delivery delay.

    Every third sender (``i % 3 == 0``) has its outgoing delay inflated
    1000x, to exercise the protocol in the presence of very slow nodes.

    Fix: removed the unused local ``_threads``.

    :return: (sends, receives) endpoint lists.
    """
    rnd = random.Random(seed)
    queues = [Queue() for _ in range(N)]

    def makeSend(i):
        def _send(j, o):
            delay = rnd.random() * maxdelay
            if not i%3:
                # Simulate a very laggy sender for nodes 0, 3, 6, ...
                delay *= 1000
            gevent.spawn_later(delay, queues[j].put_nowait, (i, o))
        return _send

    def makeRecv(j):
        def _recv():
            (i, o) = queues[j].get()
            return (i, o)
        return _recv

    return ([makeSend(i) for i in range(N)],
            [makeRecv(j) for j in range(N)])
62 |
63 |
### Test asynchronous common subset
def _test_honeybadger(N=4, f=1, seed=None):
    """Run a full HoneyBadgerBFT network of N nodes (with some slow senders —
    see simple_router) and check that all nodes deliver the same output.

    :param N: number of nodes.
    :param f: fault tolerance.
    :param seed: optional seed for keys and routing, for reproducible runs.
    """
    sid = 'sidA'
    # Generate threshold sig keys
    sPK, sSKs = dealer(N, f+1, seed=seed)
    # Generate threshold enc keys
    ePK, eSKs = tpke.dealer(N, f+1)

    rnd = random.Random(seed)
    #print 'SEED:', seed
    router_seed = rnd.random()
    sends, recvs = simple_router(N, seed=router_seed)

    badgers = [None] * N
    threads = [None] * N
    for i in range(N):
        badgers[i] = HoneyBadgerBFT(sid, i, 1, N, f,
                                    sPK, sSKs[i], ePK, eSKs[i],
                                    sends[i], recvs[i])
        threads[i] = gevent.spawn(badgers[i].run)

    # Submit three batches of transactions to every node.
    for i in range(N):
        #if i == 1: continue
        badgers[i].submit_tx('<[HBBFT Input %d]>' % i)

    for i in range(N):
        badgers[i].submit_tx('<[HBBFT Input %d]>' % (i+10))

    for i in range(N):
        badgers[i].submit_tx('<[HBBFT Input %d]>' % (i+20))

    #gevent.killall(threads[N-f:])
    #gevent.sleep(3)
    #for i in range(N-f, N):
    #    inputs[i].put(0)
    try:
        outs = [threads[i].get() for i in range(N)]

        # Consistency check: all nodes must agree on the delivered output.
        assert len(set(outs)) == 1

    except KeyboardInterrupt:
        gevent.killall(threads)
        raise
108 |
109 |
#@mark.skip('python 3 problem with gevent')
def test_honeybadger():
    """End-to-end HoneyBadgerBFT test with default parameters (N=4, f=1)."""
    _test_honeybadger()
113 |
114 |
@mark.parametrize('message', ('broadcast message',))
@mark.parametrize('node_id', range(4))
@mark.parametrize('tag', [e.value for e in BroadcastTag])
@mark.parametrize('sender', range(4))
def test_broadcast_receiver_loop(sender, tag, node_id, message, recv_queues):
    """The receiver loop must route a tagged message onto the matching queue.

    Fix: the final assertion read ``assert recv_queue,get() == ...`` — the
    comma made it a two-part ``assert <queue>, <message>`` that always
    passed (the queue object is truthy); it must be ``recv_queue.get()``.
    """
    from honeybadgerbft.core.honeybadger import broadcast_receiver_loop
    recv = Queue()
    recv.put((sender, (tag, node_id, message)))
    gevent.spawn(broadcast_receiver_loop, recv.get, recv_queues)
    recv_queue = getattr(recv_queues, tag)
    if tag != BroadcastTag.TPKE.value:
        # All tags except TPKE use one queue per node.
        recv_queue = recv_queue[node_id]
    assert recv_queue.get() == (sender, message)
128 |
129 |
@mark.parametrize('message', ('broadcast message',))
@mark.parametrize('node_id', range(4))
@mark.parametrize('tag', ('BogusTag', None, 123))
@mark.parametrize('sender', range(4))
def test_broadcast_receiver_loop_raises(sender, tag, node_id, message, recv_queues):
    """An unknown tag must raise UnknownTagError and leave every queue empty."""
    from honeybadgerbft.core.honeybadger import broadcast_receiver_loop
    from honeybadgerbft.exceptions import UnknownTagError
    recv = Queue()
    recv.put((sender, (tag, node_id, message)))
    with raises(UnknownTagError) as exc:
        broadcast_receiver_loop(recv.get, recv_queues)
    expected_err_msg = 'Unknown tag: {}! Must be one of {}.'.format(
        tag, BroadcastTag.__members__.keys())
    assert exc.value.args[0] == expected_err_msg
    # The message must not have been routed onto any queue.
    queues_by_tag = recv_queues._asdict()
    tpke_queue = queues_by_tag.pop(BroadcastTag.TPKE.value)
    assert tpke_queue.empty()
    assert all(q.empty() for queues in queues_by_tag.values() for q in queues)
148 |
--------------------------------------------------------------------------------
/test/test_honeybadger_block.py:
--------------------------------------------------------------------------------
1 | import unittest
2 | import gevent
3 | import random
4 | from gevent.event import Event
5 | from gevent.queue import Queue
6 | from honeybadgerbft.core.commoncoin import shared_coin
7 | from honeybadgerbft.core.binaryagreement import binaryagreement
8 | from honeybadgerbft.core.reliablebroadcast import reliablebroadcast
9 | from honeybadgerbft.core.commonsubset import commonsubset
10 | import honeybadgerbft.core.honeybadger_block
11 | #reload(honeybadgerbft.core.honeybadger_block)
12 | from honeybadgerbft.core.honeybadger_block import honeybadger_block
13 | from honeybadgerbft.crypto.threshsig.boldyreva import dealer
14 | from honeybadgerbft.crypto.threshenc import tpke
15 | from collections import defaultdict
16 |
17 | from pytest import mark
18 |
19 |
def simple_router(N, maxdelay=0.005, seed=None):
    """Builds a set of N point-to-point channels with random delivery delay.

    Fix: removed the unused local ``_threads``.

    :return: (sends, receives) endpoint lists.
    """
    rnd = random.Random(seed)
    queues = [Queue() for _ in range(N)]

    def makeSend(i):
        def _send(j, o):
            # Deliver asynchronously after a random network-like delay.
            delay = rnd.random() * maxdelay
            gevent.spawn_later(delay, queues[j].put_nowait, (i, o))
        return _send

    def makeRecv(j):
        def _recv():
            (i, o) = queues[j].get()
            return (i, o)
        return _recv

    return ([makeSend(i) for i in range(N)],
            [makeRecv(j) for j in range(N)])
47 |
48 |
### Make the threshold signature common coins
def _make_honeybadger(sid, pid, N, f, sPK, sSK, ePK, eSK, input, send, recv):
    """Wire up one node's honeybadger_block: N RBC + N ABA instances (the
    ACS), one TPKE broadcast channel, and a tag-based receive loop.

    :param sid: session identifier used to namespace sub-protocol instances.
    :param pid: this node's index.
    :param input: blocking callable yielding this node's transaction batch.
    :param send: ``send(j, o)`` point-to-point channel.
    :param recv: blocking callable returning ``(sender, message)``.
    :return: the honeybadger_block coroutine result.
    """
    from honeybadgerbft.core.honeybadger import (BroadcastTag,
                                                 BroadcastReceiverQueues,
                                                 broadcast_receiver_loop)

    def broadcast(o):
        for j in range(N): send(j, o)

    coin_recvs = [None] * N
    aba_recvs = [None] * N
    rbc_recvs = [None] * N

    aba_inputs = [Queue(1) for _ in range(N)]
    aba_outputs = [Queue(1) for _ in range(N)]
    rbc_outputs = [Queue(1) for _ in range(N)]

    my_rbc_input = Queue(1)

    def _setup(j):
        # One coin, one ABA, and one RBC instance per node index j.
        def coin_bcast(o):
            broadcast(('ACS_COIN', j, o))

        coin_recvs[j] = Queue()
        coin = shared_coin(sid + 'COIN' + str(j), pid, N, f, sPK, sSK,
                           coin_bcast, coin_recvs[j].get)

        def aba_bcast(o):
            broadcast(('ACS_ABA', j, o))

        aba_recvs[j] = Queue()
        aba = gevent.spawn(binaryagreement, sid+'ABA'+str(j), pid, N, f, coin,
                           aba_inputs[j].get, aba_outputs[j].put_nowait,
                           aba_bcast, aba_recvs[j].get)

        def rbc_send(k, o):
            send(k, ('ACS_RBC', j, o))

        # Only leader gets input
        rbc_input = my_rbc_input.get if j == pid else None
        rbc_recvs[j] = Queue()
        rbc = gevent.spawn(reliablebroadcast, sid+'RBC'+str(j), pid, N, f, j,
                           rbc_input, rbc_recvs[j].get, rbc_send)
        rbc_outputs[j] = rbc.get  # block for output from rbc

    # N instances of ABA, RBC
    for j in range(N): _setup(j)

    # One instance of TPKE
    def tpke_bcast(o):
        broadcast(('TPKE', 0, o))

    tpke_recv = Queue()

    # One instance of ACS
    acs = gevent.spawn(commonsubset, pid, N, f, rbc_outputs,
                       [_.put_nowait for _ in aba_inputs],
                       [_.get for _ in aba_outputs])

    # Route incoming messages to the right sub-protocol queue by tag.
    recv_queues = BroadcastReceiverQueues(**{
        BroadcastTag.ACS_COIN.value: coin_recvs,
        BroadcastTag.ACS_RBC.value: rbc_recvs,
        BroadcastTag.ACS_ABA.value: aba_recvs,
        BroadcastTag.TPKE.value: tpke_recv,
    })
    gevent.spawn(broadcast_receiver_loop, recv, recv_queues)

    return honeybadger_block(pid, N, f, ePK, eSK, input,
                             acs_in=my_rbc_input.put_nowait, acs_out=acs.get,
                             tpke_bcast=tpke_bcast, tpke_recv=tpke_recv.get)
119 |
120 |
### Test asynchronous common subset
def _test_honeybadger(N=4, f=1, seed=None):
    """Run one honeybadger_block round across N nodes and check agreement.

    :param N: number of nodes.
    :param f: fault tolerance.
    :param seed: optional seed for keys and routing, for reproducible runs.
    """
    # Generate threshold sig keys
    sid = 'sidA'
    sPK, sSKs = dealer(N, f+1, seed=seed)
    ePK, eSKs = tpke.dealer(N, f+1)

    rnd = random.Random(seed)
    #print('SEED:', seed)
    router_seed = rnd.random()
    sends, recvs = simple_router(N, seed=router_seed)

    inputs = [None] * N
    threads = [None] * N
    for i in range(N):
        inputs[i] = Queue(1)
        threads[i] = gevent.spawn(_make_honeybadger, sid, i, N, f,
                                  sPK, sSKs[i],
                                  ePK, eSKs[i],
                                  inputs[i].get, sends[i], recvs[i])

    # Every node submits an input batch.
    for i in range(N):
        #if i == 1: continue
        inputs[i].put('<[HBBFT Input %d]>' % i)

    #gevent.killall(threads[N-f:])
    #gevent.sleep(3)
    #for i in range(N-f, N):
    #    inputs[i].put(0)
    try:
        outs = [threads[i].get() for i in range(N)]

        # Consistency check: all nodes must produce the same block.
        assert len(set(outs)) == 1

    except KeyboardInterrupt:
        gevent.killall(threads)
        raise
159 |
160 |
#@mark.skip('python 3 problem with gevent')
def test_honeybadger():
    """End-to-end honeybadger_block test with default parameters (N=4, f=1)."""
    _test_honeybadger()
164 |
165 |
def test_honeybadger_block_with_missing_input():
    """If one node never gets an input, no block completes within the timeout
    and every protocol thread must still be unfinished (value None)."""
    N = 4
    f = 1
    seed = None
    sid = 'sidA'
    sPK, sSKs = dealer(N, f+1, seed=seed)
    ePK, eSKs = tpke.dealer(N, f+1)
    rnd = random.Random(seed)
    router_seed = rnd.random()
    sends, recvs = simple_router(N, seed=router_seed)
    inputs = [Queue(1) for _ in range(N)]
    threads = [
        gevent.spawn(_make_honeybadger, sid, i, N, f,
                     sPK, sSKs[i],
                     ePK, eSKs[i],
                     inputs[i].get, sends[i], recvs[i])
        for i in range(N)
    ]

    # Give every node except node 1 an input.
    for i in range(N):
        if i != 1:
            inputs[i].put('<[HBBFT Input %d]>' % i)

    gevent.joinall(threads, timeout=0.5)
    assert all(t.value is None for t in threads)
191 |
192 |
def broadcast_receiver_duplicates_share(recv_func, recv_queues):
    """Receiver that forwards each message once, except TPKE messages, which
    it enqueues twice to simulate a duplicated decryption share."""
    from honeybadgerbft.core.honeybadger import BroadcastTag
    sender, (tag, j, msg) = recv_func()
    target = getattr(recv_queues, tag)

    if tag == BroadcastTag.TPKE.value:
        # Deliver the TPKE message twice on the single shared queue.
        for _ in range(2):
            target.put_nowait((sender, msg))
    else:
        # Per-node queues for every other tag.
        target[j].put_nowait((sender, msg))
204 |
205 |
def test_when_duplicate_share_is_received(monkeypatch):
    """The protocol must still complete when a TPKE share arrives twice
    (the duplicating receiver defined above is patched in)."""
    from honeybadgerbft.core import honeybadger
    monkeypatch.setattr(
        honeybadger, 'broadcast_receiver', broadcast_receiver_duplicates_share)
    _test_honeybadger()
211 |
--------------------------------------------------------------------------------
/test/test_threshenc.py:
--------------------------------------------------------------------------------
1 | import unittest
2 | from charm.toolbox.pairinggroup import PairingGroup,ZR,G1,G2,GT,pair
3 | import random
4 | from honeybadgerbft.crypto.threshenc.tpke import TPKEPublicKey, TPKEPrivateKey, dealer
5 | from Crypto.Hash import SHA256
6 | from Crypto import Random
7 | from Crypto.Cipher import AES
8 |
def test_threshenc():
    """Encrypt, share-decrypt, and recombine with a 35-of-100 threshold key."""
    PK, SKs = dealer(players=100, k=35)

    message = SHA256.new(b'hello world').digest()
    ciphertext = PK.encrypt(message)
    assert PK.verify_ciphertext(*ciphertext)

    # Every node's decryption share must verify against the public key.
    shares = [sk.decrypt_share(*ciphertext) for sk in SKs]
    for idx, share in enumerate(shares):
        assert PK.verify_share(idx, share, *ciphertext)

    indices = list(range(PK.l))
    for _ in range(1):
        random.shuffle(indices)
        subset = set(indices[:PK.k])
        # Any k shares must recombine to the original plaintext.
        recovered = PK.combine_shares(
            *ciphertext, dict((s, shares[s]) for s in subset))
        assert recovered == message
28 |
def test_threshenc2():
    """Failure case: a corrupted decryption share must prevent recombination
    from reproducing the original plaintext."""
    PK, SKs = dealer(players=100, k=35)

    message = SHA256.new(b'hello world').digest()
    ciphertext = PK.encrypt(message)
    assert PK.verify_ciphertext(*ciphertext)

    shares = [sk.decrypt_share(*ciphertext) for sk in SKs]
    for idx, share in enumerate(shares):
        assert PK.verify_share(idx, share, *ciphertext)

    indices = list(range(PK.l))
    random.shuffle(indices)
    # Perturb the first selected share so recombination cannot succeed.
    shares[indices[0]] += shares[indices[0]]
    subset = set(indices[:PK.k])

    try:
        recovered = PK.combine_shares(
            *ciphertext, dict((s, shares[s]) for s in subset))
        assert recovered == message
    except AssertionError:
        pass
    else:
        assert False, "Combine shares should have raised an error"
53 |
--------------------------------------------------------------------------------