├── .gitignore ├── .gitmodules ├── .travis.yml ├── Cargo.toml ├── LICENSE ├── README.md ├── arch.png ├── docs ├── Makefile ├── README.md ├── conf.py ├── deploy.sh ├── extracted-comments.rst ├── extracted-comments │ └── .gitignore ├── index.rst ├── introduction.rst └── requirements.txt ├── dummy ├── Cargo.toml └── src │ ├── dummy_node.rs │ └── main.rs ├── fal ├── Cargo.toml ├── README.md └── src │ ├── client.rs │ ├── lib.rs │ ├── node.rs │ ├── state.rs │ ├── transaction.rs │ ├── transport.rs │ └── virtual_machine.rs ├── kvdb ├── Cargo.toml └── src │ └── main.rs ├── lachesis-rs ├── Cargo.toml ├── proptest-regressions │ └── event │ │ └── event.txt └── src │ ├── bin │ ├── lachesis_server.rs │ ├── lachesis_tcp.rs │ └── ws_client.rs │ ├── errors.rs │ ├── event.rs │ ├── event │ ├── event_hash.rs │ ├── event_signature.rs │ └── parents.rs │ ├── hashgraph.rs │ ├── lachesis.rs │ ├── lachesis │ ├── frame.rs │ ├── opera.rs │ └── parents_list.rs │ ├── lib.rs │ ├── node.rs │ ├── peer.rs │ ├── printable_hash.rs │ ├── round.rs │ ├── server.rs │ ├── server │ ├── heartbeat.rs │ ├── http_handler.rs │ ├── ws_handler.rs │ └── ws_message.rs │ ├── swirlds.rs │ └── tcp_server.rs └── llvm-vm-backend ├── Cargo.toml └── src └── main.rs /.gitignore: -------------------------------------------------------------------------------- 1 | /target 2 | **/*.rs.bk 3 | Cargo.lock 4 | *.swp 5 | *.xml 6 | .idea 7 | *.iml 8 | *.patch 9 | *~ 10 | *.bak 11 | *.iml 12 | -------------------------------------------------------------------------------- /.gitmodules: -------------------------------------------------------------------------------- 1 | [submodule "serial_hacking_fantom_rbvm"] 2 | path = serial_hacking_fantom_rbvm 3 | url = https://gitlab.com/bibloman/serial_hacking_fantom_rbvm 4 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | language: rust 2 | rust: 3 | - latest 4 | 
sudo: false 5 | 6 | # only cache cargo subcommand binaries and .so libs 7 | # the build artifacts take a lot of space and are slower to 8 | # cache than to actually rebuild anyway... 9 | # We need to cache the whole .cargo directory to keep the 10 | # .crates.toml file. 11 | cache: 12 | directories: 13 | - /home/travis/install 14 | - /home/travis/.cargo 15 | 16 | # But don't cache the cargo registry 17 | before_cache: 18 | - rm -rf /home/travis/.cargo/registry 19 | before_script: 20 | - | 21 | if command -v cargo >/dev/null; then 22 | export PATH=$HOME/.cargo/bin:$PATH 23 | mkdir $(pwd)/socket 24 | export XDG_RUNTIME_DIR="$(pwd)/socket" 25 | cargo fetch 26 | rustup component add rustfmt-preview 27 | if [ -n "$CLIPPY" ]; then 28 | rustup component add clippy 29 | fi 30 | fi 31 | script: 32 | - | 33 | if [ -n "$BUILD_FMT" ]; then 34 | cargo fmt --all -- --check 35 | rustfmt --check src/*.rs 36 | elif [ -n "$CLIPPY" ]; then 37 | cargo clippy --all -- -D warnings \ 38 | -A clippy::deprecated_cfg_attr \ 39 | -A clippy::for_loop_over_option 40 | elif [ -n "$CARGO_TEST" ]; then 41 | cargo test --all --features "$FEATURES" 42 | fi 43 | 44 | matrix: 45 | include: 46 | - rust: stable 47 | env: BUILD_FMT=1 48 | - rust: stable 49 | env: CLIPPY=1 50 | - rust: stable 51 | env: CARGO_TEST=1 52 | notifications: 53 | slack: 54 | secure: 
'UzMDp+QAQmykQRh3X2zPZnxICvEH9YPynCPQ6AEsmdVb9Or3VleaYM8i+E7x5jT1HFloBJyI1G0mqrFQIbYDQug5nFUsfB2CYi7L0dpUtvoWbDMGoJ/lDI/wJDt64lqhHf9hs1aeYq454Zqcmx6/oKCgnRSyXip6y/mtOlQYjwhGN0D+TARF+6IohqJMg5iEQg4sWvugVur3GbFbatNeevLOYuhezdun4S8vhzQKsFJBK5QnIuufEyeHHjA3Ii5/yyqXUnxtRnLctjPWR+EROBO/mgqBUXPTwheiYqg9Gw9lYSRuBrtP+eixSKWnknju12YzlhAIF/HegRGzLG2PUGgLuB0TBAraaLYOODpBanVfN7DMvhg72Embdy0lZzVYPN8ImwGIU7jisWPI3x6Zz3zdI1lXZaMFS2ijlbRbYZE1YoOhS0iFQuYUc4dwu+D9Ql+IqZZ+BwE5AOy3cY149WrwGX0xaqwPad80HNHy5PUKKLxE8ZLXD0AQsWxi5M7UrHvJ7XOMRPEkskm6zNBuSXrU1vRibbtOaAC+xdPWop5j0Chf99CuIyH6NeqWVwpvl8ddTJGFZeoNXBXOp6+WAxyAJa1Z77rOvmjhdgrvcFh0o3176KCpFBQM0rWt1IVy1LFFsCP9KzMjBN1vAeEaCJZF0yQjXg/58lUTnR5a28A=' 55 | - language: python 56 | sudo: required 57 | dist: xenial 58 | python: 59 | - '3.7' 60 | env: 61 | - COMMENT_EXTRACTION=comment-extraction-master 62 | - EXTRACTED_DIR=docs/extracted-comments 63 | - INPUT_DIR=fantom-common-rs/src 64 | - LANGUAGE=rust 65 | - ROOT_URL=https://github.com/Fantom-Foundation/fantom-common-rs 66 | before_install: 67 | - curl -L https://github.com/dev10/comment-extraction/archive/master.tar.gz -o 68 | comment-extraction.tgz 69 | - tar xf comment-extraction.tgz 70 | install: 71 | - pip install -r $COMMENT_EXTRACTION/requirements.txt 72 | script: 73 | - > 74 | python $COMMENT_EXTRACTION/antlr.py 75 | -l $LANGUAGE 76 | -o $EXTRACTED_DIR 77 | -ct $TRAVIS_COMMIT 78 | -ru $ROOT_URL 79 | -d $INPUT_DIR 80 | after_success: 81 | - cd docs/ 82 | - pip install -r requirements.txt 83 | - make html 84 | - "./deploy.sh" 85 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [workspace] 2 | 3 | members = [ 4 | "dummy", 5 | "kvdb", 6 | "lachesis-rs", 7 | "llvm-vm-backend", 8 | "fal", 9 | "solidity-frontend", 10 | ] 11 | -------------------------------------------------------------------------------- /LICENSE: 
-------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2018 Fantom Foundation 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | lachesis-rs 2 | =========== 3 | [![Build Status](https://travis-ci.org/Fantom-foundation/lachesis-rs.svg?branch=master)](https://travis-ci.org/Fantom-foundation/lachesis-rs) 4 | 5 | Lachesis BFT consensus for permission-less networks, in Rust. 6 | 7 | ## RFCs 8 | 9 | https://github.com/Fantom-foundation/fantom-rfcs 10 | 11 | ## Architecture 12 | Each layer will have 1 or more library crates, for shared functionality and traits. Square borders refer to crates, rounded borders refers to logical distinctions, with a shared interface. 
13 | 14 | ![Crate relationship](arch.png) 15 | 16 | 60 | 61 | ## Developer guide 62 | 63 | Install the latest version of [Rust](https://www.rust-lang.org). We tend to use nightly versions. [CLI tool for installing Rust](https://rustup.rs). Also install LLVM-7 development package. 64 | 65 | We use [rust-clippy](https://github.com/rust-lang-nursery/rust-clippy) linters to improve code quality. 66 | 67 | There are plenty of [IDEs](https://areweideyet.com) and other [Rust development tools to consider](https://github.com/rust-unofficial/awesome-rust#development-tools). 68 | 69 | ### Step-by-step guide 70 | ```bash 71 | # Install Rust (nightly) 72 | $ curl https://sh.rustup.rs -sSf | sh -s -- --default-toolchain nightly 73 | # Install cargo-make (cross-platform feature-rich reimplementation of Make) 74 | $ cargo install --force cargo-make 75 | # Install rustfmt (Rust formatter) 76 | $ rustup component add rustfmt 77 | # Install llvm-7 development package. 78 | # NB: this command is for Ubuntu 18.04, adjust it according to your system 79 | $ apt install llvm-7-dev 80 | # Clone this repo 81 | $ git clone https://github.com/Fantom-foundation/lachesis-rs && cd lachesis-rs 82 | # Run tests 83 | $ cargo test 84 | # Format, build and test 85 | $ cargo make 86 | ``` 87 | -------------------------------------------------------------------------------- /arch.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Fantom-foundation/lachesis-rs/92919d0554966a9025d6658508d1d13f11c9eb1c/arch.png -------------------------------------------------------------------------------- /docs/Makefile: -------------------------------------------------------------------------------- 1 | # Minimal makefile for Sphinx documentation 2 | # 3 | 4 | # You can set these variables from the command line. 5 | SPHINXOPTS ?= 6 | SPHINXBUILD ?= python -msphinx 7 | SPHINXPROJ = Lachesis 8 | SOURCEDIR = . 
9 | BUILDDIR = _build 10 | 11 | # Put it first so that "make" without argument is like "make help". 12 | help: 13 | @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 14 | 15 | .PHONY: help Makefile 16 | 17 | # Catch-all target: route all unknown targets to Sphinx using the new 18 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). 19 | %: Makefile 20 | @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 21 | -------------------------------------------------------------------------------- /docs/README.md: -------------------------------------------------------------------------------- 1 | # Building the docs 2 | 3 | ```bash 4 | pip install virtualenvwrapper 5 | source /usr/local/bin/virtualenvwrapper.sh 6 | (mkvirtualenv -r requirements.txt docs) 7 | workon docs 8 | make html 9 | google-chrome _build/html/index.html 10 | ```` 11 | 12 | To have Travis deploy to Github Pages, the GITHUB_USER and GITHUB_TOKEN need to be set in either the Travis dashboard 13 | or in a Travis encrypted environment variable eg: 14 | 15 | ```bash 16 | travis encrypt GH_TOKEN=github_user:1230000000000000000000000000000000000000 --com --add env.matrix 17 | ``` 18 | -------------------------------------------------------------------------------- /docs/conf.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # 3 | # Lachesis documentation build configuration file, created by 4 | # sphinx-quickstart on Wed Oct 18 09:22:15 2017. 5 | # 6 | # This file is execfile()d with the current directory set to its 7 | # containing dir. 8 | # 9 | # Note that not all possible configuration values are present in this 10 | # autogenerated file. 11 | # 12 | # All configuration values have a default; values that are commented out 13 | # serve to show the default. 
14 | 15 | # If extensions (or modules to document with autodoc) are in another directory, 16 | # add these directories to sys.path here. If the directory is relative to the 17 | # documentation root, use os.path.abspath to make it absolute, like shown here. 18 | # 19 | # import os 20 | # import sys 21 | # sys.path.insert(0, os.path.abspath('.')) 22 | from datetime import date 23 | import sphinx_rtd_theme 24 | 25 | # -- General configuration ------------------------------------------------ 26 | 27 | # If your documentation needs a minimal Sphinx version, state it here. 28 | # 29 | # needs_sphinx = '1.0' 30 | 31 | # Add any Sphinx extension module names here, as strings. They can be 32 | # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom 33 | # ones. 34 | extensions = [] 35 | 36 | # Add any paths that contain templates here, relative to this directory. 37 | templates_path = ['_templates'] 38 | 39 | # The suffix(es) of source filenames. 40 | # You can specify multiple suffix as a list of string: 41 | # 42 | # source_suffix = ['.rst', '.md'] 43 | source_suffix = '.rst' 44 | 45 | # The master toctree document. 46 | master_doc = 'index' 47 | 48 | # General information about the project. 49 | project = u'Lachesis-rs' 50 | copyright = u'%d, Mosaic Networks' % date.today().year 51 | author = u'Mosaic Networks' 52 | 53 | # The version info for the project you're documenting, acts as replacement for 54 | # |version| and |release|, also used in various other places throughout the 55 | # built documents. 56 | # 57 | # The short X.Y version. 58 | version = u'0' 59 | # The full version, including alpha/beta/rc tags. 60 | release = u'0' 61 | 62 | # The language for content autogenerated by Sphinx. Refer to documentation 63 | # for a list of supported languages. 64 | # 65 | # This is also used if you do content translation via gettext catalogs. 66 | # Usually you set "language" from the command line for these cases. 
67 | language = None 68 | 69 | # List of patterns, relative to source directory, that match files and 70 | # directories to ignore when looking for source files. 71 | # This patterns also effect to html_static_path and html_extra_path 72 | exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store'] 73 | 74 | # The name of the Pygments (syntax highlighting) style to use. 75 | pygments_style = 'sphinx' 76 | 77 | # If true, `todo` and `todoList` produce output, else they produce nothing. 78 | todo_include_todos = False 79 | 80 | 81 | # -- Options for HTML output ---------------------------------------------- 82 | 83 | # The theme to use for HTML and HTML Help pages. See the documentation for 84 | # a list of builtin themes. 85 | # 86 | html_theme = 'sphinx_rtd_theme' 87 | 88 | # Theme options are theme-specific and customize the look and feel of a theme 89 | # further. For a list of options available for each theme, see the 90 | # documentation. 91 | # 92 | # html_theme_options = {} 93 | 94 | # Add any paths that contain custom static files (such as style sheets) here, 95 | # relative to this directory. They are copied after the builtin static files, 96 | # so a file named "default.css" will overwrite the builtin "default.css". 97 | html_static_path = ['_static'] 98 | 99 | # Custom sidebar templates, must be a dictionary that maps document names 100 | # to template names. 101 | # 102 | # This is required for the alabaster theme 103 | # refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars 104 | html_sidebars = { 105 | '**': [ 106 | 'about.html', 107 | 'navigation.html', 108 | 'relations.html', # needs 'show_related': True theme option to display 109 | 'searchbox.html', 110 | 'donate.html', 111 | ] 112 | } 113 | 114 | 115 | # -- Options for HTMLHelp output ------------------------------------------ 116 | 117 | # Output file base name for HTML help builder. 
118 | htmlhelp_basename = 'Lachesisdoc' 119 | 120 | 121 | # -- Options for LaTeX output --------------------------------------------- 122 | 123 | latex_elements = { 124 | # The paper size ('letterpaper' or 'a4paper'). 125 | # 126 | # 'papersize': 'letterpaper', 127 | 128 | # The font size ('10pt', '11pt' or '12pt'). 129 | # 130 | # 'pointsize': '10pt', 131 | 132 | # Additional stuff for the LaTeX preamble. 133 | # 134 | # 'preamble': '', 135 | 136 | # Latex figure (float) alignment 137 | # 138 | # 'figure_align': 'htbp', 139 | } 140 | 141 | # Grouping the document tree into LaTeX files. List of tuples 142 | # (source start file, target name, title, 143 | # author, documentclass [howto, manual, or own class]). 144 | latex_documents = [ 145 | (master_doc, 'Lachesis.tex', u'Lachesis Documentation', 146 | u'Mosaic Networks', 'manual'), 147 | ] 148 | 149 | 150 | # -- Options for manual page output --------------------------------------- 151 | 152 | # One entry per manual page. List of tuples 153 | # (source start file, name, description, authors, manual section). 154 | man_pages = [ 155 | (master_doc, 'lachesis', u'Lachesis Documentation', 156 | [author], 1) 157 | ] 158 | 159 | 160 | # -- Options for Texinfo output ------------------------------------------- 161 | 162 | # Grouping the document tree into Texinfo files. 
List of tuples 163 | # (source start file, target name, title, author, 164 | # dir menu entry, description, category) 165 | texinfo_documents = [ 166 | (master_doc, 'Lachesis', u'Lachesis Documentation', 167 | author, 'Mosaic Networks ltd', 'Modular Blockchain Software', 168 | 'Miscellaneous'), 169 | ] 170 | 171 | 172 | 173 | -------------------------------------------------------------------------------- /docs/deploy.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -e # Exit with nonzero exit code if anything fails 3 | 4 | SOURCE_BRANCH="master" 5 | TARGET_BRANCH="gh-pages" 6 | 7 | REPO=$(git config remote.origin.url) 8 | SHA=$(git rev-parse --verify HEAD) 9 | HTTPS_REPO=${REPO/https:\/\/github.com\//https://${GITHUB_USER}:${GITHUB_TOKEN}@github.com/} 10 | OUT_DIR="cloned-gh-pages" 11 | 12 | echo "Repo: " ${REPO} 13 | echo "HTTPS_REPO: " ${HTTPS_REPO} 14 | 15 | # Pull requests and commits to other branches shouldn't try to deploy, just build to verify 16 | if [[ ${TRAVIS_PULL_REQUEST} != "false" || ${TRAVIS_BRANCH} != ${SOURCE_BRANCH} ]]; then 17 | echo "Skipping deploy; just doing a build." 18 | exit 0 19 | fi 20 | 21 | # Clone the existing gh-pages for this repo into gh-pages/ 22 | # Create a new empty branch if gh-pages doesn't exist yet (should only happen on first deploy) 23 | git clone ${HTTPS_REPO} ${OUT_DIR} 24 | 25 | echo "Entering gh-pages output folder '${OUT_DIR}'" 26 | cd ${OUT_DIR} 27 | git checkout ${TARGET_BRANCH} || git checkout --orphan ${TARGET_BRANCH} 28 | 29 | # Clean out existing contents 30 | git rm -rf . || exit 0 31 | 32 | echo "currently in dir: " $(pwd) 33 | ls -l 34 | git status 35 | 36 | SPHINX_BUILD_HTML=../_build/html 37 | echo "Copying Sphinx html from " 38 | ls -l ${SPHINX_BUILD_HTML} 39 | cp -R ${SPHINX_BUILD_HTML}/* . 
40 | 41 | echo "gh-pages contents now looks like:" 42 | ls -la 43 | 44 | git config user.name "Travis CI" 45 | git config user.email "travis@travis-ci.org" 46 | 47 | echo "adding .nojekyll file" 48 | touch .nojekyll 49 | 50 | git add . 51 | git status 52 | 53 | # Commit and push changes using $GITHUB_TOKEN 54 | git commit -m "Deploy to Github Pages from commit: ${SHA}" 55 | git status 56 | echo "Pushing changes to ${HTTPS_REPO} ${TARGET_BRANCH} from dir $(pwd)" 57 | git push --set-upstream origin ${TARGET_BRANCH} 58 | 59 | echo "Done updating gh-pages" 60 | -------------------------------------------------------------------------------- /docs/extracted-comments.rst: -------------------------------------------------------------------------------- 1 | Extracted Comments 2 | ------------------ 3 | 4 | .. toctree:: 5 | :glob: 6 | 7 | extracted-comments/* 8 | -------------------------------------------------------------------------------- /docs/extracted-comments/.gitignore: -------------------------------------------------------------------------------- 1 | # Ignore everything in this directory, files are generated from code comments 2 | * 3 | # Except this file 4 | !.gitignore 5 | -------------------------------------------------------------------------------- /docs/index.rst: -------------------------------------------------------------------------------- 1 | .. Lachesis documentation master file, created by 2 | sphinx-quickstart on Wed Oct 18 09:22:15 2017. 3 | You can adapt this file completely to your liking, but it should at least 4 | contain the root `toctree` directive. 5 | 6 | .. _index: 7 | 8 | Welcome to Lachesis-rs's documentation! 9 | ======================================= 10 | 11 | Lachesis-rs 12 | ----------- 13 | 14 | .. 
toctree:: 15 | :maxdepth: 2 16 | 17 | introduction.rst 18 | extracted-comments.rst 19 | -------------------------------------------------------------------------------- /docs/introduction.rst: -------------------------------------------------------------------------------- 1 | .. _introduction: 2 | 3 | Introduction 4 | ============ 5 | 6 | What is Lachesis? 7 | ----------------- 8 | 9 | Lachesis is an open-source software component intended for developers who want to 10 | build peer-to-peer (p2p) applications, mobile or other, without having to 11 | implement their own p2p networking layer from scratch. Under the hood, it 12 | enables many computers to behave as one; a technique known as state machine 13 | replication. 14 | 15 | Lachesis is designed to easily plug into applications written in any programming 16 | language. Developers can focus on building the application logic and simply 17 | integrate with Lachesis to handle the replication aspect. Basically, Lachesis will 18 | connect to other Lachesis nodes and guarantee that everyone processes the same 19 | commands in the same order. To do this, it uses p2p networking and a Byzantine 20 | Fault Tolerant (BFT) consensus algorithm. 21 | 22 | Lachesis is: 23 | 24 | - **Asynchronous**: 25 | Participants have the freedom to process commands at different times. 26 | - **Leaderless**: 27 | No participant plays a 'special' role. 28 | - **Byzantine Fault-Tolerant**: 29 | Supports one third of faulty nodes, including malicious behavior. 30 | - **Final**: 31 | Lachesis's output can be used immediately, no need for block confirmations, 32 | etc. 
-------------------------------------------------------------------------------- /docs/requirements.txt: -------------------------------------------------------------------------------- 1 | sphinx 2 | sphinx-autobuild 3 | recommonmark 4 | sphinx_rtd_theme 5 | -------------------------------------------------------------------------------- /dummy/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "dummy" 3 | version = "0.1.0" 4 | authors = ["Agustin Chiappe Berrini "] 5 | edition = "2018" 6 | 7 | [dependencies] 8 | env_logger = "0.6.0" 9 | failure = "0.1.3" 10 | lachesis-rs = { path = "../lachesis-rs" } 11 | log = "0.4" 12 | rand = "0.6.0" 13 | ring = "0.13.4" 14 | untrusted = "0.6.2" 15 | -------------------------------------------------------------------------------- /dummy/src/dummy_node.rs: -------------------------------------------------------------------------------- 1 | use failure::Error; 2 | use lachesis_rs::{BTreeHashgraph, EventHash, HashgraphWire, Node, Peer, PeerId, Swirlds}; 3 | use ring::rand::SystemRandom; 4 | use ring::signature; 5 | 6 | fn create_node(rng: &mut SystemRandom) -> Result, Error> { 7 | let hashgraph = BTreeHashgraph::new(); 8 | let pkcs8_bytes = signature::Ed25519KeyPair::generate_pkcs8(rng) 9 | .map_err(|e| Error::from_boxed_compat(Box::new(e)))?; 10 | let kp = signature::Ed25519KeyPair::from_pkcs8(untrusted::Input::from(&pkcs8_bytes)) 11 | .map_err(|e| Error::from_boxed_compat(Box::new(e)))?; 12 | Swirlds::new(kp, hashgraph) 13 | } 14 | 15 | pub struct DummyNode { 16 | id: PeerId, 17 | pub node: Swirlds, 18 | } 19 | 20 | impl DummyNode { 21 | pub fn new(rng: &mut SystemRandom) -> Result { 22 | match create_node(rng) { 23 | Ok(node) => Ok(DummyNode { 24 | id: node.get_id(), 25 | node, 26 | }), 27 | Err(e) => Err(e), 28 | } 29 | } 30 | } 31 | 32 | impl Peer for DummyNode { 33 | fn get_sync( 34 | &self, 35 | _pk: PeerId, 36 | _h: Option<&BTreeHashgraph>, 37 | ) -> 
Result<(EventHash, BTreeHashgraph), Error> { 38 | let (eh, wire): (EventHash, HashgraphWire) = self.node.respond_message(None)?; 39 | let hashgraph = BTreeHashgraph::from(wire); 40 | Ok((eh, hashgraph)) 41 | } 42 | fn address(&self) -> String { 43 | String::from(String::from_utf8_lossy(&self.id)) 44 | } 45 | fn id(&self) -> &PeerId { 46 | &self.id 47 | } 48 | } 49 | -------------------------------------------------------------------------------- /dummy/src/main.rs: -------------------------------------------------------------------------------- 1 | #![feature(never_type)] 2 | 3 | #[macro_use] 4 | extern crate log; 5 | 6 | mod dummy_node; 7 | 8 | use std::env::args; 9 | use std::sync::Arc; 10 | use std::thread; 11 | use std::time::Duration; 12 | 13 | use failure::Error; 14 | use rand; 15 | 16 | use self::dummy_node::DummyNode; 17 | use lachesis_rs::Node; 18 | 19 | const USAGE: &'static str = "Usage: dummy [number of nodes]"; 20 | 21 | fn create_node(rng: &mut ring::rand::SystemRandom) -> Result { 22 | DummyNode::new(rng) 23 | } 24 | 25 | fn spawn_node(node: &Arc) -> (thread::JoinHandle, thread::JoinHandle) { 26 | let answer_thread_node = node.clone(); 27 | let sync_thread_node = node.clone(); 28 | let answer_handler = thread::spawn(move || loop { 29 | answer_thread_node.node.respond_message(None).unwrap(); 30 | thread::sleep(Duration::from_millis(100)); 31 | }); 32 | let sync_handle = thread::spawn(move || { 33 | let mut rng = rand::thread_rng(); 34 | let mut counter = 0usize; 35 | let node_id = sync_thread_node.node.get_id(); 36 | loop { 37 | if counter % 100 == 0 { 38 | let head = sync_thread_node.node.get_head().unwrap(); 39 | let (n_rounds, n_events) = sync_thread_node.node.get_stats().unwrap(); 40 | info!( 41 | "Node {:?}: Head {:?} Rounds {:?} Pending events {:?}", 42 | node_id, head, n_rounds, n_events 43 | ); 44 | } 45 | match sync_thread_node.node.run(&mut rng) { 46 | Ok(_) => {} 47 | Err(e) => panic!("Error! 
{}", e), 48 | }; 49 | counter += 1; 50 | thread::sleep(Duration::from_millis(100)); 51 | } 52 | }); 53 | (answer_handler, sync_handle) 54 | } 55 | 56 | fn main() { 57 | env_logger::init(); 58 | let args: Vec = args().collect(); 59 | if args.len() != 2 { 60 | panic!(USAGE); 61 | } 62 | let mut rng = ring::rand::SystemRandom::new(); 63 | let n_nodes = args[1].parse::().unwrap(); 64 | let mut nodes = Vec::with_capacity(n_nodes); 65 | for _ in 0..n_nodes { 66 | nodes.push(Arc::new(create_node(&mut rng).unwrap())); 67 | } 68 | let mut handles = Vec::with_capacity(n_nodes * 2); 69 | 70 | for node in nodes.iter() { 71 | for peer in nodes.iter() { 72 | if peer.node.get_id() != node.node.get_id() { 73 | node.node.add_node(peer.clone()).unwrap(); 74 | } 75 | } 76 | } 77 | 78 | for node in nodes.iter() { 79 | let (handle1, handle2) = spawn_node(node); 80 | handles.push(handle1); 81 | handles.push(handle2); 82 | } 83 | for handle in handles { 84 | handle.join().unwrap(); 85 | } 86 | } 87 | -------------------------------------------------------------------------------- /fal/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "fal" 3 | version = "0.1.0" 4 | authors = ["rishflab "] 5 | edition = "2018" 6 | 7 | [dependencies] 8 | failure = "0.1.5" 9 | -------------------------------------------------------------------------------- /fal/README.md: -------------------------------------------------------------------------------- 1 | # Fantom Abstraction Layer 2 | 3 | An abstration layer for consensus algorithms, virtual machines, nodes and clients that 4 | form the Fantom Framework. 
-------------------------------------------------------------------------------- /fal/src/client.rs: -------------------------------------------------------------------------------- 1 | use crate::transaction::{Transaction, TransactionHash, TransactionStatus}; 2 | use crate::transport::{Message, Transport, TransportError}; 3 | 4 | pub trait Client, W: Message, X: TransportError> { 5 | fn submit_transaction(tx_hash: TransactionHash, tx: T) -> TransactionStatus; 6 | fn check_transaction_status(tx_hash: TransactionHash) -> TransactionStatus; 7 | } 8 | -------------------------------------------------------------------------------- /fal/src/lib.rs: -------------------------------------------------------------------------------- 1 | pub mod client; 2 | pub mod node; 3 | pub mod state; 4 | pub mod transaction; 5 | pub mod transport; 6 | pub mod virtual_machine; 7 | -------------------------------------------------------------------------------- /fal/src/node.rs: -------------------------------------------------------------------------------- 1 | use crate::transaction::{AbsoluteOrdering, Transaction, TransactionHash}; 2 | 3 | pub trait Node { 4 | fn get_transaction_by_hash(tx_hash: TransactionHash) -> T; 5 | fn get_transaction_by_order(tx_order: AbsoluteOrdering) -> T; 6 | } 7 | -------------------------------------------------------------------------------- /fal/src/state.rs: -------------------------------------------------------------------------------- 1 | pub trait State {} 2 | 3 | pub trait Program {} 4 | -------------------------------------------------------------------------------- /fal/src/transaction.rs: -------------------------------------------------------------------------------- 1 | pub enum TransactionStatus { 2 | Pending, 3 | Failed, 4 | Complete, 5 | } 6 | 7 | pub trait Transaction { 8 | fn get_absolute_ordering() -> AbsoluteOrdering; 9 | } 10 | 11 | pub type AbsoluteOrdering = u64; 12 | 13 | pub type TransactionHash = [u8; 32]; 14 | 
-------------------------------------------------------------------------------- /fal/src/transport.rs: -------------------------------------------------------------------------------- 1 | pub trait Transport { 2 | fn send_message(msg: T) -> Result; 3 | } 4 | 5 | pub trait Message {} 6 | 7 | pub trait TransportError {} 8 | -------------------------------------------------------------------------------- /fal/src/virtual_machine.rs: -------------------------------------------------------------------------------- 1 | use crate::state::{Program, State}; 2 | 3 | pub trait VirtualMachine { 4 | fn transition(state: T, program: U) -> U; 5 | } 6 | -------------------------------------------------------------------------------- /kvdb/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "kvdb" 3 | version = "0.1.0" 4 | authors = ["Agustin Chiappe Berrini "] 5 | edition = "2018" 6 | 7 | [dependencies] 8 | bincode = "1.0.1" 9 | configure = "0.1.1" 10 | env_logger = "0.6.0" 11 | failure = "0.1.3" 12 | lachesis-rs = { path = "../lachesis-rs" } 13 | ring = "0.13.4" 14 | serde = "1.0.80" 15 | serde_derive = "1.0.80" 16 | -------------------------------------------------------------------------------- /kvdb/src/main.rs: -------------------------------------------------------------------------------- 1 | #[macro_use] 2 | extern crate serde_derive; 3 | 4 | use bincode::{deserialize, serialize}; 5 | use configure::Configure; 6 | use failure::{Error, Fail}; 7 | use lachesis_rs::tcp_server::{TcpApp, TcpNode, TcpPeer}; 8 | use lachesis_rs::{BTreeHashgraph, Node, Swirlds}; 9 | use std::collections::HashMap; 10 | use std::io::{Read, Write}; 11 | use std::net::TcpListener; 12 | use std::str::FromStr; 13 | use std::sync::{Arc, Mutex}; 14 | use std::thread::{sleep, spawn, JoinHandle}; 15 | use std::time::Duration; 16 | 17 | #[derive(Debug, Fail)] 18 | enum KvdbError { 19 | #[fail(display = "Wrong address {}", addr)] 20 | 
WrongAddressFormat { addr: String }, 21 | } 22 | 23 | #[derive(Configure, Deserialize)] 24 | #[serde(default)] 25 | struct Config { 26 | lachesis_port: usize, 27 | peer_hosts: String, 28 | peer_ids: String, 29 | server_port: usize, 30 | } 31 | 32 | impl Default for Config { 33 | fn default() -> Config { 34 | Config { 35 | lachesis_port: 9000, 36 | peer_ids: String::from(""), 37 | peer_hosts: String::from(""), 38 | server_port: 8080, 39 | } 40 | } 41 | } 42 | 43 | #[derive(Deserialize)] 44 | enum ServerMessage { 45 | Get(String), 46 | Put(String, String), 47 | Delete(String), 48 | } 49 | 50 | #[derive(Serialize)] 51 | enum ServerResponse { 52 | GetResponse(Option), 53 | DeleteResponse(String), 54 | PutResponse(Option), 55 | } 56 | 57 | struct Server { 58 | db: Arc>>, 59 | node: Arc>>, 60 | port: usize, 61 | } 62 | 63 | impl Server { 64 | fn new(port: usize, node: Arc>>) -> Server { 65 | Server { 66 | db: Arc::new(Mutex::new(HashMap::new())), 67 | node, 68 | port, 69 | } 70 | } 71 | 72 | fn run(self) -> (JoinHandle<()>, JoinHandle<()>) { 73 | let server = self.get_server_handle(); 74 | let node = self.node.clone(); 75 | let db_mutex = self.db.clone(); 76 | let queue_consumer = spawn(move || { 77 | let next_to_process = 0; 78 | loop { 79 | let events = node.node.get_ordered_events().unwrap(); 80 | let transactions: Vec> = events.iter().flat_map(|e| e.payload()).collect(); 81 | if transactions.len() > next_to_process { 82 | for i in next_to_process..transactions.len() - 1 { 83 | let transaction = &transactions[i]; 84 | match deserialize(transaction).unwrap() { 85 | ServerMessage::Put(id, value) => { 86 | db_mutex.lock().unwrap().insert(id, value); 87 | } 88 | ServerMessage::Delete(id) => { 89 | db_mutex.lock().unwrap().remove(&id); 90 | } 91 | _ => {} 92 | }; 93 | } 94 | } 95 | sleep(Duration::from_millis(100)); 96 | } 97 | }); 98 | (server, queue_consumer) 99 | } 100 | 101 | fn get_server_handle(&self) -> JoinHandle<()> { 102 | let port = self.port; 103 | let node = 
self.node.clone(); 104 | let db_mutex = self.db.clone(); 105 | spawn(move || { 106 | let address = format!("0.0.0.0:{}", port); 107 | let listener = TcpListener::bind(address).unwrap(); 108 | for stream_result in listener.incoming() { 109 | let mut stream = stream_result.unwrap(); 110 | let mut content = Vec::new(); 111 | stream.read_to_end(&mut content).unwrap(); 112 | match deserialize(&content).unwrap() { 113 | ServerMessage::Get(id) => { 114 | let v = db_mutex.lock().unwrap().get(&id).map(|v| v.clone()); 115 | let response = serialize(&ServerResponse::GetResponse(v)).unwrap(); 116 | stream.write(&response).unwrap(); 117 | } 118 | ServerMessage::Delete(id) => { 119 | let response = serialize(&ServerResponse::DeleteResponse(id)).unwrap(); 120 | stream.write(&response).unwrap(); 121 | node.node.add_transaction(content).unwrap(); 122 | } 123 | ServerMessage::Put(id, _) => { 124 | let prev = db_mutex.lock().unwrap().get(&id).map(|v| v.clone()); 125 | let response = serialize(&ServerResponse::PutResponse(prev)).unwrap(); 126 | stream.write(&response).unwrap(); 127 | node.node.add_transaction(content).unwrap(); 128 | } 129 | } 130 | } 131 | }) 132 | } 133 | } 134 | 135 | fn parse_peer(input: String) -> Result<(String, usize), Error> { 136 | let elements: Vec = input.clone().split(':').map(|s| s.to_string()).collect(); 137 | if elements.len() == 2 { 138 | Ok((elements[0].clone(), usize::from_str(&elements[1])?)) 139 | } else { 140 | Err(Error::from(KvdbError::WrongAddressFormat { addr: input })) 141 | } 142 | } 143 | 144 | fn parse_peers(input: String) -> Result, Error> { 145 | input 146 | .split(',') 147 | .map(|ps| parse_peer(ps.to_string())) 148 | .collect() 149 | } 150 | 151 | fn main() { 152 | env_logger::init(); 153 | let config: Config = Config::generate().unwrap(); 154 | let ids: Vec = config.peer_ids.split(',').map(|s| s.to_string()).collect(); 155 | let peers = parse_peers(config.peer_hosts).unwrap(); 156 | if peers.len() != ids.len() { 157 | panic!("Number 
of peer ids mismatches number of peer addresses"); 158 | } 159 | let peers: Vec = ids 160 | .iter() 161 | .zip(peers.iter()) 162 | .map(|(id, (a, p))| TcpPeer { 163 | address: format!("{}:{}", a, p), 164 | id: id.as_bytes().to_vec(), 165 | }) 166 | .collect(); 167 | let mut rng = ring::rand::SystemRandom::new(); 168 | let local_address = format!("0.0.0.0:{}", config.lachesis_port); 169 | let node = Arc::new(TcpNode::new(&mut rng, local_address).unwrap()); 170 | for peer in peers.iter() { 171 | node.node.add_node(Arc::new(peer.clone())).unwrap(); 172 | } 173 | let app = TcpApp::new(node.clone()); 174 | let server = Server::new(config.server_port, node.clone()); 175 | let (handle1, handle2) = app.run().unwrap(); 176 | let (server_handle1, server_handle2) = server.run(); 177 | handle1.join().unwrap(); 178 | handle2.join().unwrap(); 179 | server_handle1.join().unwrap(); 180 | server_handle2.join().unwrap(); 181 | } 182 | -------------------------------------------------------------------------------- /lachesis-rs/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "lachesis-rs" 3 | version = "0.1.0" 4 | authors = ["Agustin Chiappe Berrini , Rishab Sharma "] 5 | edition = "2018" 6 | 7 | [dependencies] 8 | base64 = "0.10.0" 9 | bincode = "1.0.1" 10 | failure = "0.1.5" 11 | log = "0.4" 12 | proptest = "0.8.7" 13 | rand = "0.6.0" 14 | ring = "0.13.4" 15 | untrusted = "0.6.2" 16 | env_logger = "0.6.0" 17 | actix = "0.7.9" 18 | actix-web = "0.7.18" 19 | tokio-io = "*" 20 | tokio-tcp = "*" 21 | byteorder = "*" 22 | bytes = "*" 23 | serde_json = "*" 24 | serde = "1.0.80" 25 | serde_derive = "1.0.80" 26 | futures = "*" 27 | json = "*" -------------------------------------------------------------------------------- /lachesis-rs/proptest-regressions/event/event.txt: -------------------------------------------------------------------------------- 1 | # Seeds for failure cases proptest has generated in the past. 
It is 2 | # automatically read and these particular cases re-run before any 3 | # novel cases are generated. 4 | # 5 | # It is recommended to check this file in to source control so that 6 | # everyone who runs the test benefits from these saved cases. 7 | xs 573141862 2244303776 2464086936 57866245 # shrinks to tx1 = "", tx2 = "\u{b}" 8 | -------------------------------------------------------------------------------- /lachesis-rs/src/bin/lachesis_server.rs: -------------------------------------------------------------------------------- 1 | use lachesis_rs::Server; 2 | 3 | /** 4 | * Main lachesis-rs entrypoint. Starts HTTP server. 5 | */ 6 | fn main() { 7 | std::env::set_var("RUST_LOG", "actix_web=info"); 8 | env_logger::init(); 9 | 10 | let sys = actix::System::new("heartbeat-example"); 11 | 12 | let host = "127.0.0.1:8080"; 13 | Server::init().bind(host).unwrap().start(); 14 | 15 | println!("Started http server: {}", host); 16 | let _ = sys.run(); 17 | } 18 | -------------------------------------------------------------------------------- /lachesis-rs/src/bin/lachesis_tcp.rs: -------------------------------------------------------------------------------- 1 | extern crate lachesis_rs; 2 | 3 | use lachesis_rs::tcp_server::{TcpApp, TcpNode, TcpPeer}; 4 | use std::env::args; 5 | use std::sync::Arc; 6 | 7 | const BASE_PORT: usize = 9000; 8 | const USAGE: &'static str = "Usage: tcp-client [number of nodes] [consensus-algorithm]"; 9 | 10 | /** 11 | * Main lachesis-rs TCP client entrypoint. Starts multiple TCP node peers. 
12 | */ 13 | fn main() { 14 | env_logger::init(); 15 | let args: Vec = args().collect(); 16 | if args.len() != 3 { 17 | panic!(USAGE); 18 | } 19 | let mut rng = ring::rand::SystemRandom::new(); 20 | let n_nodes = args[1].parse::().unwrap(); 21 | let algorithm = args[2].clone(); 22 | let mut nodes = Vec::with_capacity(n_nodes); 23 | let mut peers = Vec::with_capacity(n_nodes); 24 | for i in 0..n_nodes { 25 | let a = format!("0.0.0.0:{}", BASE_PORT + i); 26 | let node = TcpNode::new(&mut rng, a.clone()).unwrap(); 27 | peers.push(TcpPeer { 28 | address: a, 29 | id: node.node.get_id().clone(), 30 | }); 31 | nodes.push(Arc::new(node)); 32 | } 33 | for node in nodes.iter() { 34 | for peer in peers.iter() { 35 | if peer.id.clone() != node.node.get_id() { 36 | node.node.add_node(Arc::new(peer.clone())).unwrap(); 37 | } 38 | } 39 | } 40 | let mut handles = Vec::with_capacity(n_nodes * 2); 41 | for node in nodes { 42 | let app = TcpApp::new(node.clone()); 43 | let (handle1, handle2) = app.run().unwrap(); 44 | handles.push(handle1); 45 | handles.push(handle2); 46 | } 47 | for handle in handles { 48 | handle.join().unwrap(); 49 | } 50 | } 51 | -------------------------------------------------------------------------------- /lachesis-rs/src/bin/ws_client.rs: -------------------------------------------------------------------------------- 1 | use std::{io, thread}; 2 | 3 | use actix::*; 4 | 5 | #[macro_use] 6 | extern crate log; 7 | 8 | use actix_web::ws::{Client, ClientWriter, Message, ProtocolError}; 9 | use futures::Future; 10 | 11 | use lachesis_rs::InternodeMessage; 12 | 13 | /** 14 | * Main lachesis-rs WebSocket client entrypoint. Starts client and connects to server. 
15 | */ 16 | fn main() { 17 | ::std::env::set_var("RUST_LOG", "actix_web=info"); 18 | let _ = env_logger::init(); 19 | 20 | let sys = actix::System::new("ws-client"); 21 | 22 | Arbiter::spawn( 23 | Client::new("http://127.0.0.1:8080/ws") 24 | .connect() 25 | .map_err(|e| { 26 | error!("Error: {}", e); 27 | () 28 | }) 29 | .map(|(reader, writer)| { 30 | let addr = InternodeClient::create(|ctx| { 31 | InternodeClient::add_stream(reader, ctx); 32 | InternodeClient(writer) 33 | }); 34 | 35 | thread::spawn(move || loop { 36 | let mut cmd = String::new(); 37 | if io::stdin().read_line(&mut cmd).is_err() { 38 | error!("error"); 39 | } 40 | addr.do_send(InternodeMessage::SyncRequest) 41 | }); 42 | 43 | () 44 | }), 45 | ); 46 | 47 | let _ = sys.run(); 48 | } 49 | 50 | struct InternodeClient(ClientWriter); 51 | 52 | /** 53 | * Implement Actor for InternodeClient to start and stop 54 | */ 55 | impl Actor for InternodeClient { 56 | type Context = Context; 57 | 58 | /** 59 | * Start connection 60 | */ 61 | fn started(&mut self, _ctx: &mut Context) { 62 | info!("Connected"); 63 | } 64 | 65 | /** 66 | * Stop connection 67 | */ 68 | fn stopped(&mut self, _: &mut Context) { 69 | info!("Disconnected"); 70 | 71 | System::current().stop(); 72 | } 73 | } 74 | 75 | impl Handler for InternodeClient { 76 | type Result = (); 77 | 78 | fn handle(&mut self, msg: InternodeMessage, _ctx: &mut Context) { 79 | info!("Handling message"); 80 | self.0.binary(msg) 81 | } 82 | } 83 | 84 | impl StreamHandler for InternodeClient { 85 | fn handle(&mut self, msg: Message, _ctx: &mut Context) { 86 | match msg { 87 | Message::Binary(bin) => println!("Server: {:?}", bin), 88 | Message::Text(text) => println!("Server: {:?}", text), 89 | _ => (), 90 | } 91 | } 92 | 93 | fn started(&mut self, _ctx: &mut Context) { 94 | println!("Connected"); 95 | } 96 | 97 | fn finished(&mut self, ctx: &mut Context) { 98 | info!("Server disconnected"); 99 | ctx.stop() 100 | } 101 | } 102 | 
-------------------------------------------------------------------------------- /lachesis-rs/src/errors.rs: -------------------------------------------------------------------------------- 1 | use crate::event::event_hash::EventHash; 2 | use crate::failure::Backtrace; 3 | use crate::peer::PeerId; 4 | use crate::printable_hash::PrintableHash; 5 | 6 | use std::fmt; 7 | use std::sync::PoisonError; 8 | 9 | #[derive(Debug, Fail)] 10 | pub enum ParentsError { 11 | #[fail(display = "Parents are empty")] 12 | EmptyParents, 13 | } 14 | 15 | #[derive(Debug)] 16 | pub(crate) enum NodeErrorType { 17 | PeerNotFound(PeerId), 18 | EmptyNetwork, 19 | NoHead, 20 | } 21 | 22 | impl fmt::Display for NodeErrorType { 23 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { 24 | let msg = match self { 25 | NodeErrorType::EmptyNetwork => String::from("The node network it's empty"), 26 | NodeErrorType::NoHead => String::from("The node has no head"), 27 | NodeErrorType::PeerNotFound(p) => format!("Peer {} not found", p.printable_hash()), 28 | }; 29 | write!(f, "{}", msg) 30 | } 31 | } 32 | 33 | #[derive(Debug, Fail)] 34 | #[fail( 35 | display = "Node failed with error: {}\nTraceback: {}", 36 | error_type, backtrace 37 | )] 38 | pub(crate) struct NodeError { 39 | backtrace: Backtrace, 40 | error_type: NodeErrorType, 41 | } 42 | 43 | impl NodeError { 44 | pub(crate) fn new(error_type: NodeErrorType) -> NodeError { 45 | NodeError { 46 | backtrace: Backtrace::new(), 47 | error_type, 48 | } 49 | } 50 | } 51 | 52 | #[derive(Debug, Fail)] 53 | pub(crate) enum EventErrorType { 54 | UnsignedEvent { hash: EventHash }, 55 | RoundNotSet { hash: EventHash }, 56 | NoSelfParent { hash: EventHash }, 57 | NoParents { hash: EventHash }, 58 | NoSignature { hash: EventHash }, 59 | NoTimestamp { hash: EventHash }, 60 | } 61 | 62 | impl fmt::Display for EventErrorType { 63 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { 64 | let msg = match self { 65 | EventErrorType::UnsignedEvent { hash } => { 66 
| format!("The event {} it's unsigned", hash.printable_hash()) 67 | } 68 | EventErrorType::RoundNotSet { hash } => { 69 | format!("The event {} round isn't set", hash.printable_hash()) 70 | } 71 | EventErrorType::NoSelfParent { hash } => { 72 | format!("The event {} self parent isn't set", hash.printable_hash()) 73 | } 74 | EventErrorType::NoParents { hash } => { 75 | format!("The event {} parents aren't set", hash.printable_hash()) 76 | } 77 | EventErrorType::NoSignature { hash } => { 78 | format!("The event {} signature isn't set", hash.printable_hash()) 79 | } 80 | EventErrorType::NoTimestamp { hash } => { 81 | format!("The event {} timestamp isn't set", hash.printable_hash()) 82 | } 83 | }; 84 | write!(f, "{}", msg) 85 | } 86 | } 87 | 88 | #[derive(Debug, Fail)] 89 | #[fail( 90 | display = "Event failed with error: {}\nTraceback: {}", 91 | error_type, backtrace 92 | )] 93 | pub(crate) struct EventError { 94 | backtrace: Backtrace, 95 | error_type: EventErrorType, 96 | } 97 | 98 | impl EventError { 99 | pub(crate) fn new(error_type: EventErrorType) -> EventError { 100 | EventError { 101 | backtrace: Backtrace::new(), 102 | error_type, 103 | } 104 | } 105 | } 106 | 107 | #[derive(Debug)] 108 | pub(crate) enum HashgraphErrorType { 109 | EventNotFound, 110 | NoLamportTimeSet, 111 | } 112 | 113 | impl fmt::Display for HashgraphErrorType { 114 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { 115 | let msg = match self { 116 | HashgraphErrorType::EventNotFound => "Event not found in hashgraph", 117 | HashgraphErrorType::NoLamportTimeSet => "No root has lamport timestamp set", 118 | }; 119 | write!(f, "{}", msg) 120 | } 121 | } 122 | 123 | #[derive(Debug, Fail)] 124 | #[fail( 125 | display = "Hashgraph failed with error: {}\nTraceback: {}", 126 | error_type, backtrace 127 | )] 128 | pub(crate) struct HashgraphError { 129 | backtrace: Backtrace, 130 | error_type: HashgraphErrorType, 131 | } 132 | 133 | impl HashgraphError { 134 | pub(crate) fn new(error_type: 
HashgraphErrorType) -> HashgraphError { 135 | HashgraphError { 136 | backtrace: Backtrace::new(), 137 | error_type, 138 | } 139 | } 140 | } 141 | 142 | #[derive(Debug, Fail)] 143 | #[fail(display = "Hashgraph Mutex was poisoned")] 144 | pub struct ResourceHashgraphPoisonError { 145 | backtrace: Backtrace, 146 | } 147 | 148 | impl ResourceHashgraphPoisonError { 149 | pub fn new() -> ResourceHashgraphPoisonError { 150 | ResourceHashgraphPoisonError { 151 | backtrace: Backtrace::new(), 152 | } 153 | } 154 | } 155 | 156 | //for op-?, "auto" type conversion 157 | impl From> for ResourceHashgraphPoisonError { 158 | fn from(_: PoisonError) -> Self { 159 | ResourceHashgraphPoisonError::new() 160 | } 161 | } 162 | 163 | #[derive(Debug, Fail)] 164 | #[fail(display = "Frames Mutex was poisoned")] 165 | pub struct ResourceFramesPoisonError { 166 | backtrace: Backtrace, 167 | } 168 | 169 | impl ResourceFramesPoisonError { 170 | pub fn new() -> ResourceFramesPoisonError { 171 | ResourceFramesPoisonError { 172 | backtrace: Backtrace::new(), 173 | } 174 | } 175 | } 176 | 177 | //for op-?, "auto" type conversion 178 | impl From> for ResourceFramesPoisonError { 179 | fn from(_: PoisonError) -> Self { 180 | ResourceFramesPoisonError::new() 181 | } 182 | } 183 | 184 | #[derive(Debug, Fail)] 185 | #[fail(display = "Head Mutex was poisoned")] 186 | pub struct ResourceHeadPoisonError { 187 | backtrace: Backtrace, 188 | } 189 | 190 | impl ResourceHeadPoisonError { 191 | pub fn new() -> ResourceHeadPoisonError { 192 | ResourceHeadPoisonError { 193 | backtrace: Backtrace::new(), 194 | } 195 | } 196 | } 197 | 198 | //for op-?, "auto" type conversion 199 | impl From> for ResourceHeadPoisonError { 200 | fn from(_: PoisonError) -> Self { 201 | ResourceHeadPoisonError::new() 202 | } 203 | } 204 | 205 | #[derive(Debug, Fail)] 206 | #[fail(display = "Network Mutex was poisoned")] 207 | pub struct ResourceNetworkPoisonError { 208 | backtrace: Backtrace, 209 | } 210 | 211 | impl 
ResourceNetworkPoisonError { 212 | pub fn new() -> ResourceNetworkPoisonError { 213 | ResourceNetworkPoisonError { 214 | backtrace: Backtrace::new(), 215 | } 216 | } 217 | } 218 | 219 | //for op-?, "auto" type conversion 220 | impl From> for ResourceNetworkPoisonError { 221 | fn from(_: PoisonError) -> Self { 222 | ResourceNetworkPoisonError::new() 223 | } 224 | } 225 | 226 | #[derive(Debug, Fail)] 227 | #[fail(display = "Node internal state Mutex was poisoned")] 228 | pub struct ResourceNodeInternalStatePoisonError { 229 | backtrace: Backtrace, 230 | } 231 | 232 | impl ResourceNodeInternalStatePoisonError { 233 | pub fn new() -> ResourceNodeInternalStatePoisonError { 234 | ResourceNodeInternalStatePoisonError { 235 | backtrace: Backtrace::new(), 236 | } 237 | } 238 | } 239 | 240 | //for op-?, "auto" type conversion 241 | impl From> for ResourceNodeInternalStatePoisonError { 242 | fn from(_: PoisonError) -> Self { 243 | ResourceNodeInternalStatePoisonError::new() 244 | } 245 | } 246 | -------------------------------------------------------------------------------- /lachesis-rs/src/event.rs: -------------------------------------------------------------------------------- 1 | use crate::errors::{EventError, EventErrorType}; 2 | use crate::peer::PeerId; 3 | use bincode::serialize; 4 | use failure::Error; 5 | use ring::digest::{digest, SHA256}; 6 | use serde::Serialize; 7 | use std::collections::HashMap; 8 | 9 | pub mod event_hash; 10 | pub mod event_signature; 11 | pub mod parents; 12 | 13 | use self::event_hash::EventHash; 14 | use self::event_signature::EventSignature; 15 | use self::parents::Parents; 16 | 17 | #[derive(Clone, Debug, Deserialize, Eq, PartialEq, Serialize)] 18 | pub struct Event { 19 | #[serde(skip)] 20 | can_see: HashMap, 21 | #[serde(skip)] 22 | famous: Option, 23 | payload: Vec>, 24 | parents: Option

, 25 | timestamp: Option, 26 | creator: PeerId, 27 | signature: Option, 28 | #[serde(skip)] 29 | round: Option, 30 | #[serde(skip)] 31 | round_received: Option, 32 | } 33 | 34 | impl Event

{ 35 | pub fn new(payload: Vec>, parents: Option

, creator: PeerId) -> Event

{ 36 | Event { 37 | can_see: HashMap::new(), 38 | creator, 39 | famous: None, 40 | payload, 41 | parents, 42 | round: None, 43 | round_received: None, 44 | signature: None, 45 | timestamp: None, 46 | } 47 | } 48 | 49 | #[inline] 50 | pub fn set_timestamp(&mut self, timestamp: u64) { 51 | self.timestamp = Some(timestamp); 52 | } 53 | 54 | #[inline] 55 | pub fn timestamp(&self) -> Result { 56 | self.timestamp 57 | .clone() 58 | .ok_or(Error::from(EventError::new(EventErrorType::NoTimestamp { 59 | hash: self.hash()?, 60 | }))) 61 | } 62 | 63 | #[inline] 64 | pub fn set_round_received(&mut self, round_received: usize) { 65 | self.round_received = Some(round_received); 66 | } 67 | 68 | #[inline] 69 | pub fn is_self_parent(&self, hash: &EventHash) -> Result { 70 | let mut error: Option = None; 71 | let r = self 72 | .parents 73 | .clone() 74 | .map(|p| match p.self_parent() { 75 | Ok(self_parent) => self_parent == hash.clone(), 76 | Err(e) => { 77 | error = Some(e); 78 | false 79 | } 80 | }) 81 | .unwrap_or(false); 82 | if error.is_some() { 83 | return Err(error.unwrap()); 84 | } 85 | Ok(r) 86 | } 87 | 88 | #[inline] 89 | pub fn signature(&self) -> Result { 90 | self.signature 91 | .clone() 92 | .ok_or(Error::from(EventError::new(EventErrorType::NoSignature { 93 | hash: self.hash()?, 94 | }))) 95 | } 96 | 97 | #[inline] 98 | pub fn payload(&self) -> Vec> { 99 | self.payload.clone() 100 | } 101 | 102 | #[inline] 103 | pub fn famous(&mut self, famous: bool) { 104 | self.famous = Some(famous) 105 | } 106 | 107 | #[inline] 108 | pub fn is_famous(&self) -> bool { 109 | self.famous.unwrap_or(false) 110 | } 111 | 112 | #[inline] 113 | pub fn is_undefined(&self) -> bool { 114 | self.famous.is_none() 115 | } 116 | 117 | #[inline] 118 | pub fn can_see(&self) -> &HashMap { 119 | &self.can_see 120 | } 121 | 122 | #[inline] 123 | pub fn set_can_see(&mut self, can_see: HashMap) { 124 | self.can_see = can_see; 125 | } 126 | 127 | #[inline] 128 | pub fn round(&self) -> Result { 129 | 
self.round 130 | .ok_or(Error::from(EventError::new(EventErrorType::RoundNotSet { 131 | hash: self.hash()?, 132 | }))) 133 | } 134 | 135 | #[inline] 136 | pub fn maybe_round(&self) -> Option { 137 | self.round.clone() 138 | } 139 | 140 | #[inline] 141 | pub fn add_can_see(&mut self, peer: PeerId, hash: EventHash) { 142 | self.can_see.insert(peer, hash); 143 | } 144 | 145 | #[inline] 146 | pub fn is_root(&self) -> bool { 147 | self.parents.is_none() 148 | } 149 | 150 | #[inline] 151 | pub fn self_parent(&self) -> Result { 152 | let mut error: Option = None; 153 | let none_error = format_err!("self_parent() returned None"); 154 | 155 | match self 156 | .parents 157 | .clone() 158 | .map(|p| match p.self_parent() { 159 | Ok(sp) => Some(sp), 160 | Err(e) => { 161 | debug!(target: "event", "{}", e); 162 | 163 | let hash: EventHash = match self.hash() { 164 | Ok(hash) => hash, 165 | Err(e) => { 166 | debug!(target: "hash", "{}", e); 167 | EventHash([0; 32]) 168 | } 169 | }; 170 | error = Some(Error::from(EventError::new(EventErrorType::NoSelfParent { 171 | hash: hash, 172 | }))); 173 | None 174 | } 175 | }) 176 | .filter(|p| p.is_some()) 177 | .unwrap() 178 | { 179 | Some(p) => Ok(p), 180 | None => Err(if error.is_some() { 181 | error.unwrap() 182 | } else { 183 | none_error 184 | }), 185 | } 186 | } 187 | 188 | #[inline] 189 | pub fn parents(&self) -> &Option

{ 190 | &self.parents 191 | } 192 | 193 | #[inline] 194 | pub fn creator(&self) -> &PeerId { 195 | &self.creator 196 | } 197 | 198 | pub fn sign(&mut self, signature: EventSignature) { 199 | self.signature = Some(signature); 200 | } 201 | 202 | #[inline] 203 | pub fn set_round(&mut self, round: usize) { 204 | self.round = Some(round); 205 | } 206 | 207 | pub fn hash(&self) -> Result { 208 | let value = ( 209 | self.payload.clone(), 210 | self.parents.clone(), 211 | self.timestamp.clone(), 212 | self.creator.clone(), 213 | ); 214 | let bytes = serialize(&value)?; 215 | Ok(EventHash::new(digest(&SHA256, bytes.as_ref()).as_ref())) 216 | } 217 | 218 | pub fn is_valid(&self, hash: &EventHash) -> Result { 219 | self.signature 220 | .clone() 221 | .map(|s| s.verify(&self, &self.creator)) 222 | .unwrap_or(Err(Error::from(EventError::new( 223 | EventErrorType::UnsignedEvent { hash: self.hash()? }, 224 | ))))?; 225 | Ok(hash.as_ref() == self.hash()?.as_ref()) 226 | } 227 | } 228 | 229 | proptest! 
{ 230 | #[test] 231 | fn root_event_shouldnt_have_self_parents(hash in ".*") { 232 | use crate::event::{EventHash, parents::ParentsPair}; 233 | use ring::digest::{digest, SHA256}; 234 | let event: Event = Event::new(Vec::new(), None, Vec::new()); 235 | let hash = EventHash::new(digest(&SHA256, hash.as_bytes()).as_ref()); 236 | assert!(!event.is_self_parent(&hash).unwrap()) 237 | } 238 | 239 | #[test] 240 | fn it_should_report_correctly_self_parent(self_parent_hash in ".*", p_try in ".*") { 241 | use crate::event::{EventHash, parents::ParentsPair}; 242 | use ring::digest::{digest, SHA256}; 243 | let self_parent = EventHash::new(digest(&SHA256, self_parent_hash.as_bytes()).as_ref()); 244 | let other_parent = EventHash::new(digest(&SHA256, b"fish").as_ref()); 245 | let event = Event::new(Vec::new(), Some(ParentsPair(self_parent.clone(), other_parent)), Vec::new()); 246 | let hash = EventHash::new(digest(&SHA256, p_try.as_bytes()).as_ref()); 247 | assert!(event.is_self_parent(&self_parent).unwrap()); 248 | assert_eq!(self_parent_hash == p_try, event.is_self_parent(&hash).unwrap()) 249 | } 250 | 251 | #[test] 252 | fn it_should_have_different_hashes_on_different_transactions(tx1 in "[a-z]*", tx2 in "[a-z]*") { 253 | use crate::event::parents::ParentsPair; 254 | let event1: Event = Event::new(vec![tx1.as_bytes().to_vec()], None, Vec::new()); 255 | let event2: Event = Event::new(vec![tx2.as_bytes().to_vec()], None, Vec::new()); 256 | let event3: Event = Event::new(vec![tx2.as_bytes().to_vec()], None, Vec::new()); 257 | let hash1 = event1.hash().unwrap(); 258 | let hash2 = event2.hash().unwrap(); 259 | let hash3 = event3.hash().unwrap(); 260 | assert!(hash2 == hash3); 261 | assert_eq!(tx1 == tx2, hash1 == hash2); 262 | } 263 | 264 | #[test] 265 | fn it_should_have_different_hashes_on_different_self_parents(tx1 in ".*", tx2 in ".*") { 266 | use crate::event::{EventHash, parents::ParentsPair}; 267 | use ring::digest::{digest, SHA256}; 268 | let other_parent = 
EventHash::new(digest(&SHA256, b"42").as_ref()); 269 | let self_parent1 = EventHash::new(digest(&SHA256, tx1.as_bytes()).as_ref()); 270 | let self_parent2 = EventHash::new(digest(&SHA256, tx2.as_bytes()).as_ref()); 271 | let self_parent3 = EventHash::new(digest(&SHA256, tx2.as_bytes()).as_ref()); 272 | let event1 = Event::new(vec![], Some(ParentsPair(self_parent1, other_parent.clone())), Vec::new()); 273 | let event2 = Event::new(vec![], Some(ParentsPair(self_parent2, other_parent.clone())), Vec::new()); 274 | let event3 = Event::new(vec![], Some(ParentsPair(self_parent3, other_parent.clone())), Vec::new()); 275 | let hash1 = event1.hash().unwrap(); 276 | let hash2 = event2.hash().unwrap(); 277 | let hash3 = event3.hash().unwrap(); 278 | assert!(hash2 == hash3); 279 | assert_eq!(tx1 == tx2, hash1 == hash2); 280 | } 281 | 282 | #[test] 283 | fn it_should_have_different_hashes_on_different_other_parents(tx1 in ".*", tx2 in ".*") { 284 | use crate::event::{EventHash, parents::ParentsPair}; 285 | use ring::digest::{digest, SHA256}; 286 | let self_parent = EventHash::new(digest(&SHA256, b"42").as_ref()); 287 | let other_parent1 = EventHash::new(digest(&SHA256, tx1.as_bytes()).as_ref()); 288 | let other_parent2 = EventHash::new(digest(&SHA256, tx2.as_bytes()).as_ref()); 289 | let other_parent3 = EventHash::new(digest(&SHA256, tx2.as_bytes()).as_ref()); 290 | let event1 = Event::new(vec![], Some(ParentsPair(self_parent.clone(), other_parent1)), Vec::new()); 291 | let event2 = Event::new(vec![], Some(ParentsPair(self_parent.clone(), other_parent2)), Vec::new()); 292 | let event3 = Event::new(vec![], Some(ParentsPair(self_parent.clone(), other_parent3)), Vec::new()); 293 | let hash1 = event1.hash().unwrap(); 294 | let hash2 = event2.hash().unwrap(); 295 | let hash3 = event3.hash().unwrap(); 296 | assert!(hash2 == hash3); 297 | assert_eq!(tx1 == tx2, hash1 == hash2); 298 | } 299 | 300 | #[test] 301 | fn it_should_have_different_hash_on_different_creators(c1 in ".*", c2 in 
".*") { 302 | use crate::event::parents::ParentsPair; 303 | let event1: Event = Event::new(vec![], None, c1.as_bytes().to_vec()); 304 | let event2: Event = Event::new(vec![], None, c2.as_bytes().to_vec()); 305 | let event3: Event = Event::new(vec![], None, c2.as_bytes().to_vec()); 306 | let hash1 = event1.hash().unwrap(); 307 | let hash2 = event2.hash().unwrap(); 308 | let hash3 = event3.hash().unwrap(); 309 | assert!(hash2 == hash3); 310 | assert_eq!(c1 == c2, hash1 == hash2); 311 | } 312 | 313 | #[test] 314 | fn it_should_have_different_hash_on_different_timestamps(s1 in 0u64..10000, s2 in 0u64..10000) { 315 | use crate::event::parents::ParentsPair; 316 | let mut event1: Event = Event::new(vec![], None, Vec::new()); 317 | let mut event2: Event = Event::new(vec![], None, Vec::new()); 318 | let mut event3: Event = Event::new(vec![], None, Vec::new()); 319 | event1.set_timestamp(s1); 320 | event2.set_timestamp(s2); 321 | event3.set_timestamp(s2); 322 | let hash1 = event1.hash().unwrap(); 323 | let hash2 = event2.hash().unwrap(); 324 | let hash3 = event3.hash().unwrap(); 325 | assert!(hash2 == hash3); 326 | assert_eq!(s1 == s2, hash1 == hash2); 327 | } 328 | } 329 | 330 | #[cfg(test)] 331 | mod tests { 332 | use crate::event::{parents::ParentsPair, Event, EventHash, EventSignature}; 333 | use ring::digest::{digest, SHA256}; 334 | use ring::{rand, signature}; 335 | 336 | #[test] 337 | fn it_should_succeed_when_verifying_correct_event() { 338 | let rng = rand::SystemRandom::new(); 339 | let pkcs8_bytes = signature::Ed25519KeyPair::generate_pkcs8(&rng).unwrap(); 340 | let kp = 341 | signature::Ed25519KeyPair::from_pkcs8(untrusted::Input::from(&pkcs8_bytes)).unwrap(); 342 | let mut event: Event = 343 | Event::new(vec![], None, kp.public_key_bytes().to_vec()); 344 | let hash = event.hash().unwrap(); 345 | let sign = kp.sign(hash.as_ref()); 346 | let event_signature = EventSignature::new(sign.as_ref()); 347 | event.sign(event_signature); 348 | 
assert!(event.is_valid(&hash).unwrap()); 349 | } 350 | 351 | #[test] 352 | fn it_shouldnt_succeed_when_verifying_correct_event_with_wrong_hash() { 353 | let rng = rand::SystemRandom::new(); 354 | let pkcs8_bytes = signature::Ed25519KeyPair::generate_pkcs8(&rng).unwrap(); 355 | let kp = 356 | signature::Ed25519KeyPair::from_pkcs8(untrusted::Input::from(&pkcs8_bytes)).unwrap(); 357 | let mut event: Event = 358 | Event::new(vec![], None, kp.public_key_bytes().to_vec()); 359 | let hash = event.hash().unwrap(); 360 | let sign = kp.sign(hash.as_ref()); 361 | let event_signature = EventSignature::new(sign.as_ref()); 362 | let wrong_hash = EventHash::new(digest(&SHA256, b"42").as_ref()); 363 | event.sign(event_signature); 364 | assert!(!event.is_valid(&wrong_hash).unwrap()); 365 | } 366 | 367 | #[test] 368 | #[should_panic(expected = "Unspecified")] 369 | fn it_should_error_when_verifying_wrong_event() { 370 | let rng = rand::SystemRandom::new(); 371 | let pkcs8_bytes = signature::Ed25519KeyPair::generate_pkcs8(&rng).unwrap(); 372 | let kp = 373 | signature::Ed25519KeyPair::from_pkcs8(untrusted::Input::from(&pkcs8_bytes)).unwrap(); 374 | let mut event: Event = Event::new(vec![], None, vec![]); 375 | let hash = event.hash().unwrap(); 376 | let sign = kp.sign(hash.as_ref()); 377 | let event_signature = EventSignature::new(sign.as_ref()); 378 | event.sign(event_signature); 379 | assert!(!event.is_valid(&hash).unwrap()); 380 | } 381 | } 382 | -------------------------------------------------------------------------------- /lachesis-rs/src/event/event_hash.rs: -------------------------------------------------------------------------------- 1 | #[derive(Clone, Debug, Deserialize, Eq, Hash, Ord, PartialEq, PartialOrd, Serialize)] 2 | pub struct EventHash(pub [u8; 32]); 3 | 4 | impl EventHash { 5 | pub fn new(digest: &[u8]) -> EventHash { 6 | let mut a: [u8; 32] = [0; 32]; 7 | a.copy_from_slice(&digest[0..32]); 8 | EventHash(a) 9 | } 10 | } 11 | 12 | impl AsRef<[u8]> for EventHash 
{ 13 | fn as_ref(&self) -> &[u8] { 14 | self.0.as_ref() 15 | } 16 | } 17 | -------------------------------------------------------------------------------- /lachesis-rs/src/event/event_signature.rs: -------------------------------------------------------------------------------- 1 | use crate::event::parents::Parents; 2 | use crate::event::Event; 3 | use crate::peer::PeerId; 4 | use failure::Error; 5 | use ring::signature::{verify, ED25519}; 6 | use serde::{Deserialize, Deserializer, Serialize, Serializer}; 7 | use std::fmt::{self, Debug}; 8 | 9 | #[derive(Clone, Deserialize, Serialize)] 10 | pub struct EventSignature( 11 | #[serde(serialize_with = "serialize_array")] 12 | #[serde(deserialize_with = "deserialize_array")] 13 | pub [u8; 64], 14 | ); 15 | 16 | impl EventSignature { 17 | pub fn new(digest: &[u8]) -> EventSignature { 18 | let mut a: [u8; 64] = [0; 64]; 19 | a.copy_from_slice(&digest[0..64]); 20 | EventSignature(a) 21 | } 22 | pub fn verify( 23 | &self, 24 | event: &Event

, 25 | peer: &PeerId, 26 | ) -> Result<(), Error> { 27 | let public_key = untrusted::Input::from(peer.as_ref()); 28 | let hash = event.hash()?; 29 | let msg = untrusted::Input::from(hash.as_ref()); 30 | let signature = untrusted::Input::from(self.0.as_ref()); 31 | verify(&ED25519, public_key, msg, signature).map_err(|e| Error::from(e)) 32 | } 33 | } 34 | 35 | fn serialize_array(array: &[T], serializer: S) -> Result 36 | where 37 | S: Serializer, 38 | T: Serialize, 39 | { 40 | array.serialize(serializer) 41 | } 42 | 43 | fn deserialize_array<'de, D>(deserializer: D) -> Result<[u8; 64], D::Error> 44 | where 45 | D: Deserializer<'de>, 46 | { 47 | let mut result: [u8; 64] = [0; 64]; 48 | let slice: Vec = Deserialize::deserialize(deserializer)?; 49 | if slice.len() != 64 { 50 | return Err(::serde::de::Error::custom("input slice has wrong length")); 51 | } 52 | result.copy_from_slice(&slice); 53 | Ok(result) 54 | } 55 | 56 | impl AsRef<[u8]> for EventSignature { 57 | fn as_ref(&self) -> &[u8] { 58 | self.0.as_ref() 59 | } 60 | } 61 | 62 | impl Debug for EventSignature { 63 | fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { 64 | self.0[..].fmt(formatter) 65 | } 66 | } 67 | 68 | impl Eq for EventSignature {} 69 | 70 | impl PartialEq for EventSignature { 71 | #[inline] 72 | fn eq(&self, other: &EventSignature) -> bool { 73 | self.0[..] == other.0[..] 74 | } 75 | #[inline] 76 | fn ne(&self, other: &EventSignature) -> bool { 77 | self.0[..] != other.0[..] 
78 | } 79 | } 80 | -------------------------------------------------------------------------------- /lachesis-rs/src/event/parents.rs: -------------------------------------------------------------------------------- 1 | use crate::event::event_hash::EventHash; 2 | use crate::hashgraph::Hashgraph; 3 | use failure::Error; 4 | use std::cmp::max; 5 | 6 | pub trait Parents { 7 | fn self_parent(&self) -> Result; 8 | } 9 | 10 | #[derive(Clone, Debug, Deserialize, Eq, PartialEq, Serialize)] 11 | pub struct ParentsPair(pub EventHash, pub EventHash); 12 | 13 | impl ParentsPair { 14 | pub fn max_round(&self, hg: H) -> Result { 15 | let other_round = hg.get(&self.1)?.round()?; 16 | let self_round = hg.get(&self.0)?.round()?; 17 | Ok(max(other_round, self_round)) 18 | } 19 | } 20 | 21 | impl Parents for ParentsPair { 22 | fn self_parent(&self) -> Result { 23 | Ok(self.0.clone()) 24 | } 25 | } 26 | -------------------------------------------------------------------------------- /lachesis-rs/src/hashgraph.rs: -------------------------------------------------------------------------------- 1 | use crate::errors::{HashgraphError, HashgraphErrorType}; 2 | use crate::event::event_hash::EventHash; 3 | use crate::event::parents::ParentsPair; 4 | use crate::event::Event; 5 | use crate::peer::PeerId; 6 | use failure::Error; 7 | use std::collections::{BTreeMap, HashMap}; 8 | use std::iter::repeat_with; 9 | 10 | #[derive(Deserialize, Serialize)] 11 | pub struct HashgraphWire(BTreeMap>); 12 | 13 | pub trait Hashgraph: Send + Sync { 14 | fn get_mut(&mut self, id: &EventHash) -> Result<&mut Event, Error>; 15 | fn get(&self, id: &EventHash) -> Result<&Event, Error>; 16 | fn insert(&mut self, hash: EventHash, event: Event); 17 | fn ancestors<'a>(&'a self, id: &'a EventHash) -> Result, Error>; 18 | fn other_ancestors<'a>(&'a self, id: &'a EventHash) -> Result, Error>; 19 | fn self_ancestors<'a>(&'a self, id: &'a EventHash) -> Result, Error>; 20 | fn higher(&self, a: &EventHash, b: &EventHash) -> 
Result; 21 | fn events_parents_can_see(&self, hash: &EventHash) 22 | -> Result, Error>; 23 | fn difference(&self, g: H) -> Vec; 24 | fn is_valid_event(&self, event: &Event) -> Result; 25 | fn contains_key(&self, id: &EventHash) -> bool; 26 | fn wire(&self) -> HashgraphWire; 27 | fn find_roots(&self) -> Vec; 28 | fn find_self_child(&self, eh: &EventHash) -> Result, Error>; 29 | fn get_events(&self) -> Vec; 30 | } 31 | 32 | #[derive(Clone, Debug)] 33 | pub struct BTreeHashgraph(BTreeMap>); 34 | 35 | impl BTreeHashgraph { 36 | pub fn new() -> BTreeHashgraph { 37 | BTreeHashgraph(BTreeMap::new()) 38 | } 39 | } 40 | 41 | impl From for BTreeHashgraph { 42 | fn from(v: HashgraphWire) -> Self { 43 | BTreeHashgraph(v.0) 44 | } 45 | } 46 | 47 | enum ParentPairElem { 48 | OtherParent, 49 | SelfParent, 50 | } 51 | 52 | fn _get_ancestors<'a>( 53 | graph: &'a BTreeHashgraph, 54 | id: &'a EventHash, 55 | parent_pair_elem: ParentPairElem, 56 | ) -> Result, Error> { 57 | let mut prev = Some(id); 58 | let mut error: Option = None; // Store only most recent error 59 | let v_eh: Vec<&EventHash> = repeat_with(|| { 60 | if let Some(previous) = prev { 61 | let send = Some(previous); 62 | match graph.get(previous) { 63 | Ok(event) => { 64 | prev = match event.parents() { 65 | Some(ParentsPair(self_parent, other_parent)) => { 66 | Some(match parent_pair_elem { 67 | ParentPairElem::OtherParent => other_parent, 68 | ParentPairElem::SelfParent => self_parent, 69 | }) 70 | } 71 | None => None, 72 | }; 73 | send 74 | } 75 | Err(e) => { 76 | error = Some(e); 77 | None 78 | } 79 | } 80 | } else { 81 | None 82 | } 83 | }) 84 | .take_while(|e| e.is_some()) 85 | .map(|v| v.unwrap()) // This is safe because of the `take_while` 86 | .collect(); 87 | if error.is_some() { 88 | return Err(error.unwrap()); 89 | } 90 | Ok(v_eh) 91 | } 92 | 93 | impl Hashgraph for BTreeHashgraph { 94 | fn get_mut(&mut self, id: &EventHash) -> Result<&mut Event, Error> { 95 | 
self.0.get_mut(id).ok_or(Error::from(HashgraphError::new( 96 | HashgraphErrorType::EventNotFound, 97 | ))) 98 | } 99 | 100 | fn get(&self, id: &EventHash) -> Result<&Event, Error> { 101 | self.0.get(id).ok_or(Error::from(HashgraphError::new( 102 | HashgraphErrorType::EventNotFound, 103 | ))) 104 | } 105 | 106 | fn insert(&mut self, hash: EventHash, event: Event) { 107 | self.0.insert(hash, event); 108 | } 109 | 110 | fn ancestors<'a>(&'a self, id: &'a EventHash) -> Result, Error> { 111 | let mut other_ancestors = self.other_ancestors(id)?; 112 | let self_ancestors = self.self_ancestors(id)?; 113 | other_ancestors.retain(|h| *h != id); 114 | other_ancestors.extend(self_ancestors.into_iter()); 115 | Ok(other_ancestors) 116 | } 117 | 118 | fn other_ancestors<'a>(&'a self, id: &'a EventHash) -> Result, Error> { 119 | _get_ancestors(self, id, ParentPairElem::OtherParent) 120 | } 121 | 122 | fn self_ancestors<'a>(&'a self, id: &'a EventHash) -> Result, Error> { 123 | _get_ancestors(self, id, ParentPairElem::SelfParent) 124 | } 125 | 126 | #[inline] 127 | fn higher(&self, a: &EventHash, b: &EventHash) -> Result { 128 | let a_self_ancestors = self.self_ancestors(a)?; 129 | if a_self_ancestors.contains(&b) { 130 | return Ok(true); 131 | } 132 | let b_self_ancestors = self.self_ancestors(b)?; 133 | if b_self_ancestors.contains(&a) { 134 | return Ok(false); 135 | } 136 | Ok(a_self_ancestors.len() > b_self_ancestors.len()) 137 | } 138 | 139 | #[inline] 140 | fn events_parents_can_see( 141 | &self, 142 | hash: &EventHash, 143 | ) -> Result, Error> { 144 | match self.get(hash)?.parents() { 145 | Some(ParentsPair(self_parent, other_parent)) => { 146 | let self_parent_event = self.get(self_parent)?; 147 | let other_parent_event = self.get(other_parent)?; 148 | let mut result = HashMap::new(); 149 | for (k, v) in self_parent_event.can_see().into_iter() { 150 | result.insert(k.clone(), v.clone()); 151 | } 152 | for (k, other) in other_parent_event.can_see().into_iter() { 153 | if 
result.contains_key(k) { 154 | let value = (&result[k]).clone(); 155 | if self.higher(other, &value)? { 156 | result.insert(k.clone(), other.clone()); 157 | } 158 | } else { 159 | result.insert(k.clone(), other.clone()); 160 | } 161 | } 162 | Ok(result) 163 | } 164 | None => Ok(HashMap::new()), 165 | } 166 | } 167 | 168 | fn difference(&self, g: H) -> Vec { 169 | self.0 170 | .keys() 171 | .filter(|e| !g.contains_key(e)) 172 | .map(|e| (*e).clone()) 173 | .collect() 174 | } 175 | 176 | fn is_valid_event(&self, event: &Event) -> Result { 177 | match event.parents() { 178 | Some(ParentsPair(self_parent, other_parent)) => Ok(self.0.contains_key(self_parent) 179 | && self.0.contains_key(other_parent) 180 | && self.0[self_parent].creator() == event.creator() 181 | && self.0[other_parent].creator() != event.creator()), 182 | None => Ok(true), 183 | } 184 | } 185 | 186 | fn contains_key(&self, id: &EventHash) -> bool { 187 | self.0.contains_key(id) 188 | } 189 | 190 | fn wire(&self) -> HashgraphWire { 191 | HashgraphWire(self.0.clone()) 192 | } 193 | 194 | fn find_roots(&self) -> Vec { 195 | self.0 196 | .values() 197 | .filter(|e| e.is_root()) 198 | .map(|e| match e.hash() { 199 | Ok(hash) => Some(hash), 200 | Err(e) => { 201 | debug!(target: "swirlds", "{}", e); 202 | return None; 203 | } 204 | }) 205 | .filter(|e| e.is_some()) 206 | .map(|e| e.unwrap()) 207 | .collect() 208 | } 209 | 210 | fn find_self_child(&self, eh: &EventHash) -> Result, Error> { 211 | let error: Option = None; 212 | let r = self 213 | .0 214 | .values() 215 | .find(|e| { 216 | let e = *e; 217 | match e.parents() { 218 | Some(ParentsPair(sp, _)) => sp == eh, 219 | None => false, 220 | } 221 | }) 222 | .map(|e| match e.hash() { 223 | Ok(parents_pair) => Some(parents_pair), 224 | Err(e) => { 225 | debug!(target: "hashgraph", "{}", e); 226 | None 227 | } 228 | }); 229 | if error.is_some() { 230 | Err(error.unwrap()) 231 | } else if r.is_some() { 232 | Ok(r.unwrap()) 233 | } else { 234 | 
Err(format_err!("find_self_child() returned None")) 235 | } 236 | } 237 | 238 | fn get_events(&self) -> Vec { 239 | self.0 240 | .keys() 241 | .map(|h| h.clone()) 242 | .collect::>() 243 | .clone() 244 | } 245 | } 246 | 247 | #[cfg(test)] 248 | mod tests { 249 | use super::{BTreeHashgraph, Hashgraph}; 250 | use crate::event::{event_hash::EventHash, parents::ParentsPair, Event}; 251 | use std::collections::HashMap; 252 | 253 | #[test] 254 | fn it_should_succeed_on_event_with_no_parents() { 255 | let mut hashgraph = BTreeHashgraph::new(); 256 | let event = Event::new(vec![], None, Vec::new()); 257 | let hash = event.hash().unwrap(); 258 | hashgraph.insert(hash.clone(), event.clone()); 259 | assert!(hashgraph.is_valid_event(&event).unwrap()); 260 | } 261 | 262 | #[test] 263 | fn it_should_succeed_on_event_with_correct_parents() { 264 | let mut hashgraph = BTreeHashgraph::new(); 265 | let n1 = vec![42]; 266 | let n2 = vec![43]; 267 | let self_parent = Event::new(vec![], None, n1.clone()); 268 | let other_parent = Event::new(vec![], None, n2); 269 | let sphash = self_parent.hash().unwrap(); 270 | let ophash = other_parent.hash().unwrap(); 271 | let event = Event::new( 272 | vec![], 273 | Some(ParentsPair(sphash.clone(), ophash.clone())), 274 | n1, 275 | ); 276 | let hash = event.hash().unwrap(); 277 | hashgraph.insert(ophash.clone(), other_parent); 278 | hashgraph.insert(sphash.clone(), self_parent); 279 | hashgraph.insert(hash.clone(), event.clone()); 280 | assert!(hashgraph.is_valid_event(&event).unwrap()); 281 | } 282 | 283 | #[test] 284 | fn it_should_fail_if_self_parent_creator_differs() { 285 | let mut hashgraph = BTreeHashgraph::new(); 286 | let n1 = vec![42]; 287 | let n2 = vec![43]; 288 | let n3 = vec![44]; 289 | let self_parent = Event::new(vec![], None, n1); 290 | let other_parent = Event::new(vec![], None, n2); 291 | let sphash = self_parent.hash().unwrap(); 292 | let ophash = other_parent.hash().unwrap(); 293 | let event = Event::new( 294 | vec![], 295 | 
Some(ParentsPair(sphash.clone(), ophash.clone())), 296 | n3, 297 | ); 298 | let hash = event.hash().unwrap(); 299 | hashgraph.insert(ophash.clone(), other_parent); 300 | hashgraph.insert(sphash.clone(), self_parent); 301 | hashgraph.insert(hash.clone(), event.clone()); 302 | assert!(!hashgraph.is_valid_event(&event).unwrap()); 303 | } 304 | 305 | #[test] 306 | fn it_should_fail_if_other_parent_its_sent_by_same_node() { 307 | let mut hashgraph = BTreeHashgraph::new(); 308 | let n1 = vec![42]; 309 | let n2 = vec![43]; 310 | let self_parent = Event::new(vec![], None, n1); 311 | let other_parent = Event::new(vec![], None, n2.clone()); 312 | let sphash = self_parent.hash().unwrap(); 313 | let ophash = other_parent.hash().unwrap(); 314 | let event = Event::new( 315 | vec![], 316 | Some(ParentsPair(sphash.clone(), ophash.clone())), 317 | n2.clone(), 318 | ); 319 | let hash = event.hash().unwrap(); 320 | hashgraph.insert(ophash.clone(), other_parent); 321 | hashgraph.insert(sphash.clone(), self_parent); 322 | hashgraph.insert(hash.clone(), event.clone()); 323 | assert!(!hashgraph.is_valid_event(&event).unwrap()); 324 | } 325 | 326 | #[test] 327 | fn it_should_fail_if_self_parent_isnt_in_the_graph() { 328 | let mut hashgraph = BTreeHashgraph::new(); 329 | let n1 = vec![42]; 330 | let n2 = vec![43]; 331 | let self_parent: Event = Event::new(vec![], None, n1); 332 | let other_parent = Event::new(vec![], None, n2.clone()); 333 | let sphash = self_parent.hash().unwrap(); 334 | let ophash = other_parent.hash().unwrap(); 335 | let event = Event::new( 336 | vec![], 337 | Some(ParentsPair(sphash.clone(), ophash.clone())), 338 | n2.clone(), 339 | ); 340 | let hash = event.hash().unwrap(); 341 | hashgraph.insert(ophash.clone(), other_parent); 342 | hashgraph.insert(hash.clone(), event.clone()); 343 | assert!(!hashgraph.is_valid_event(&event).unwrap()); 344 | } 345 | 346 | #[test] 347 | fn it_should_fail_if_other_parent_isnt_in_the_graph() { 348 | let mut hashgraph = 
BTreeHashgraph::new(); 349 | let n1 = vec![42]; 350 | let n2 = vec![43]; 351 | let self_parent = Event::new(vec![], None, n1); 352 | let other_parent: Event = Event::new(vec![], None, n2.clone()); 353 | let sphash = self_parent.hash().unwrap(); 354 | let ophash = other_parent.hash().unwrap(); 355 | let event = Event::new( 356 | vec![], 357 | Some(ParentsPair(sphash.clone(), ophash.clone())), 358 | n2.clone(), 359 | ); 360 | let hash = event.hash().unwrap(); 361 | hashgraph.insert(sphash.clone(), self_parent); 362 | hashgraph.insert(hash.clone(), event.clone()); 363 | assert!(!hashgraph.is_valid_event(&event).unwrap()); 364 | } 365 | 366 | #[test] 367 | fn it_should_calculate_the_difference_of_two_hashgraphs() { 368 | let event1 = Event::new(vec![b"42".to_vec()], None, Vec::new()); 369 | let hash1 = event1.hash().unwrap(); 370 | let event2 = Event::new(vec![b"fish".to_vec()], None, Vec::new()); 371 | let hash2 = event2.hash().unwrap(); 372 | let event3 = Event::new(vec![b"ford prefect".to_vec()], None, Vec::new()); 373 | let hash3 = event3.hash().unwrap(); 374 | let mut hg1 = BTreeHashgraph::new(); 375 | let mut hg2 = BTreeHashgraph::new(); 376 | hg1.insert(hash1.clone(), event1); 377 | hg1.insert(hash2.clone(), event2); 378 | hg2.insert(hash3.clone(), event3); 379 | let mut expected = vec![hash1.clone(), hash2.clone()]; 380 | expected.sort(); 381 | let mut actual = hg1.difference(hg2); 382 | actual.sort(); 383 | assert_eq!(expected, actual) 384 | } 385 | 386 | #[test] 387 | fn it_should_return_self_ancestors() { 388 | let event1 = Event::new(vec![b"42".to_vec()], None, Vec::new()); 389 | let hash1 = event1.hash().unwrap(); 390 | let event2 = Event::new(vec![b"fish".to_vec()], None, vec![1]); 391 | let hash2 = event2.hash().unwrap(); 392 | let event3 = Event::new( 393 | vec![b"ford prefect".to_vec()], 394 | Some(ParentsPair(hash1.clone(), hash2.clone())), 395 | Vec::new(), 396 | ); 397 | let hash3 = event3.hash().unwrap(); 398 | let event4 = 
Event::new(vec![b"42".to_vec()], None, vec![1]); 399 | let hash4 = event4.hash().unwrap(); 400 | let event5 = Event::new( 401 | vec![b"ford prefect".to_vec()], 402 | Some(ParentsPair(hash3.clone(), hash4.clone())), 403 | Vec::new(), 404 | ); 405 | let hash5 = event5.hash().unwrap(); 406 | let event6 = Event::new(vec![b"42".to_vec()], None, vec![2]); 407 | let hash6 = event6.hash().unwrap(); 408 | let event7 = Event::new( 409 | vec![b"ford prefect".to_vec()], 410 | Some(ParentsPair(hash5.clone(), hash6.clone())), 411 | Vec::new(), 412 | ); 413 | let hash7 = event7.hash().unwrap(); 414 | let mut hashgraph = BTreeHashgraph::new(); 415 | hashgraph.insert(hash1.clone(), event1.clone()); 416 | hashgraph.insert(hash2.clone(), event2.clone()); 417 | hashgraph.insert(hash3.clone(), event3.clone()); 418 | hashgraph.insert(hash4.clone(), event4.clone()); 419 | hashgraph.insert(hash5.clone(), event5.clone()); 420 | hashgraph.insert(hash6.clone(), event6.clone()); 421 | hashgraph.insert(hash7.clone(), event7.clone()); 422 | let mut expected = vec![&hash1, &hash3, &hash5, &hash7]; 423 | expected.sort(); 424 | let mut actual = hashgraph.self_ancestors(&hash7).unwrap(); 425 | actual.sort(); 426 | assert_eq!(expected, actual); 427 | } 428 | 429 | #[test] 430 | fn it_should_return_other_ancestors() { 431 | let event1 = Event::new(vec![b"42".to_vec()], None, Vec::new()); 432 | let hash1 = event1.hash().unwrap(); 433 | let event2 = Event::new(vec![b"fish".to_vec()], None, vec![1]); 434 | let hash2 = event2.hash().unwrap(); 435 | let event3 = Event::new( 436 | vec![b"ford prefect".to_vec()], 437 | Some(ParentsPair(hash2.clone(), hash1.clone())), 438 | Vec::new(), 439 | ); 440 | let hash3 = event3.hash().unwrap(); 441 | let event4 = Event::new(vec![b"42".to_vec()], None, vec![1]); 442 | let hash4 = event4.hash().unwrap(); 443 | let event5 = Event::new( 444 | vec![b"ford prefect".to_vec()], 445 | Some(ParentsPair(hash4.clone(), hash3.clone())), 446 | Vec::new(), 447 | ); 448 | let hash5 
= event5.hash().unwrap(); 449 | let event6 = Event::new(vec![b"42".to_vec()], None, vec![2]); 450 | let hash6 = event6.hash().unwrap(); 451 | let event7 = Event::new( 452 | vec![b"ford prefect".to_vec()], 453 | Some(ParentsPair(hash6.clone(), hash5.clone())), 454 | Vec::new(), 455 | ); 456 | let hash7 = event7.hash().unwrap(); 457 | let mut hashgraph = BTreeHashgraph::new(); 458 | hashgraph.insert(hash1.clone(), event1.clone()); 459 | hashgraph.insert(hash2.clone(), event2.clone()); 460 | hashgraph.insert(hash3.clone(), event3.clone()); 461 | hashgraph.insert(hash4.clone(), event4.clone()); 462 | hashgraph.insert(hash5.clone(), event5.clone()); 463 | hashgraph.insert(hash6.clone(), event6.clone()); 464 | hashgraph.insert(hash7.clone(), event7.clone()); 465 | let mut expected = vec![&hash1, &hash3, &hash5, &hash7]; 466 | expected.sort(); 467 | let mut actual = hashgraph.other_ancestors(&hash7).unwrap(); 468 | actual.sort(); 469 | assert_eq!(expected, actual); 470 | } 471 | 472 | #[test] 473 | fn it_should_return_ancestors() { 474 | let event1 = Event::new(vec![b"42".to_vec()], None, Vec::new()); 475 | let hash1 = event1.hash().unwrap(); 476 | let event2 = Event::new(vec![b"fish".to_vec()], None, vec![1]); 477 | let hash2 = event2.hash().unwrap(); 478 | let event3 = Event::new( 479 | vec![b"ford prefect".to_vec()], 480 | Some(ParentsPair(hash2.clone(), hash1.clone())), 481 | Vec::new(), 482 | ); 483 | let hash3 = event3.hash().unwrap(); 484 | let event4 = Event::new(vec![b"42".to_vec()], None, vec![1]); 485 | let hash4 = event4.hash().unwrap(); 486 | let event5 = Event::new( 487 | vec![b"ford prefect".to_vec()], 488 | Some(ParentsPair(hash4.clone(), hash3.clone())), 489 | Vec::new(), 490 | ); 491 | let hash5 = event5.hash().unwrap(); 492 | let event6 = Event::new(vec![b"42".to_vec()], None, vec![2]); 493 | let hash6 = event6.hash().unwrap(); 494 | let event7 = Event::new( 495 | vec![b"ford prefect".to_vec()], 496 | Some(ParentsPair(hash6.clone(), hash5.clone())), 497 
| Vec::new(), 498 | ); 499 | let hash7 = event7.hash().unwrap(); 500 | let mut hashgraph = BTreeHashgraph::new(); 501 | hashgraph.insert(hash1.clone(), event1.clone()); 502 | hashgraph.insert(hash2.clone(), event2.clone()); 503 | hashgraph.insert(hash3.clone(), event3.clone()); 504 | hashgraph.insert(hash4.clone(), event4.clone()); 505 | hashgraph.insert(hash5.clone(), event5.clone()); 506 | hashgraph.insert(hash6.clone(), event6.clone()); 507 | hashgraph.insert(hash7.clone(), event7.clone()); 508 | let mut expected = vec![&hash1, &hash3, &hash5, &hash6, &hash7]; 509 | expected.sort(); 510 | let mut actual = hashgraph.ancestors(&hash7).unwrap(); 511 | actual.sort(); 512 | assert_eq!(expected, actual); 513 | } 514 | 515 | #[test] 516 | fn it_should_not_be_higher_if_its_ancestor() { 517 | let event1 = Event::new(vec![b"42".to_vec()], None, Vec::new()); 518 | let hash1 = event1.hash().unwrap(); 519 | let event2 = Event::new(vec![b"fish".to_vec()], None, vec![1]); 520 | let hash2 = event2.hash().unwrap(); 521 | let event3 = Event::new( 522 | vec![b"ford prefect".to_vec()], 523 | Some(ParentsPair(hash2.clone(), hash1.clone())), 524 | Vec::new(), 525 | ); 526 | let hash3 = event3.hash().unwrap(); 527 | let event4 = Event::new(vec![b"42".to_vec()], None, vec![1]); 528 | let hash4 = event4.hash().unwrap(); 529 | let event5 = Event::new( 530 | vec![b"ford prefect".to_vec()], 531 | Some(ParentsPair(hash4.clone(), hash3.clone())), 532 | Vec::new(), 533 | ); 534 | let hash5 = event5.hash().unwrap(); 535 | let event6 = Event::new(vec![b"42".to_vec()], None, vec![2]); 536 | let hash6 = event6.hash().unwrap(); 537 | let event7 = Event::new( 538 | vec![b"ford prefect".to_vec()], 539 | Some(ParentsPair(hash6.clone(), hash5.clone())), 540 | Vec::new(), 541 | ); 542 | let hash7 = event7.hash().unwrap(); 543 | let mut hashgraph = BTreeHashgraph::new(); 544 | hashgraph.insert(hash1.clone(), event1.clone()); 545 | hashgraph.insert(hash2.clone(), event2.clone()); 546 | 
hashgraph.insert(hash3.clone(), event3.clone()); 547 | hashgraph.insert(hash4.clone(), event4.clone()); 548 | hashgraph.insert(hash5.clone(), event5.clone()); 549 | hashgraph.insert(hash6.clone(), event6.clone()); 550 | hashgraph.insert(hash7.clone(), event7.clone()); 551 | assert!(!hashgraph.higher(&hash6, &hash7).unwrap()); 552 | } 553 | 554 | #[test] 555 | fn it_should_be_higher_if_its_child() { 556 | let payloads = [b"42".to_vec(), b"fish".to_vec(), b"ford prefect".to_vec()]; 557 | 558 | let event1 = Event::new(vec![payloads[0].clone()], None, Vec::new()); 559 | let hash1 = event1.hash().unwrap(); 560 | let event2 = Event::new(vec![payloads[1].clone()], None, vec![1]); 561 | let hash2 = event2.hash().unwrap(); 562 | let event3 = Event::new( 563 | vec![payloads[2].clone()], 564 | Some(ParentsPair(hash2.clone(), hash1.clone())), 565 | Vec::new(), 566 | ); 567 | let hash3 = event3.hash().unwrap(); 568 | let event4 = Event::new(vec![payloads[0].clone()], None, vec![1]); 569 | let hash4 = event4.hash().unwrap(); 570 | let event5 = Event::new( 571 | vec![payloads[2].clone()], 572 | Some(ParentsPair(hash4.clone(), hash3.clone())), 573 | Vec::new(), 574 | ); 575 | let hash5 = event5.hash().unwrap(); 576 | let event6 = Event::new(vec![payloads[0].clone()], None, vec![2]); 577 | let hash6 = event6.hash().unwrap(); 578 | let event7 = Event::new( 579 | vec![payloads[2].clone()], 580 | Some(ParentsPair(hash6.clone(), hash5.clone())), 581 | Vec::new(), 582 | ); 583 | let hash7 = event7.hash().unwrap(); 584 | let mut hashgraph = BTreeHashgraph::new(); 585 | hashgraph.insert(hash1.clone(), event1.clone()); 586 | hashgraph.insert(hash2.clone(), event2.clone()); 587 | hashgraph.insert(hash3.clone(), event3.clone()); 588 | hashgraph.insert(hash4.clone(), event4.clone()); 589 | hashgraph.insert(hash5.clone(), event5.clone()); 590 | hashgraph.insert(hash6.clone(), event6.clone()); 591 | hashgraph.insert(hash7.clone(), event7.clone()); 592 | assert!(hashgraph.higher(&hash7, 
&hash6).unwrap()); 593 | } 594 | 595 | #[test] 596 | fn it_should_return_expected_events_that_parents_can_see() { 597 | let event1 = Event::new(vec![b"42".to_vec()], None, Vec::new()); 598 | let hash1 = event1.hash().unwrap(); 599 | let event2 = Event::new(vec![b"fish".to_vec()], None, vec![1]); 600 | let hash2 = event2.hash().unwrap(); 601 | let event3 = Event::new( 602 | vec![b"ford prefect".to_vec()], 603 | Some(ParentsPair(hash2.clone(), hash1.clone())), 604 | Vec::new(), 605 | ); 606 | let hash3 = event3.hash().unwrap(); 607 | let event4 = Event::new(vec![b"42".to_vec()], None, vec![1]); 608 | let hash4 = event4.hash().unwrap(); 609 | let event5 = Event::new( 610 | vec![b"ford prefect".to_vec()], 611 | Some(ParentsPair(hash4.clone(), hash3.clone())), 612 | Vec::new(), 613 | ); 614 | let hash5 = event5.hash().unwrap(); 615 | let event6 = Event::new(vec![b"42".to_vec()], None, vec![2]); 616 | let hash6 = event6.hash().unwrap(); 617 | let event7 = Event::new( 618 | vec![b"ford prefect".to_vec()], 619 | Some(ParentsPair(hash6.clone(), hash5.clone())), 620 | Vec::new(), 621 | ); 622 | let hash7 = event7.hash().unwrap(); 623 | let mut hashgraph = BTreeHashgraph::new(); 624 | hashgraph.insert(hash1.clone(), event1.clone()); 625 | hashgraph.insert(hash2.clone(), event2.clone()); 626 | hashgraph.insert(hash3.clone(), event3.clone()); 627 | hashgraph.insert(hash4.clone(), event4.clone()); 628 | hashgraph.insert(hash5.clone(), event5.clone()); 629 | hashgraph.insert(hash6.clone(), event6.clone()); 630 | hashgraph.insert(hash7.clone(), event7.clone()); 631 | assert!(hashgraph.higher(&hash5, &hash6).unwrap()); 632 | } 633 | 634 | #[test] 635 | fn it_should_be_higher_if_has_more_ancestors() { 636 | let event1 = Event::new(vec![b"42".to_vec()], None, Vec::new()); 637 | let hash1 = event1.hash().unwrap(); 638 | let event2 = Event::new(vec![b"fish".to_vec()], None, vec![1]); 639 | let hash2 = event2.hash().unwrap(); 640 | let event3 = Event::new( 641 | vec![b"ford 
prefect".to_vec()], 642 | Some(ParentsPair(hash2.clone(), hash1.clone())), 643 | Vec::new(), 644 | ); 645 | let hash3 = event3.hash().unwrap(); 646 | let event4 = Event::new(vec![b"42".to_vec()], None, vec![1]); 647 | let hash4 = event4.hash().unwrap(); 648 | let mut event5 = Event::new( 649 | vec![b"ford prefect".to_vec()], 650 | Some(ParentsPair(hash4.clone(), hash3.clone())), 651 | Vec::new(), 652 | ); 653 | event5.add_can_see(vec![2], hash3.clone()); 654 | event5.add_can_see(vec![1], hash4.clone()); 655 | let hash5 = event5.hash().unwrap(); 656 | let mut event6 = Event::new(vec![b"42".to_vec()], None, vec![2]); 657 | event6.add_can_see(vec![2], hash4.clone()); 658 | let hash6 = event6.hash().unwrap(); 659 | let event7 = Event::new( 660 | vec![b"ford prefect".to_vec()], 661 | Some(ParentsPair(hash6.clone(), hash5.clone())), 662 | Vec::new(), 663 | ); 664 | let hash7 = event7.hash().unwrap(); 665 | let mut hashgraph = BTreeHashgraph::new(); 666 | hashgraph.insert(hash1.clone(), event1.clone()); 667 | hashgraph.insert(hash2.clone(), event2.clone()); 668 | hashgraph.insert(hash3.clone(), event3.clone()); 669 | hashgraph.insert(hash4.clone(), event4.clone()); 670 | hashgraph.insert(hash5.clone(), event5.clone()); 671 | hashgraph.insert(hash6.clone(), event6.clone()); 672 | hashgraph.insert(hash7.clone(), event7.clone()); 673 | let actual = hashgraph.events_parents_can_see(&hash7).unwrap(); 674 | let expected: HashMap, EventHash> = 675 | [(vec![2], hash3.clone()), (vec![1], hash4.clone())] 676 | .iter() 677 | .cloned() 678 | .collect(); 679 | assert_eq!(expected, actual); 680 | } 681 | } 682 | -------------------------------------------------------------------------------- /lachesis-rs/src/lachesis.rs: -------------------------------------------------------------------------------- 1 | use crate::errors::{ 2 | HashgraphError, HashgraphErrorType, ResourceFramesPoisonError, ResourceHashgraphPoisonError, 3 | ResourceHeadPoisonError, 4 | }; 5 | use 
crate::event::event_hash::EventHash; 6 | use crate::event::Event; 7 | use crate::lachesis::opera::Opera; 8 | use crate::node::Node; 9 | use crate::peer::{Peer, PeerId}; 10 | use failure::Error; 11 | use rand::prelude::IteratorRandom; 12 | use rand::Rng; 13 | use ring::signature::Ed25519KeyPair; 14 | use std::collections::{HashMap, HashSet}; 15 | use std::sync::atomic::{AtomicUsize, Ordering}; 16 | use std::sync::Mutex; 17 | 18 | pub mod frame; 19 | pub mod opera; 20 | pub mod parents_list; 21 | 22 | use self::frame::Frame; 23 | use self::opera::OperaWire; 24 | use self::parents_list::ParentsList; 25 | 26 | const H: usize = 3; 27 | 28 | pub struct Lachesis + Clone> { 29 | current_frame: AtomicUsize, 30 | frames: Mutex>, 31 | head: Mutex>, 32 | k: usize, 33 | network: HashMap, 34 | opera: Mutex, 35 | pk: Ed25519KeyPair, 36 | } 37 | 38 | impl + Clone> Lachesis

{ 39 | pub fn new(k: usize, pk: Ed25519KeyPair) -> Lachesis

{ 40 | let frame = Frame::new(0); 41 | let current_frame = AtomicUsize::new(frame.id()); 42 | let frames = Mutex::new(vec![frame]); 43 | let network = HashMap::new(); 44 | let opera = Mutex::new(Opera::new()); 45 | let head = Mutex::new(None); 46 | Lachesis { 47 | current_frame, 48 | frames, 49 | head, 50 | k, 51 | network, 52 | opera, 53 | pk, 54 | } 55 | } 56 | 57 | pub fn add_peer(&mut self, p: P) { 58 | self.network.insert(p.id().clone(), p); 59 | } 60 | 61 | #[inline] 62 | fn select_peers(&self, rng: &mut R) -> Result, Error> { 63 | Ok(self 64 | .network 65 | .values() 66 | .choose_multiple(rng, self.k - 1) 67 | .into_iter() 68 | .map(|p| p.clone()) 69 | .collect()) 70 | } 71 | 72 | fn sync(&self, rng: &mut R) -> Result<(), Error> { 73 | let peers = self.select_peers(rng)?; 74 | let mut opera = get_from_mutex!(self.opera, ResourceHashgraphPoisonError)?; 75 | let mut parent_hashes = vec![]; 76 | let peer_id = self.pk.public_key_bytes().to_vec(); 77 | for p in peers { 78 | let (h, new_events) = p.get_sync(peer_id.clone(), Some(&opera))?; 79 | opera.sync(new_events); 80 | parent_hashes.push(h); 81 | } 82 | let parents = ParentsList(parent_hashes); 83 | let new_head = Event::new(vec![], Some(parents), peer_id.clone()); 84 | let new_head_hash = new_head.hash()?; 85 | let mut head = get_from_mutex!(self.head, ResourceHeadPoisonError)?; 86 | *head = Some(new_head_hash.clone()); 87 | opera.insert( 88 | new_head_hash.clone(), 89 | new_head, 90 | self.current_frame.load(Ordering::Relaxed), 91 | )?; 92 | Ok(()) 93 | } 94 | 95 | fn root_selection(&self) -> Result<(), Error> { 96 | let new_frame = self.assign_new_roots()?; 97 | self.maybe_create_new_frame(new_frame)?; 98 | Ok(()) 99 | } 100 | 101 | fn clotho_selection(&self) -> Result<(), Error> { 102 | let current_frame_id = self.current_frame.load(Ordering::Relaxed); 103 | if current_frame_id > 0 { 104 | let (current_frame, previous_frame): (Frame, Frame) = 105 | self.get_frame_and_previous_frame(current_frame_id)?; 106 
| for root in previous_frame.root_set.iter() { 107 | let seen_by = self.get_how_many_can_see(¤t_frame, root)?; 108 | if seen_by > self.network.len() / 3 { 109 | self.set_clotho(root)?; 110 | self.set_clotho_time(root, current_frame_id)?; 111 | } 112 | } 113 | } 114 | Ok(()) 115 | } 116 | 117 | fn get_how_many_can_see( 118 | &self, 119 | current_frame: &Frame, 120 | root: &EventHash, 121 | ) -> Result { 122 | let opera = get_from_mutex!(self.opera, ResourceHashgraphPoisonError)?; 123 | let mut error: Option = None; 124 | 125 | let count = current_frame 126 | .root_set 127 | .iter() 128 | .map(|eh| match opera.can_see(&*eh, root) { 129 | Ok(seen) => Some(seen), 130 | Err(e) => { 131 | error = Some(e); 132 | None 133 | } 134 | }) 135 | .filter(|eh| eh.is_some()) 136 | .map(|eh| eh.unwrap()) 137 | .count(); 138 | if error.is_some() { 139 | return Err(error.unwrap()); 140 | } 141 | Ok(count) 142 | } 143 | 144 | fn set_clotho(&self, root: &EventHash) -> Result<(), Error> { 145 | let mut opera = get_from_mutex!(self.opera, ResourceHashgraphPoisonError)?; 146 | opera.set_clotho(root)?; 147 | Ok(()) 148 | } 149 | 150 | fn get_frame_and_previous_frame( 151 | &self, 152 | current_frame_id: usize, 153 | ) -> Result<(Frame, Frame), Error> { 154 | let frames = get_from_mutex!(self.frames, ResourceFramesPoisonError)?; 155 | let current_frame = &frames[current_frame_id]; 156 | let previous_frame = &frames[current_frame_id - 1]; 157 | Ok((current_frame.clone(), previous_frame.clone())) 158 | } 159 | 160 | fn set_clotho_time(&self, hash: &EventHash, current_frame_id: usize) -> Result<(), Error> { 161 | let mut frames = get_from_mutex!(self.frames, ResourceFramesPoisonError)?; 162 | let frame = &mut frames[current_frame_id - 1]; 163 | let current_frame = self.current_frame.load(Ordering::Relaxed); 164 | let cloth_frame_id = frame.id(); 165 | for d in 3..(current_frame - cloth_frame_id) { 166 | let previous_frame = cloth_frame_id - d; 167 | for root in frame.root_set.clone().iter() { 
168 | if d == 3 { 169 | self.set_clotho_time_from_event(root, frame)?; 170 | } else { 171 | self.set_clotho_from_reslection(hash, root, previous_frame, d)?; 172 | } 173 | } 174 | } 175 | Ok(()) 176 | } 177 | 178 | fn set_clotho_time_from_event(&self, root: &EventHash, frame: &mut Frame) -> Result<(), Error> { 179 | let opera = get_from_mutex!(self.opera, ResourceHashgraphPoisonError)?; 180 | let event = opera.get_event(root)?; 181 | frame.set_clotho_time(root.clone(), event.lamport_timestamp); 182 | Ok(()) 183 | } 184 | 185 | fn set_clotho_from_reslection( 186 | &self, 187 | hash: &EventHash, 188 | root: &EventHash, 189 | previous_frame: usize, 190 | d: usize, 191 | ) -> Result<(), Error> { 192 | let mut frames = get_from_mutex!(self.frames, ResourceFramesPoisonError)?; 193 | let frame: &mut Frame = &mut frames[previous_frame]; 194 | let t = self.clotho_time_reselection(frame.root_set.clone())?; 195 | let mut opera = get_from_mutex!(self.opera, ResourceHashgraphPoisonError)?; 196 | 197 | let mut error: Option = None; 198 | 199 | let k = frame 200 | .root_set 201 | .iter() 202 | .map(|h| match opera.get_event(h) { 203 | Ok(event) => Some(event.lamport_timestamp), 204 | Err(e) => { 205 | error = Some(e); 206 | None 207 | } 208 | }) 209 | .filter(|t1| t1.is_some() && t == t1.unwrap()) 210 | .count(); 211 | if error.is_some() { 212 | return Err(error.unwrap()); 213 | } else if d % H > 0 { 214 | if k > self.network.len() * 2 / 3 { 215 | opera.set_consensus_time(hash, t)?; 216 | } 217 | frame.set_clotho_time(root.clone(), t); 218 | } else { 219 | let t = frame 220 | .root_set 221 | .iter() 222 | .map(|h: &EventHash| -> Option { 223 | match opera.get_event(h) { 224 | Ok(event) => Some(event.lamport_timestamp), 225 | Err(e) => { 226 | error = Some(e); 227 | None 228 | } 229 | } 230 | }) 231 | .filter(|h: &Option| h.is_some()) 232 | .map(|h: Option| h.unwrap()) 233 | .min() 234 | .ok_or(Error::from(HashgraphError::new( 235 | HashgraphErrorType::NoLamportTimeSet, 236 | )))?; 
237 | frame.set_clotho_time(root.clone(), t); 238 | } 239 | if error.is_some() { 240 | return Err(error.unwrap()); 241 | } 242 | Ok(()) 243 | } 244 | 245 | fn clotho_time_reselection(&self, root_set: HashSet) -> Result { 246 | let mut counts: HashMap = HashMap::with_capacity(root_set.len()); 247 | let opera = get_from_mutex!(self.opera, ResourceHashgraphPoisonError)?; 248 | for r in root_set { 249 | let event = opera.get_event(&r)?; 250 | let count = counts 251 | .get(&event.lamport_timestamp) 252 | .map(|v: &usize| v.clone()) 253 | .unwrap_or(0); 254 | counts.insert(event.lamport_timestamp, count + 1); 255 | } 256 | let max_count = counts 257 | .values() 258 | .min() 259 | .ok_or(Error::from(HashgraphError::new( 260 | HashgraphErrorType::NoLamportTimeSet, 261 | )))? 262 | .clone(); 263 | let time = counts 264 | .iter() 265 | .filter(|(_t, c)| c.clone() == &max_count) 266 | .map(|(t, _c)| t.clone()) 267 | .min() 268 | .ok_or(Error::from(HashgraphError::new( 269 | HashgraphErrorType::NoLamportTimeSet, 270 | )))?; 271 | Ok(time) 272 | } 273 | 274 | fn assign_new_roots(&self) -> Result, Error> { 275 | let mut opera = get_from_mutex!(self.opera, ResourceHashgraphPoisonError)?; 276 | let mut new_root = vec![]; 277 | let mut new_frame = vec![]; 278 | for e in opera.unfamous_events().clone() { 279 | let is_root = 280 | e.flag_table.is_empty() || e.flag_table.len() > 2 / 3 * self.network.len(); 281 | if is_root { 282 | let hash = e.event.hash()?; 283 | new_root.push(hash.clone()); 284 | if !e.flag_table.is_empty() { 285 | new_frame.push(hash); 286 | } 287 | } 288 | } 289 | for h in new_root { 290 | opera.set_root(&h)?; 291 | } 292 | Ok(new_frame) 293 | } 294 | 295 | fn maybe_create_new_frame(&self, new_frame: Vec) -> Result<(), Error> { 296 | let mut opera = get_from_mutex!(self.opera, ResourceHashgraphPoisonError)?; 297 | if !new_frame.is_empty() { 298 | let mut new_current_frame = Frame::new(self.current_frame.load(Ordering::Relaxed) + 1); 299 | let new_current_frame_id 
= new_current_frame.id(); 300 | self.current_frame 301 | .store(new_current_frame_id, Ordering::Relaxed); 302 | for h in new_frame { 303 | opera.change_frame(&h, new_current_frame_id)?; 304 | new_current_frame.add(h); 305 | } 306 | let mut frames = get_from_mutex!(self.frames, ResourceFramesPoisonError)?; 307 | frames.push(new_current_frame); 308 | } 309 | Ok(()) 310 | } 311 | } 312 | 313 | impl + Clone> Node for Lachesis

{ 314 | type D = OperaWire; 315 | type P = ParentsList; 316 | fn run(&self, rng: &mut R) -> Result<(), Error> { 317 | self.sync(rng)?; 318 | self.root_selection()?; 319 | self.clotho_selection()?; 320 | Ok(()) 321 | } 322 | 323 | fn respond_message(&self, known: Option) -> Result<(EventHash, OperaWire), Error> { 324 | let mut opera = get_from_mutex!(self.opera, ResourceHashgraphPoisonError)?; 325 | let head = get_from_mutex!(self.head, ResourceHeadPoisonError)?; 326 | let resp = match known { 327 | Some(remote) => { 328 | if remote.lamport_timestamp > opera.lamport_timestamp { 329 | opera.set_lamport(remote.lamport_timestamp); 330 | } 331 | opera.diff(remote) 332 | } 333 | None => opera.wire(), 334 | }; 335 | match head.clone() { 336 | Some(cloned_head) => Ok((cloned_head, resp)), 337 | None => Err(format_err!("head.clone() returned None")), 338 | } 339 | } 340 | 341 | fn add_transaction(&self, _msg: Vec) -> Result<(), Error> { 342 | Ok(()) 343 | } 344 | 345 | fn get_ordered_events(&self) -> Result>, Error> { 346 | Ok(Vec::new()) 347 | } 348 | } 349 | -------------------------------------------------------------------------------- /lachesis-rs/src/lachesis/frame.rs: -------------------------------------------------------------------------------- 1 | use crate::event::event_hash::EventHash; 2 | use std::collections::{HashMap, HashSet}; 3 | 4 | #[derive(Clone)] 5 | pub struct Frame { 6 | clotho_times: HashMap, 7 | id: usize, 8 | pub root_set: HashSet, 9 | } 10 | 11 | impl Frame { 12 | pub fn new(id: usize) -> Frame { 13 | Frame { 14 | id, 15 | clotho_times: HashMap::new(), 16 | root_set: HashSet::new(), 17 | } 18 | } 19 | 20 | pub fn id(&self) -> usize { 21 | self.id 22 | } 23 | 24 | pub fn add(&mut self, hash: EventHash) { 25 | self.root_set.insert(hash); 26 | } 27 | 28 | pub fn set_clotho_time(&mut self, hash: EventHash, time: usize) { 29 | self.clotho_times.insert(hash, time); 30 | } 31 | } 32 | 
-------------------------------------------------------------------------------- /lachesis-rs/src/lachesis/opera.rs: -------------------------------------------------------------------------------- 1 | use super::parents_list::ParentsList; 2 | use crate::errors::{HashgraphError, HashgraphErrorType}; 3 | use crate::event::event_hash::EventHash; 4 | use crate::event::Event; 5 | use failure::Error; 6 | use std::collections::{BTreeMap, HashMap, HashSet}; 7 | use std::iter::FromIterator; 8 | 9 | #[derive(Clone, Deserialize, PartialEq, Serialize)] 10 | pub enum OperaEventType { 11 | Clotho(Option), 12 | Root, 13 | Undefined, 14 | } 15 | 16 | #[derive(Clone, Deserialize, Serialize)] 17 | pub struct OperaEvent { 18 | pub event: Event, 19 | pub flag_table: HashSet, 20 | frame: usize, 21 | pub lamport_timestamp: usize, 22 | event_type: OperaEventType, 23 | } 24 | 25 | pub struct Opera { 26 | graph: HashMap, 27 | pub lamport_timestamp: usize, 28 | } 29 | 30 | impl Opera { 31 | pub fn new() -> Opera { 32 | let graph = HashMap::new(); 33 | Opera { 34 | graph, 35 | lamport_timestamp: 0, 36 | } 37 | } 38 | 39 | pub fn sync(&mut self, other: Opera) { 40 | for (eh, ev) in other.graph { 41 | self.graph.insert(eh, ev); 42 | } 43 | if self.lamport_timestamp < other.lamport_timestamp { 44 | self.lamport_timestamp = other.lamport_timestamp; 45 | } 46 | } 47 | 48 | pub fn wire(&self) -> OperaWire { 49 | OperaWire { 50 | graph: BTreeMap::from_iter(self.graph.clone().into_iter()), 51 | lamport_timestamp: self.lamport_timestamp, 52 | } 53 | } 54 | 55 | pub fn insert( 56 | &mut self, 57 | hash: EventHash, 58 | event: Event, 59 | frame: usize, 60 | ) -> Result<(), Error> { 61 | self.lamport_timestamp += 1; 62 | let flag_table = match event.parents() { 63 | None => HashSet::with_capacity(0), 64 | Some(ps) => self.parent_list_to_flag_table(ps)?, 65 | }; 66 | self.graph.insert( 67 | hash, 68 | OperaEvent { 69 | event, 70 | flag_table, 71 | frame, 72 | event_type: OperaEventType::Undefined, 73 | 
lamport_timestamp: self.lamport_timestamp, 74 | }, 75 | ); 76 | Ok(()) 77 | } 78 | 79 | pub fn unfamous_events(&self) -> Vec<&OperaEvent> { 80 | self.graph 81 | .values() 82 | .filter(|e| e.event_type != OperaEventType::Root) 83 | .collect() 84 | } 85 | 86 | pub fn get_event_mut(&mut self, h: &EventHash) -> Result<&mut OperaEvent, Error> { 87 | self.graph.get_mut(h).ok_or(Error::from(HashgraphError::new( 88 | HashgraphErrorType::EventNotFound, 89 | ))) 90 | } 91 | 92 | pub fn get_event(&self, h: &EventHash) -> Result { 93 | self.graph 94 | .get(h) 95 | .map(|v| v.clone()) 96 | .ok_or(Error::from(HashgraphError::new( 97 | HashgraphErrorType::EventNotFound, 98 | ))) 99 | } 100 | 101 | pub fn set_root(&mut self, h: &EventHash) -> Result<(), Error> { 102 | let mut e = self 103 | .graph 104 | .get_mut(h) 105 | .ok_or(Error::from(HashgraphError::new( 106 | HashgraphErrorType::EventNotFound, 107 | )))?; 108 | e.event_type = OperaEventType::Root; 109 | e.flag_table = HashSet::new(); 110 | Ok(()) 111 | } 112 | 113 | pub fn set_clotho(&mut self, h: &EventHash) -> Result<(), Error> { 114 | let mut e = self 115 | .graph 116 | .get_mut(h) 117 | .ok_or(Error::from(HashgraphError::new( 118 | HashgraphErrorType::EventNotFound, 119 | )))?; 120 | e.event_type = OperaEventType::Clotho(None); 121 | Ok(()) 122 | } 123 | 124 | pub fn set_consensus_time(&mut self, h: &EventHash, time: usize) -> Result<(), Error> { 125 | let mut e = self.get_event_mut(h)?; 126 | e.event_type = OperaEventType::Clotho(Some(time)); 127 | Ok(()) 128 | } 129 | 130 | pub fn change_frame(&mut self, h: &EventHash, frame: usize) -> Result<(), Error> { 131 | let mut e = self 132 | .graph 133 | .get_mut(h) 134 | .ok_or(Error::from(HashgraphError::new( 135 | HashgraphErrorType::EventNotFound, 136 | )))?; 137 | e.frame = frame; 138 | Ok(()) 139 | } 140 | 141 | fn parent_list_to_flag_table(&mut self, ps: &ParentsList) -> Result, Error> { 142 | let mut ft = HashSet::new(); 143 | for p in ps.0.iter() { 144 | let event = 
self 145 | .graph 146 | .get(p) 147 | .ok_or(Error::from(HashgraphError::new( 148 | HashgraphErrorType::EventNotFound, 149 | )))? 150 | .clone(); 151 | if event.event_type == OperaEventType::Root { 152 | ft.insert(p.clone()); 153 | } 154 | ft = ft.union(&event.flag_table).map(|e| e.clone()).collect(); 155 | } 156 | Ok(ft) 157 | } 158 | 159 | pub fn set_lamport(&mut self, lamport_timestamp: usize) { 160 | self.lamport_timestamp = lamport_timestamp; 161 | } 162 | 163 | pub fn diff(&self, wire: OperaWire) -> OperaWire { 164 | let local_keys: Vec<&EventHash> = self.graph.keys().collect(); 165 | let remote_keys: Vec<&EventHash> = wire.graph.keys().collect(); 166 | let diff_keys = local_keys 167 | .into_iter() 168 | .filter(|k| !remote_keys.contains(k)) 169 | .map(|k| match self.graph.get(k) { 170 | Some(graph_at_k) => Some((k.clone(), graph_at_k.clone())), 171 | None => None, 172 | }) 173 | .filter(|k| k.is_some()) 174 | .map(|k| k.unwrap()) 175 | .collect(); 176 | OperaWire { 177 | graph: diff_keys, 178 | lamport_timestamp: self.lamport_timestamp, 179 | } 180 | } 181 | 182 | pub fn can_see(&self, seer: &EventHash, seen: &EventHash) -> Result { 183 | if seer == seen { 184 | Ok(true) 185 | } else { 186 | let ancestors = self.get_ancestors(seer)?; 187 | Ok(ancestors.contains(seen)) 188 | } 189 | } 190 | 191 | fn get_ancestors(&self, hash: &EventHash) -> Result, Error> { 192 | let event = self 193 | .graph 194 | .get(hash) 195 | .ok_or(Error::from(HashgraphError::new( 196 | HashgraphErrorType::EventNotFound, 197 | )))? 
198 | .clone(); 199 | let result = match event.event.parents() { 200 | None => vec![], 201 | Some(p) => { 202 | let mut base = p.0.clone(); 203 | let mut prev = 204 | p.0.iter() 205 | .map(|ph| match self.get_ancestors(ph) { 206 | Ok(ancestors) => Some(ancestors), 207 | Err(e) => { 208 | debug!(target: "swirlds", "{}", e); 209 | return None; 210 | } 211 | }) 212 | .filter(|ph| ph.is_some()) 213 | .map(|v| v.unwrap().into_iter()) 214 | .flatten() 215 | .collect(); 216 | base.append(&mut prev); 217 | base 218 | } 219 | }; 220 | Ok(result) 221 | } 222 | } 223 | 224 | #[derive(Deserialize, Serialize)] 225 | pub struct OperaWire { 226 | graph: BTreeMap, 227 | pub lamport_timestamp: usize, 228 | } 229 | 230 | impl OperaWire { 231 | pub fn into_opera(self) -> Opera { 232 | Opera { 233 | graph: HashMap::from_iter(self.graph.into_iter()), 234 | lamport_timestamp: self.lamport_timestamp, 235 | } 236 | } 237 | } 238 | -------------------------------------------------------------------------------- /lachesis-rs/src/lachesis/parents_list.rs: -------------------------------------------------------------------------------- 1 | use crate::errors::ParentsError; 2 | use crate::event::{event_hash::EventHash, parents::Parents}; 3 | use failure::Error; 4 | 5 | #[derive(Clone, Deserialize, Serialize)] 6 | pub struct ParentsList(pub Vec); 7 | 8 | impl Parents for ParentsList { 9 | fn self_parent(&self) -> Result { 10 | Ok(self 11 | .0 12 | .first() 13 | .ok_or(Error::from(ParentsError::EmptyParents))? 14 | .clone()) 15 | } 16 | } 17 | -------------------------------------------------------------------------------- /lachesis-rs/src/lib.rs: -------------------------------------------------------------------------------- 1 | #[macro_use] 2 | extern crate failure; 3 | #[macro_use] 4 | extern crate log; 5 | #[macro_use] 6 | extern crate proptest; 7 | #[macro_use] 8 | extern crate serde_derive; 9 | extern crate json; 10 | 11 | macro_rules! 
get_from_mutex { 12 | ($resource: expr, $error: ident) => { 13 | $resource.lock().map_err(|e| $error::from(e)) 14 | }; 15 | } 16 | 17 | mod errors; 18 | mod event; 19 | mod hashgraph; 20 | mod lachesis; 21 | mod node; 22 | mod peer; 23 | mod printable_hash; 24 | mod round; 25 | mod server; 26 | mod swirlds; 27 | pub mod tcp_server; 28 | 29 | pub use crate::event::{event_hash::EventHash, Event}; 30 | pub use crate::hashgraph::{BTreeHashgraph, Hashgraph, HashgraphWire}; 31 | pub use crate::lachesis::Lachesis; 32 | pub use crate::node::Node; 33 | pub use crate::peer::{Peer, PeerId}; 34 | pub use crate::server::ws_message::InternodeMessage; 35 | pub use crate::server::Server; 36 | pub use crate::swirlds::Swirlds; 37 | -------------------------------------------------------------------------------- /lachesis-rs/src/node.rs: -------------------------------------------------------------------------------- 1 | use crate::event::event_hash::EventHash; 2 | use crate::event::parents::Parents; 3 | use crate::event::Event; 4 | use failure::Error; 5 | use rand::Rng; 6 | use serde::Serialize; 7 | 8 | pub trait Node { 9 | type D; 10 | type P: Parents + Clone + Serialize; 11 | 12 | fn run(&self, rng: &mut R) -> Result<(), Error>; 13 | 14 | fn respond_message(&self, known: Option) -> Result<(EventHash, Self::D), Error>; 15 | 16 | fn add_transaction(&self, msg: Vec) -> Result<(), Error>; 17 | 18 | fn get_ordered_events(&self) -> Result>, Error>; 19 | } 20 | -------------------------------------------------------------------------------- /lachesis-rs/src/peer.rs: -------------------------------------------------------------------------------- 1 | use crate::event::event_hash::EventHash; 2 | use failure::Error; 3 | 4 | pub type PeerId = Vec; 5 | 6 | pub trait Peer: Send + Sync { 7 | fn get_sync(&self, pk: PeerId, known: Option<&H>) -> Result<(EventHash, H), Error>; 8 | fn address(&self) -> String; 9 | fn id(&self) -> &PeerId; 10 | } 11 | 
-------------------------------------------------------------------------------- /lachesis-rs/src/printable_hash.rs: -------------------------------------------------------------------------------- 1 | use crate::event::event_hash::EventHash; 2 | use crate::peer::PeerId; 3 | 4 | pub trait PrintableHash: Sized + AsRef<[u8]> { 5 | fn printable_hash(&self) -> String { 6 | base64::encode(self)[..8].to_owned() 7 | } 8 | } 9 | 10 | impl PrintableHash for EventHash {} 11 | impl PrintableHash for PeerId {} 12 | -------------------------------------------------------------------------------- /lachesis-rs/src/round.rs: -------------------------------------------------------------------------------- 1 | use crate::event::event_hash::EventHash; 2 | use crate::peer::PeerId; 3 | use std::collections::HashMap; 4 | 5 | #[derive(Clone)] 6 | pub struct Round { 7 | pub id: usize, 8 | witnesses: HashMap, 9 | } 10 | 11 | impl Round { 12 | pub fn new(id: usize) -> Round { 13 | Round { 14 | id, 15 | witnesses: HashMap::new(), 16 | } 17 | } 18 | 19 | pub fn add_witness(&mut self, peer: PeerId, event: EventHash) { 20 | self.witnesses.insert(peer, event); 21 | } 22 | 23 | pub fn witnesses(&self) -> Vec { 24 | self.witnesses.values().map(|h| h.clone()).collect() 25 | } 26 | 27 | pub fn witnesses_map(&self) -> &HashMap { 28 | &self.witnesses 29 | } 30 | } 31 | 32 | #[cfg(test)] 33 | mod tests { 34 | use super::Round; 35 | use crate::event::event_hash::EventHash; 36 | use ring::digest::{digest, SHA256}; 37 | 38 | #[test] 39 | fn it_should_correctly_get_all_witnesses() { 40 | let mut round = Round::new(0); 41 | let digest1 = digest(&SHA256, b"42"); 42 | let event1 = EventHash::new(digest1.as_ref()); 43 | let digest2 = digest(&SHA256, b"fish"); 44 | let event2 = EventHash::new(digest2.as_ref()); 45 | round.add_witness(vec![1], event1.clone()); 46 | round.add_witness(vec![0], event2.clone()); 47 | let mut expected = vec![event1, event2]; 48 | expected.sort(); 49 | let mut actual = 
round.witnesses(); 50 | actual.sort(); 51 | assert_eq!(round.id, 0); 52 | assert_eq!(expected, actual); 53 | } 54 | } 55 | -------------------------------------------------------------------------------- /lachesis-rs/src/server.rs: -------------------------------------------------------------------------------- 1 | use actix_web::{http, middleware, server, App}; 2 | 3 | use actix::prelude::*; 4 | 5 | use std::sync::Arc; 6 | use std::sync::Mutex; 7 | 8 | mod heartbeat; 9 | pub mod http_handler; 10 | pub mod ws_handler; 11 | pub mod ws_message; 12 | 13 | use self::heartbeat::Heartbeat; 14 | use self::http_handler::{check_transaction_status, get_peers, heartbeat, submit_transaction}; 15 | use self::ws_handler::ws_index; 16 | pub struct Server; 17 | 18 | #[derive(Clone)] 19 | pub struct AppState { 20 | counter: Arc>, 21 | heartbeat_counter: Addr, 22 | } 23 | 24 | impl Server { 25 | pub fn create_app() -> App { 26 | let addr = Arbiter::start(move |_| Heartbeat { count: 0 }); 27 | 28 | let counter = Arc::new(Mutex::new(0)); 29 | 30 | App::with_state(AppState { 31 | counter: counter.clone(), 32 | heartbeat_counter: addr.clone(), 33 | }) 34 | .middleware(middleware::Logger::default()) 35 | .resource("/transaction", |r| { 36 | r.method(http::Method::POST).a(submit_transaction) 37 | }) 38 | .resource("/transaction/{id}", |r| { 39 | r.method(http::Method::GET).a(check_transaction_status) 40 | }) 41 | .resource("/peer", |r| r.method(http::Method::GET).f(get_peers)) 42 | .resource("/heartbeat", |r| r.method(http::Method::GET).f(heartbeat)) 43 | .resource("/ws", |r| r.method(http::Method::GET).f(ws_index)) 44 | } 45 | 46 | pub fn init( 47 | ) -> server::HttpServer, impl Fn() -> App + Send + Clone + 'static> 48 | { 49 | let counter: Arc> = Arc::new(Mutex::new(0)); 50 | 51 | let addr: Addr = Arbiter::start(move |_| Heartbeat { count: 0 }); 52 | 53 | server::new(move || -> App { 54 | App::with_state(AppState { 55 | counter: counter.clone(), 56 | heartbeat_counter: addr.clone(), 57 
| }) 58 | .middleware(middleware::Logger::default()) 59 | .resource("/transaction", |r| { 60 | r.method(http::Method::POST).a(submit_transaction) 61 | }) 62 | .resource("/transaction/{id}", |r| { 63 | r.method(http::Method::GET).a(check_transaction_status) 64 | }) 65 | .resource("/peer", |r| r.method(http::Method::GET).f(get_peers)) 66 | .resource("/heartbeat", |r| r.method(http::Method::GET).f(heartbeat)) 67 | .resource("/ws", |r| r.method(http::Method::GET).f(ws_index)) 68 | }) 69 | } 70 | } 71 | 72 | #[cfg(test)] 73 | mod tests { 74 | use super::http_handler::SubmitTransaction; 75 | 76 | use super::*; 77 | use actix_web::test::TestServer; 78 | use actix_web::HttpMessage; 79 | use futures::future::Future; 80 | 81 | #[test] 82 | fn test_submit_transaction() { 83 | let mut server = TestServer::with_factory(Server::create_app); 84 | 85 | let request = server 86 | .client(http::Method::POST, "/transaction") 87 | .json(SubmitTransaction { 88 | signature: "efwef".to_string(), 89 | payload: "WEfwef".to_string(), 90 | }) 91 | .unwrap(); 92 | 93 | let response = server.execute(request.send()).unwrap(); 94 | assert!(response.status().is_success()); 95 | } 96 | 97 | #[test] 98 | fn test_get_peers() { 99 | let mut server = TestServer::with_factory(Server::create_app); 100 | 101 | let request = server.client(http::Method::GET, "/peer").finish().unwrap(); 102 | 103 | let response = server.execute(request.send()).unwrap(); 104 | assert!(response.status().is_success()); 105 | } 106 | 107 | #[test] 108 | fn test_check_transaction_status() { 109 | let mut server = TestServer::with_factory(Server::create_app); 110 | 111 | let request = server 112 | .client(http::Method::GET, "/transaction/0x81732be82h") 113 | .finish() 114 | .unwrap(); 115 | 116 | let response = server.execute(request.send()).unwrap(); 117 | println!("{:?}", response.body().wait()); 118 | assert!(response.status().is_success()); 119 | } 120 | } 121 | 
-------------------------------------------------------------------------------- /lachesis-rs/src/server/heartbeat.rs: -------------------------------------------------------------------------------- 1 | use std::time::Duration; 2 | 3 | use std::io; 4 | 5 | use actix::prelude::*; 6 | 7 | pub struct Heartbeat { 8 | pub count: usize, 9 | } 10 | 11 | impl Heartbeat { 12 | fn beat(&mut self, _context: &mut Context) { 13 | self.count += 1; 14 | } 15 | } 16 | 17 | pub struct GetHeartbeatCount; 18 | 19 | impl Message for GetHeartbeatCount { 20 | type Result = Result; 21 | } 22 | 23 | impl Actor for Heartbeat { 24 | type Context = Context; 25 | 26 | fn started(&mut self, ctx: &mut Context) { 27 | IntervalFunc::new(Duration::new(3, 0), Self::beat) 28 | .finish() 29 | .spawn(ctx); 30 | } 31 | } 32 | 33 | impl Handler for Heartbeat { 34 | type Result = Result; 35 | 36 | fn handle(&mut self, _msg: GetHeartbeatCount, _ctx: &mut Context) -> Self::Result { 37 | Ok(self.count) 38 | } 39 | } 40 | -------------------------------------------------------------------------------- /lachesis-rs/src/server/http_handler.rs: -------------------------------------------------------------------------------- 1 | use actix_web::{AsyncResponder, Error, HttpMessage, HttpRequest, HttpResponse}; 2 | 3 | use futures::{future::result, Future}; 4 | 5 | use super::AppState; 6 | 7 | use super::heartbeat::GetHeartbeatCount; 8 | 9 | use actix::prelude::*; 10 | 11 | #[derive(Debug, Serialize, Deserialize)] 12 | pub struct SubmitTransaction { 13 | pub signature: String, 14 | pub payload: String, 15 | } 16 | 17 | #[derive(Debug, Serialize, Deserialize)] 18 | pub struct CheckTransactionStatus { 19 | id: String, 20 | } 21 | 22 | #[derive(Debug, Serialize, Deserialize)] 23 | pub struct Peer { 24 | id: String, 25 | } 26 | 27 | #[derive(Debug, Serialize, Deserialize)] 28 | pub struct PeerList { 29 | peers: Vec, 30 | } 31 | 32 | #[derive(Debug, Serialize, Deserialize)] 33 | pub enum TransactionStatus { 34 | 
Complete, 35 | Pending, 36 | Failed, 37 | } 38 | 39 | pub fn submit_transaction( 40 | req: &HttpRequest, 41 | ) -> Box> { 42 | req.json() 43 | .from_err() 44 | .and_then(|val: SubmitTransaction| { 45 | debug!("model: {:?}", val); 46 | Ok(HttpResponse::Ok().json(val)) // <- send response 47 | }) 48 | .responder() 49 | } 50 | 51 | pub fn heartbeat(req: &HttpRequest) -> HttpResponse { 52 | debug!("{:?}", req); 53 | 54 | *(req.state().counter.lock().unwrap()) += 1; 55 | 56 | let res = req.state().heartbeat_counter.send(GetHeartbeatCount); 57 | 58 | Arbiter::spawn( 59 | res.map(|res| match res { 60 | Ok(result) => info!("Got result: {}", result), 61 | Err(err) => error!("Got error: {}", err), 62 | }) 63 | .map_err(|e| { 64 | debug!("Actor is probably dead: {}", e); 65 | }), 66 | ); 67 | 68 | HttpResponse::Ok().body(format!( 69 | "Num of requests: {}", 70 | req.state().counter.lock().unwrap() 71 | )) 72 | } 73 | 74 | pub fn check_transaction_status( 75 | req: &HttpRequest, 76 | ) -> Box> { 77 | let _transaction_id = req.match_info().get("id").expect("no id provided"); 78 | 79 | result(Ok(HttpResponse::Ok().json(TransactionStatus::Failed))).responder() 80 | } 81 | 82 | pub fn get_peers(_req: &HttpRequest) -> Box> { 83 | let peers = vec![Peer { 84 | id: "wefwef".to_string(), 85 | }]; 86 | 87 | result(Ok(HttpResponse::Ok().json(peers))).responder() 88 | } 89 | -------------------------------------------------------------------------------- /lachesis-rs/src/server/ws_handler.rs: -------------------------------------------------------------------------------- 1 | use actix::prelude::*; 2 | use actix_web::{ws, Error, HttpRequest, HttpResponse}; 3 | use bincode::deserialize; 4 | 5 | use super::ws_message::InternodeMessage; 6 | use super::AppState; 7 | 8 | pub fn ws_index(r: &HttpRequest) -> Result { 9 | info!("Websocket handshake"); 10 | ws::start(r, Ws) 11 | } 12 | 13 | struct Ws; 14 | 15 | impl Actor for Ws { 16 | type Context = ws::WebsocketContext; 17 | } 18 | 19 | impl 
StreamHandler for Ws { 20 | fn handle(&mut self, msg: ws::Message, ctx: &mut Self::Context) { 21 | match msg { 22 | ws::Message::Ping(msg) => { 23 | ctx.pong(&msg); 24 | } 25 | ws::Message::Pong(msg) => { 26 | ctx.ping(&msg); 27 | } 28 | ws::Message::Text(text) => ctx.text(text), 29 | ws::Message::Binary(mut bin) => { 30 | let decoded: InternodeMessage = deserialize(&bin.take()).unwrap(); 31 | info!("{:?}", decoded); 32 | } 33 | ws::Message::Close(_) => { 34 | ctx.stop(); 35 | } 36 | } 37 | } 38 | } 39 | -------------------------------------------------------------------------------- /lachesis-rs/src/server/ws_message.rs: -------------------------------------------------------------------------------- 1 | use actix::*; 2 | 3 | use bytes::Bytes; 4 | 5 | use actix_web::Binary; 6 | use bincode::serialize; 7 | 8 | #[derive(Message, Serialize, Deserialize, Clone, Debug)] 9 | pub enum InternodeMessage { 10 | SyncRequest, 11 | SyncResponse, 12 | } 13 | 14 | impl Into for InternodeMessage { 15 | fn into(self) -> Binary { 16 | match serialize(&self) { 17 | Ok(encoded) => Binary::Bytes(Bytes::from(encoded)), 18 | Err(e) => { 19 | error!("{}", e); 20 | Binary::Bytes(Bytes::from(vec![])) 21 | } 22 | } 23 | } 24 | } 25 | -------------------------------------------------------------------------------- /lachesis-rs/src/swirlds.rs: -------------------------------------------------------------------------------- 1 | use crate::errors::*; 2 | use crate::event::{ 3 | event_hash::EventHash, event_signature::EventSignature, parents::ParentsPair, Event, 4 | }; 5 | use crate::hashgraph::{Hashgraph, HashgraphWire}; 6 | use crate::node::Node; 7 | use crate::peer::{Peer, PeerId}; 8 | use crate::printable_hash::PrintableHash; 9 | use crate::round::Round; 10 | use failure::Error; 11 | use rand::prelude::IteratorRandom; 12 | use rand::Rng; 13 | use ring::signature; 14 | use std::cmp::Ordering; 15 | use std::collections::{BTreeMap, BTreeSet, HashMap, HashSet}; 16 | use std::fmt; 17 | use 
std::iter::FromIterator; 18 | use std::marker::PhantomData; 19 | use std::sync::MutexGuard; 20 | use std::sync::{Arc, Mutex}; 21 | use std::time::{SystemTime, UNIX_EPOCH}; 22 | 23 | // const C is the frequency of coin toss rounds 24 | // Swirlds paper requires C > 2; let keep it prime 25 | const C: usize = 3; 26 | 27 | #[inline] 28 | fn get_current_timestamp() -> u64 { 29 | SystemTime::now() 30 | .duration_since(UNIX_EPOCH) 31 | .expect("Time went back") 32 | .as_secs() 33 | } 34 | 35 | #[inline] 36 | fn assign_round(event: &mut Event, round: usize) -> Result { 37 | event.set_round(round); 38 | Ok(round) 39 | } 40 | 41 | #[inline] 42 | fn get_round_pairs(r: &Round) -> Vec<(usize, EventHash)> { 43 | r.witnesses().iter().map(|w| (r.id, w.clone())).collect() 44 | } 45 | 46 | struct NodeInternalState, H: Hashgraph> { 47 | consensus: BTreeSet, 48 | network: HashMap>, 49 | ordered_events: Vec>, 50 | pending_events: HashSet, 51 | rounds: Vec, 52 | super_majority: usize, 53 | transactions: Vec>, 54 | votes: HashMap<(EventHash, EventHash), bool>, 55 | _phantom: PhantomData, 56 | } 57 | 58 | impl, H: Hashgraph + Clone + fmt::Debug> fmt::Debug for Swirlds { 59 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { 60 | fn print_arrows(f: &mut fmt::Formatter, n_nodes: usize) -> fmt::Result { 61 | for _ in 0..3 { 62 | write!(f, " ")?; 63 | for _ in 0..n_nodes { 64 | write!(f, " | ")?; 65 | } 66 | writeln!(f, "")?; 67 | } 68 | Ok(()) 69 | } 70 | fn update_last_events( 71 | events: &mut BTreeMap>, 72 | h: &H, 73 | ) -> Result<(), Error> { 74 | for (k, v) in events.clone() { 75 | if let Some(v) = v { 76 | let self_child = h.find_self_child(&v)?; 77 | events.insert(k.clone(), self_child); 78 | } 79 | } 80 | Ok(()) 81 | } 82 | 83 | fn print_hashes( 84 | f: &mut fmt::Formatter, 85 | events: &mut BTreeMap>, 86 | ) -> fmt::Result { 87 | write!(f, " ")?; 88 | for peer in events.keys() { 89 | if let Some(Some(ev)) = events.get(peer) { 90 | write!(f, "{} ", ev.printable_hash())?; 91 | } 
else { 92 | write!(f, " ")?; 93 | } 94 | } 95 | writeln!(f, "")?; 96 | Ok(()) 97 | } 98 | fn print_other_parents( 99 | f: &mut fmt::Formatter, 100 | events: &mut BTreeMap>, 101 | h: &H, 102 | ) -> fmt::Result { 103 | write!(f, " ")?; 104 | for peer in events.keys() { 105 | if let Some(Some(ev)) = events.get(peer) { 106 | match h.get(ev) { 107 | Ok(ev) => { 108 | if let Some(ParentsPair(_, other_parent)) = ev.parents() { 109 | write!(f, "{} ", other_parent.printable_hash())?; 110 | } else { 111 | write!(f, " ")?; 112 | } 113 | } 114 | Err(e) => { 115 | debug!(target: "print_other_parents::h.get", "{}", e); 116 | write!(f, " ")?; 117 | } 118 | } 119 | } else { 120 | write!(f, " ")?; 121 | } 122 | } 123 | writeln!(f, "")?; 124 | Ok(()) 125 | } 126 | fn print_rounds( 127 | f: &mut fmt::Formatter, 128 | events: &mut BTreeMap>, 129 | h: &H, 130 | ) -> fmt::Result { 131 | write!(f, " ")?; 132 | for peer in events.keys() { 133 | if let Some(Some(ev)) = events.get(peer) { 134 | match h.get(ev) { 135 | Ok(ev) => { 136 | if let Some(round) = ev.maybe_round() { 137 | let r_string = format!("{}", round); 138 | let spaces = 139 | (0..(10 - r_string.len())).map(|_| " ").collect::(); 140 | write!(f, "{}{}", round, spaces)?; 141 | } else { 142 | write!(f, " ")?; 143 | } 144 | } 145 | Err(e) => { 146 | debug!(target: "print_rounds::h.get", "{}", e); 147 | write!(f, " ")?; 148 | } 149 | } 150 | } else { 151 | write!(f, " ")?; 152 | } 153 | } 154 | writeln!(f, "")?; 155 | Ok(()) 156 | } 157 | 158 | fn num_of_some_in_map(map: &BTreeMap>) -> usize { 159 | let vs: Vec<&Option> = map.values().filter(|v| v.is_some()).collect(); 160 | vs.len() 161 | } 162 | let state = get_from_mutex!(self.state, ResourceNodeInternalStatePoisonError).unwrap(); 163 | let head = get_from_mutex!(self.head, ResourceHeadPoisonError) 164 | .unwrap() 165 | .clone(); 166 | let network: &HashMap> = &state.network; 167 | let hashgraph = get_from_mutex!(self.hashgraph, ResourceHashgraphPoisonError).unwrap(); 168 | 
writeln!(f, "Node ID: {:?}", self.get_id().printable_hash())?; 169 | writeln!(f, "Head: {:?}", head.map(|h| h.printable_hash()))?; 170 | let roots: Vec = hashgraph.find_roots(); 171 | let mut last_event_per_peer: BTreeMap> = BTreeMap::new(); 172 | for peer in network.keys() { 173 | for root in roots.iter() { 174 | let e: &Event = hashgraph.get(root).unwrap(); 175 | if e.creator() == peer { 176 | last_event_per_peer.insert(peer.clone(), Some(root.clone())); 177 | } 178 | } 179 | if !last_event_per_peer.contains_key(peer) { 180 | last_event_per_peer.insert(peer.clone(), None); 181 | } 182 | } 183 | for root in roots.iter() { 184 | let e: &Event = hashgraph.get(root).unwrap(); 185 | if e.creator() == &self.get_id() { 186 | last_event_per_peer.insert(self.get_id().clone(), Some(root.clone())); 187 | } 188 | } 189 | if !last_event_per_peer.contains_key(&self.get_id()) { 190 | last_event_per_peer.insert(self.get_id().clone(), None); 191 | } 192 | write!(f, "Peers: ")?; 193 | for peer in last_event_per_peer.keys() { 194 | write!(f, "{} ", peer.printable_hash())?; 195 | } 196 | writeln!(f, "")?; 197 | write!(f, " ")?; 198 | for root in last_event_per_peer.values() { 199 | if let Some(root) = root { 200 | write!(f, "{} ", root.printable_hash())?; 201 | } else { 202 | write!(f, " ")?; 203 | } 204 | } 205 | writeln!(f, "")?; 206 | let h = (*hashgraph).clone(); 207 | print_rounds(f, &mut last_event_per_peer, &h)?; 208 | match update_last_events(&mut last_event_per_peer, &h) { 209 | Ok(_) => (), 210 | Err(e) => debug!(target: "fmt::Debug::fmt::update_last_events", "{}", e), 211 | } 212 | while num_of_some_in_map(&last_event_per_peer) > 0 { 213 | print_arrows(f, network.len() + 1)?; 214 | print_hashes(f, &mut last_event_per_peer)?; 215 | print_other_parents(f, &mut last_event_per_peer, &h)?; 216 | print_rounds(f, &mut last_event_per_peer, &h)?; 217 | match update_last_events(&mut last_event_per_peer, &h) { 218 | Ok(_) => (), 219 | // TODO: Incorporate an ID for hashgraphs, so we 
can identify this error from above^ 220 | Err(e) => debug!(target: "fmt::Debug::fmt::update_last_events", "{}", e), 221 | } 222 | } 223 | writeln!(f, "")?; 224 | writeln!(f, "") 225 | } 226 | } 227 | 228 | pub struct Swirlds, H: Hashgraph + Clone + fmt::Debug> { 229 | hashgraph: Mutex, 230 | head: Mutex>, 231 | // TODO: Plain keys in memory? Not great. See https://stackoverflow.com/a/1263421 for possible 232 | // alternatives 233 | pk: signature::Ed25519KeyPair, 234 | state: Mutex>, 235 | } 236 | 237 | impl> Swirlds { 238 | pub fn new(pk: signature::Ed25519KeyPair, hashgraph: H) -> Result { 239 | let state = Mutex::new(NodeInternalState { 240 | consensus: BTreeSet::new(), 241 | network: HashMap::new(), 242 | ordered_events: Vec::new(), 243 | pending_events: HashSet::new(), 244 | rounds: Vec::new(), 245 | super_majority: 0, 246 | transactions: Vec::new(), 247 | votes: HashMap::new(), 248 | _phantom: PhantomData, 249 | }); 250 | let node = Swirlds { 251 | hashgraph: Mutex::new(hashgraph), 252 | head: Mutex::new(None), 253 | pk, 254 | state, 255 | }; 256 | node.create_new_head(None, Some(0))?; 257 | Ok(node) 258 | } 259 | 260 | #[inline] 261 | pub fn add_node(&self, peer: Arc

) -> Result<(), Error> { 262 | let super_majority = { 263 | let mut state = get_from_mutex!(self.state, ResourceNodeInternalStatePoisonError)?; 264 | state.network.insert(peer.id().clone(), peer); 265 | state.network.len() * 2 / 3 266 | }; 267 | self.set_super_majority(super_majority)?; 268 | Ok(()) 269 | } 270 | 271 | pub fn sync(&self, remote_head: EventHash, remote_hg: H) -> Result, Error> { 272 | info!( 273 | "[Node {:?}] Syncing with head {:?}", 274 | self.get_id().printable_hash(), 275 | remote_head.printable_hash() 276 | ); 277 | debug!("{:?}", self); 278 | let mut res = self.merge_hashgraph(remote_hg.clone())?; 279 | info!( 280 | "[Node {:?}] Merging {:?}", 281 | self.get_id().printable_hash(), 282 | res.iter() 283 | .map(|v| v.printable_hash()) 284 | .collect::>() 285 | ); 286 | debug!("{:?}", self); 287 | 288 | if res.len() > 0 { 289 | let new_head = self.maybe_change_head(remote_head, remote_hg.clone())?; 290 | res.extend(new_head.into_iter()); 291 | } 292 | Ok(res) 293 | } 294 | 295 | pub fn divide_rounds(&self, events: Vec) -> Result<(), Error> { 296 | for eh in events.into_iter() { 297 | let round = self.assign_round(&eh)?; 298 | info!( 299 | "[Node {:?}] Round {} assigned to {:?}", 300 | self.get_id().printable_hash(), 301 | round, 302 | eh.printable_hash() 303 | ); 304 | debug!("{:?}", self); 305 | 306 | self.maybe_add_new_round(round)?; 307 | 308 | self.set_event_can_see_self(&eh)?; 309 | 310 | self.maybe_add_witness_to_round(round, &eh)?; 311 | } 312 | Ok(()) 313 | } 314 | 315 | pub fn decide_fame(&self) -> Result, Error> { 316 | let mut famous_events = HashMap::new(); 317 | let mut rounds_done = BTreeSet::new(); 318 | let super_majority = self.get_super_majority()?; 319 | for (round, veh) in self.get_voters()?.into_iter() { 320 | let witnesses = self.get_round_witnesses(round, &veh)?; 321 | for (ur, eh) in self.get_undetermined_events(round)? 
{ 322 | if round - ur == 1 { 323 | self.vote(veh.clone(), eh.clone(), witnesses.contains(&eh))?; 324 | } else { 325 | let (vote, stake) = self.get_vote(&witnesses, &eh)?; 326 | if (round - ur) % C > 0 { 327 | if stake > super_majority { 328 | famous_events.insert(eh, vote); 329 | rounds_done.insert(ur); 330 | } else { 331 | self.vote(veh.clone(), eh, vote)?; 332 | } 333 | } else { 334 | if stake > super_majority { 335 | self.vote(veh.clone(), eh, vote)?; 336 | } else { 337 | let hashgraph = 338 | get_from_mutex!(self.hashgraph, ResourceHashgraphPoisonError)?; 339 | let new_vote = hashgraph.get(&veh)?.signature()?.as_ref()[0] != 0; 340 | self.vote(veh.clone(), eh, new_vote)?; 341 | } 342 | } 343 | } 344 | } 345 | } 346 | 347 | self.update_famous_events(famous_events)?; 348 | 349 | let new_consensus: BTreeSet = BTreeSet::from_iter( 350 | rounds_done 351 | .into_iter() 352 | .map(|r| match self.are_all_witnesses_famous(r) { 353 | Ok(famous) => Some(famous), 354 | Err(e) => { 355 | debug!(target: "swirlds", "{}", e); 356 | return None; 357 | } 358 | }) 359 | .filter(|f| f.is_some()) 360 | .map(|f| f.unwrap().into()), 361 | ); 362 | info!( 363 | "[Node {:?}] New consensus rounds: {:?}", 364 | self.get_id().printable_hash(), 365 | new_consensus 366 | ); 367 | debug!("{:?}", self); 368 | 369 | let mut state = get_from_mutex!(self.state, ResourceNodeInternalStatePoisonError)?; 370 | state.consensus = 371 | BTreeSet::from_iter(state.consensus.union(&new_consensus).map(|r| r.clone())); 372 | 373 | Ok(new_consensus) 374 | } 375 | 376 | pub fn find_order(&self, new_consensus: BTreeSet) -> Result<(), Error> { 377 | let mut state = get_from_mutex!(self.state, ResourceNodeInternalStatePoisonError)?; 378 | for round in new_consensus { 379 | let unique_famous_witnesses = self.get_unique_famous_witnesses(round)?; 380 | for eh in state.pending_events.clone() { 381 | let is_round_received = self.is_round_received(&unique_famous_witnesses, &eh)?; 382 | if is_round_received { 383 | 
self.set_received_information(&eh, round, &unique_famous_witnesses)?; 384 | state.pending_events.remove(&eh); 385 | } 386 | } 387 | } 388 | Ok(()) 389 | } 390 | 391 | pub fn get_id(&self) -> PeerId { 392 | self.pk.public_key_bytes().to_vec() 393 | } 394 | 395 | pub fn get_hashgraph(&self) -> Result { 396 | let hashgraph = get_from_mutex!(self.hashgraph, ResourceHashgraphPoisonError)?; 397 | Ok(hashgraph.clone()) 398 | } 399 | 400 | pub fn get_head(&self) -> Result { 401 | get_from_mutex!(self.head, ResourceHeadPoisonError)? 402 | .clone() 403 | .map(|v| v.clone()) 404 | .ok_or(Error::from(NodeError::new(NodeErrorType::NoHead))) 405 | } 406 | 407 | pub fn get_peer(&self, id: &PeerId) -> Result, Error> { 408 | let state = get_from_mutex!(self.state, ResourceNodeInternalStatePoisonError)?; 409 | state 410 | .network 411 | .get(id) 412 | .map(|v| v.clone()) 413 | .ok_or(Error::from(NodeError::new(NodeErrorType::PeerNotFound( 414 | id.clone(), 415 | )))) 416 | } 417 | 418 | fn update_order(&self) -> Result<(), Error> { 419 | let mut state = get_from_mutex!(self.state, ResourceNodeInternalStatePoisonError)?; 420 | let hashgraph = get_from_mutex!(self.hashgraph, ResourceHashgraphPoisonError)?; 421 | let events: Vec = hashgraph.get_events(); 422 | let rounds: Vec = state.consensus.clone().into_iter().collect(); 423 | let mut hashe_pairs: Vec<(u64, Event)> = rounds 424 | .into_iter() 425 | .flat_map(|r| { 426 | events 427 | .iter() 428 | .map(|h| (h.clone(), hashgraph.get(h).unwrap().maybe_round())) 429 | .filter(|(_, h): &(EventHash, Option)| h.is_some()) 430 | .map(|(h, r): (EventHash, Option)| (h, r.expect("can't happen"))) 431 | .filter(move |(_, cr): &(EventHash, usize)| r == cr.clone()) 432 | .map(|(h, _): (EventHash, usize)| h) 433 | }) 434 | .map(|h| hashgraph.get(&h)) 435 | .collect::>, Error>>()? 
436 | .into_iter() 437 | .map(|v| v.clone()) 438 | .filter(|e: &Event| e.timestamp().is_ok()) 439 | .map(|e: Event| (e.timestamp().expect("can't happen"), e)) 440 | .collect::)>>(); 441 | hashe_pairs.sort_by_key(|(t, _)| t.clone()); 442 | let events: Vec> = hashe_pairs.into_iter().map(|(_, e)| e).collect(); 443 | state.ordered_events = events; 444 | Ok(()) 445 | } 446 | 447 | #[inline] 448 | fn update_famous_events(&self, famous_events: HashMap) -> Result<(), Error> { 449 | let mut hashgraph = get_from_mutex!(self.hashgraph, ResourceHashgraphPoisonError)?; 450 | for (e, vote) in famous_events.into_iter() { 451 | let ev = hashgraph.get_mut(&e)?; 452 | ev.famous(vote); 453 | } 454 | Ok(()) 455 | } 456 | 457 | #[inline] 458 | pub fn get_stats(&self) -> Result<(usize, usize), Error> { 459 | let state = get_from_mutex!(self.state, ResourceNodeInternalStatePoisonError)?; 460 | Ok((state.rounds.len(), state.pending_events.len())) 461 | } 462 | 463 | #[inline] 464 | fn set_super_majority(&self, sm: usize) -> Result<(), Error> { 465 | let mut state = get_from_mutex!(self.state, ResourceNodeInternalStatePoisonError)?; 466 | state.super_majority = sm; 467 | Ok(()) 468 | } 469 | 470 | #[inline] 471 | fn get_super_majority(&self) -> Result { 472 | let state = get_from_mutex!(self.state, ResourceNodeInternalStatePoisonError)?; 473 | Ok(state.super_majority) 474 | } 475 | 476 | #[inline] 477 | fn vote(&self, veh: EventHash, eh: EventHash, vote: bool) -> Result<(), Error> { 478 | let mut state = get_from_mutex!(self.state, ResourceNodeInternalStatePoisonError)?; 479 | state.votes.insert((veh, eh), vote); 480 | Ok(()) 481 | } 482 | 483 | #[inline] 484 | fn maybe_add_witness_to_round(&self, round: usize, eh: &EventHash) -> Result<(), Error> { 485 | let hashgraph = get_from_mutex!(self.hashgraph, ResourceHashgraphPoisonError)?; 486 | let event = hashgraph.get(&eh)?; 487 | if round == 0 || round > hashgraph.get(&event.self_parent()?)?.round()? 
{
            let creator = event.creator().clone();
            self.add_witness_to_round(round, creator, eh)?;
        }
        Ok(())
    }

    /// Registers `eh` as the witness created by `creator` for `round`.
    #[inline]
    fn add_witness_to_round(
        &self,
        round: usize,
        creator: PeerId,
        eh: &EventHash,
    ) -> Result<(), Error> {
        let mut state = get_from_mutex!(self.state, ResourceNodeInternalStatePoisonError)?;
        state.rounds[round].add_witness(creator, eh.clone());
        Ok(())
    }

    /// Appends a new `Round` when `round` is the first event of one.
    #[inline]
    fn maybe_add_new_round(&self, round: usize) -> Result<(), Error> {
        let mut state = get_from_mutex!(self.state, ResourceNodeInternalStatePoisonError)?;
        if state.rounds.len() == round {
            state.rounds.push(Round::new(round));
        }
        Ok(())
    }

    /// An event's round is "received" once it is an ancestor of every unique
    /// famous witness of that round.
    #[inline]
    fn is_round_received(
        &self,
        unique_famous_witnesses: &HashSet<EventHash>,
        eh: &EventHash,
    ) -> Result<bool, Error> {
        let hashgraph = get_from_mutex!(self.hashgraph, ResourceHashgraphPoisonError)?;
        for witness in unique_famous_witnesses.iter() {
            if !hashgraph.ancestors(witness)?.contains(&eh) {
                return Ok(false);
            }
        }
        Ok(true)
    }

    /// Stamps `hash` with its received round and with the median-free mean of
    /// the timestamps of its deciders.
    ///
    /// FIX: the original divided by `times.len()` unconditionally; when every
    /// decider lookup failed the vector was empty and the integer division by
    /// zero panicked. We now leave the event untouched in that case.
    #[inline]
    fn set_received_information(
        &self,
        hash: &EventHash,
        round: usize,
        unique_famous_witnesses: &HashSet<EventHash>,
    ) -> Result<(), Error> {
        let timestamp_deciders = self.get_timestamp_deciders(hash, unique_famous_witnesses)?;
        let mut hashgraph = get_from_mutex!(self.hashgraph, ResourceHashgraphPoisonError)?;
        let times: Vec<u64> = timestamp_deciders
            .into_iter()
            .filter_map(|eh| {
                // Failed lookups are logged and skipped, as before.
                match hashgraph.get(&eh).and_then(|event| event.timestamp()) {
                    Ok(timestamp) => Some(timestamp),
                    Err(e) => {
                        debug!(target: "swirlds", "{}", e);
                        None
                    }
                }
            })
            .collect();
        if times.is_empty() {
            // No usable decider timestamps: nothing to average; skip rather
            // than divide by zero.
            return Ok(());
        }
        let times_sum: u64 = times.iter().sum();
        let new_time = times_sum / times.len() as u64;
        let event = hashgraph.get_mut(hash)?;
        event.set_timestamp(new_time);
        event.set_round_received(round);
        Ok(())
    }

    /// Deciders of an event's consensus timestamp: for each unique famous
    /// witness, its self-ancestors that see the event but are not its
    /// self-parent chain.
    #[inline]
    fn get_timestamp_deciders(
        &self,
        hash: &EventHash,
        unique_famous_witnesses: &HashSet<EventHash>,
    ) -> Result<HashSet<EventHash>, Error> {
        let mut result = HashSet::new();
        let hashgraph = get_from_mutex!(self.hashgraph, ResourceHashgraphPoisonError)?;
        for unique_famous_witness in unique_famous_witnesses {
            let self_ancestors = hashgraph.self_ancestors(unique_famous_witness)?.into_iter();
            for self_ancestor in self_ancestors {
                let ancestors = hashgraph.ancestors(self_ancestor)?;
                let event = hashgraph.get(self_ancestor)?;
                if ancestors.contains(&hash) && !event.is_self_parent(hash)? {
                    result.insert(self_ancestor.clone());
                }
            }
        }
        Ok(result)
    }

    /// Famous witnesses of `round`, with duplicates (same parents) removed.
    #[inline]
    fn get_unique_famous_witnesses(&self, round: usize) -> Result<HashSet<EventHash>, Error> {
        let mut famous_witnesses = self.get_famous_witnesses(round)?;
        let hashgraph = get_from_mutex!(self.hashgraph, ResourceHashgraphPoisonError)?;
        for w in famous_witnesses.clone() {
            for w1 in famous_witnesses.clone() {
                if w != w1 {
                    let e = hashgraph.get(&w)?;
                    let e1 = hashgraph.get(&w1)?;
                    if e.parents() == e1.parents() {
                        famous_witnesses.remove(&w);
                    }
                }
            }
        }
        Ok(famous_witnesses)
    }

    /// Witnesses of `round` that have been decided famous.
    /// NOTE(review): takes both the hashgraph and state locks — callers must
    /// not already hold either (the original `find_order` did; see its FIX).
    #[inline]
    fn get_famous_witnesses(&self, round: usize) -> Result<HashSet<EventHash>, Error> {
        // FIX: use the poison-safe macro like every sibling instead of
        // `lock().unwrap()`, and drop the commented-out experiments.
        let hashgraph = get_from_mutex!(self.hashgraph, ResourceHashgraphPoisonError)?;
        let state =
get_from_mutex!(self.state, ResourceNodeInternalStatePoisonError)?;
        let round_data = state.rounds[round].clone();

        let famous = round_data
            .witnesses()
            .iter()
            .filter_map(|eh| match hashgraph.get(&eh) {
                Ok(event) => Some(event),
                Err(e) => {
                    debug!(target: "swirlds", "{}", e);
                    None
                }
            })
            .filter(|event| event.is_famous())
            // FIX: a hash failure used to `unwrap()` and panic; skip instead.
            .filter_map(|event| event.hash().ok())
            .collect();
        Ok(famous)
    }

    /// True when every witness of `round` has been decided famous.
    #[inline]
    fn are_all_witnesses_famous(&self, round: usize) -> Result<bool, Error> {
        let hashgraph = get_from_mutex!(self.hashgraph, ResourceHashgraphPoisonError)?;
        let state = get_from_mutex!(self.state, ResourceNodeInternalStatePoisonError)?;
        Ok(state.rounds[round]
            .witnesses()
            .iter()
            .filter_map(|eh| match hashgraph.get(eh) {
                Ok(event) => Some(event),
                Err(e) => {
                    debug!(target: "swirlds", "{}", e);
                    None
                }
            })
            .all(|e| e.is_famous()))
    }

    /// Majority vote among `witnesses` about `eh`, plus the backing stake.
    #[inline]
    fn get_vote(
        &self,
        witnesses: &HashSet<EventHash>,
        eh: &EventHash,
    ) -> Result<(bool, usize), Error> {
        let total = self.get_votes_for_event(witnesses, eh)?;
        if total >= witnesses.len() / 2 {
            Ok((true, total))
        } else {
            Ok((false, witnesses.len() - total))
        }
    }

    /// Counts the "true" ballots cast by `witnesses` about `eh`.
    #[inline]
    fn get_votes_for_event(
        &self,
        witnesses: &HashSet<EventHash>,
        eh: &EventHash,
    ) -> Result<usize, Error> {
        let state = get_from_mutex!(self.state, ResourceNodeInternalStatePoisonError)?;
        let mut total = 0;
        for w in witnesses {
            // FIX: indexing `state.votes[..]` panicked when a witness had not
            // voted yet; a missing ballot now counts as a "false" vote, which
            // matches how `get_vote` tallies the complement.
            if state
                .votes
                .get(&(w.clone(), eh.clone()))
                .copied()
                .unwrap_or(false)
            {
                total += 1;
            }
        }
        Ok(total)
    }

    /// Witness events between the last consensus round and `round` whose fame
    /// is still undefined, paired with the deciding round.
    #[inline]
    fn get_undetermined_events(&self, round: usize) -> Result<Vec<(usize, EventHash)>, Error> {
        let next_consensus = self.get_next_consensus()?;
        let hashgraph: MutexGuard<H> =
            get_from_mutex!(self.hashgraph, ResourceHashgraphPoisonError)?;
        let state = get_from_mutex!(self.state, ResourceNodeInternalStatePoisonError)?;
        let undetermined = (next_consensus..round)
            .filter(|r| !state.consensus.contains(r))
            .flat_map(|r| get_round_pairs(&state.rounds[r]).into_iter())
            .filter_map(|(_, h)| match hashgraph.get(&h) {
                Ok(event) => Some(event),
                Err(e) => {
                    debug!(target: "swirlds", "{}", e);
                    None
                }
            })
            .filter(|event| event.is_undefined())
            .filter_map(|event| event.hash().ok())
            .map(|h| (round, h))
            .collect();
        Ok(undetermined)
    }

    /// Witnesses of `round - 1` that `hash` strongly sees (through more than
    /// a super-majority of members).
    #[inline]
    fn get_round_witnesses(
        &self,
        round: usize,
        hash: &EventHash,
    ) -> Result<HashSet<EventHash>, Error> {
        if round == 0 {
            Ok(HashSet::new())
        } else {
            let hits = self.get_round_hits(round, hash)?;
            let prev_round = round - 1;
            let super_majority = self.get_super_majority()?;
            let state = get_from_mutex!(self.state, ResourceNodeInternalStatePoisonError)?;
            let r = &state.rounds[prev_round];
            // NOTE(review): `witnesses_map()[&c]` panics if a creator counted
            // in `hits` has no witness registered for `prev_round`; confirm
            // that invariant holds.
            let map_iter = hits
                .into_iter()
                .filter(|(_, v)| *v > super_majority)
                .map(|(c, _)| r.witnesses_map()[&c].clone());
            Ok(HashSet::from_iter(map_iter))
        }
    }

    /// For each creator, how many round-(`round`-1) events `hash` can reach
    /// that themselves see a round-(`round`-1) event of that creator.
    #[inline]
    fn get_round_hits(
        &self,
        round: usize,
        hash: &EventHash,
    ) -> Result<HashMap<PeerId, usize>, Error> {
        if round == 0 {
            Ok(HashMap::new())
        } else {
            let mut hits: HashMap<PeerId, usize> = HashMap::new();
            let hashgraph = get_from_mutex!(self.hashgraph, ResourceHashgraphPoisonError)?;
            let event = hashgraph.get(hash)?;
            let prev_round = round - 1;
            for (creator, event_hash) in event.can_see().iter() {
                let possible_witness = hashgraph.get(event_hash)?;
                if
possible_witness.round()? == prev_round {
                    for (_creator, _event_hash) in possible_witness.can_see().iter() {
                        let r = hashgraph.get(_event_hash)?.round()?;
                        if r == prev_round {
                            // FIX: entry API — one lookup instead of get+insert.
                            *hits.entry(creator.clone()).or_insert(0) += 1;
                        }
                    }
                }
            }
            Ok(hits)
        }
    }

    /// All (round, witness) pairs eligible to vote in the fame election.
    /// NOTE(review): `[next_consensus..max_r]` excludes the newest round from
    /// voting, and panics if `next_consensus > max_r`; confirm both are
    /// intentional before touching the election logic.
    #[inline]
    fn get_voters(&self) -> Result<Vec<(usize, EventHash)>, Error> {
        let next_consensus = self.get_next_consensus()?;
        let state = get_from_mutex!(self.state, ResourceNodeInternalStatePoisonError)?;
        let max_r = state.rounds.iter().map(|r| r.id).max().unwrap_or(0);
        Ok(state.rounds[next_consensus..max_r]
            .iter()
            .flat_map(|r| get_round_pairs(r))
            .collect())
    }

    /// First round not yet in consensus (last consensus round + 1, or 0).
    #[inline]
    fn get_next_consensus(&self) -> Result<usize, Error> {
        let state = get_from_mutex!(self.state, ResourceNodeInternalStatePoisonError)?;
        // FIX: `iter().last()` walks the whole BTreeSet; `next_back()` is the
        // O(log n) way to read its maximum.
        Ok(state.consensus.iter().next_back().map(|v| *v + 1).unwrap_or(0))
    }

    /// Marks an event as seeing its own creator's latest event (itself).
    #[inline]
    fn set_event_can_see_self(&self, hash: &EventHash) -> Result<(), Error> {
        let mut hashgraph = get_from_mutex!(self.hashgraph, ResourceHashgraphPoisonError)?;
        let event = hashgraph.get_mut(&hash)?;
        let creator = event.creator().clone();
        event.add_can_see(creator, hash.clone());
        Ok(())
    }

    /// Round assignment: roots get round 0, everything else is derived from
    /// what its parents can strongly see.
    #[inline]
    fn assign_round(&self, hash: &EventHash) -> Result<usize, Error> {
        // Checked in its own scope so the lock is released before the
        // non-root path re-acquires it (std mutexes are not reentrant).
        let is_root = {
            let hashgraph = get_from_mutex!(self.hashgraph, ResourceHashgraphPoisonError)?;
            hashgraph.get(hash)?.is_root()
        };
        if is_root {
            let mut hashgraph = get_from_mutex!(self.hashgraph, ResourceHashgraphPoisonError)?;
            assign_round(hashgraph.get_mut(&hash)?, 0)
        } else {
            self.assign_non_root_round(hash)
        }
    }

    /// Non-root round: parents' max round, plus one when a super-majority of
    /// same-round events is strongly seen.
    #[inline]
    fn assign_non_root_round(&self, hash: &EventHash) -> Result<usize, Error> {
        let events_parents_can_see = {
            let hashgraph = get_from_mutex!(self.hashgraph, ResourceHashgraphPoisonError)?;
            hashgraph.events_parents_can_see(hash)?
        };
        let mut r = self.get_parents_round(hash)?;
        let hits = self.get_hits_per_events(r, &events_parents_can_see)?;
        let sm = self.get_super_majority()?;
        let votes = hits.values().copied().filter(|v| *v > sm);
        if votes.sum::<usize>() > sm {
            r += 1;
        }
        self.set_events_parents_can_see(hash, events_parents_can_see)?;
        let mut hashgraph = get_from_mutex!(self.hashgraph, ResourceHashgraphPoisonError)?;
        assign_round(hashgraph.get_mut(&hash)?, r)
    }

    /// For each creator, counts round-`r` events (among what the parents can
    /// see) that themselves see a round-`r` event by that creator.
    #[inline]
    fn get_hits_per_events(
        &self,
        r: usize,
        events_parents_can_see: &HashMap<Vec<u8>, EventHash>,
    ) -> Result<HashMap<Vec<u8>, usize>, Error> {
        let hashgraph = get_from_mutex!(self.hashgraph, ResourceHashgraphPoisonError)?;
        let mut hits: HashMap<Vec<u8>, usize> = HashMap::new();
        for (_, h) in events_parents_can_see.iter() {
            let event = hashgraph.get(h)?;
            if event.round()? == r {
                for (_c, _h) in event.can_see().iter() {
                    let seen_event = hashgraph.get(_h)?;
                    if seen_event.round()?
== r {
                        // FIX: entry API — one lookup instead of get+insert.
                        *hits.entry(_c.clone()).or_insert(0) += 1;
                    }
                }
            }
        }
        Ok(hits)
    }

    /// Highest round among this event's parents.
    /// NOTE(review): `max_round` consumes a full clone of the hashgraph;
    /// consider a borrowing signature for it eventually.
    #[inline]
    fn get_parents_round(&self, hash: &EventHash) -> Result<usize, Error> {
        let hashgraph = get_from_mutex!(self.hashgraph, ResourceHashgraphPoisonError)?;
        let event = hashgraph.get(hash)?;
        let parents = event.parents().clone().ok_or(Error::from(EventError::new(
            EventErrorType::NoParents { hash: hash.clone() },
        )))?;
        parents.max_round(hashgraph.clone())
    }

    /// Stores the computed can-see map on the event.
    #[inline]
    fn set_events_parents_can_see(
        &self,
        hash: &EventHash,
        events_parents_can_see: HashMap<Vec<u8>, EventHash>,
    ) -> Result<(), Error> {
        let mut hashgraph = get_from_mutex!(self.hashgraph, ResourceHashgraphPoisonError)?;
        let event = hashgraph.get_mut(hash)?;
        event.set_can_see(events_parents_can_see);
        Ok(())
    }

    /// Adds every event the remote hashgraph has that we don't, in
    /// topological order, validating each. Returns the added hashes.
    #[inline]
    fn merge_hashgraph(&self, remote_hg: H) -> Result<Vec<EventHash>, Error> {
        let mut diff = {
            let hashgraph = get_from_mutex!(self.hashgraph, ResourceHashgraphPoisonError)?;
            remote_hg.difference(hashgraph.clone())
        };
        let mut error: Option<Error> = None;
        // NOTE(review): on `higher` failure this comparator returns Less for
        // every pair, which is not a total order; `sort_by` may panic on that
        // in Rust >= 1.81. The captured error aborts the merge right after,
        // so it is only a hazard during the sort itself — worth revisiting.
        diff.sort_by(|h1, h2| {
            if error.is_some() {
                return Ordering::Less;
            }

            match remote_hg.higher(h1, h2) {
                Ok(h1_higher) => match remote_hg.higher(h2, h1) {
                    Ok(h2_higher) => {
                        if h1_higher {
                            Ordering::Greater
                        } else if h2_higher {
                            Ordering::Less
                        } else {
                            Ordering::Equal
                        }
                    }
                    Err(e) => {
                        error = Some(e);
                        Ordering::Less
                    }
                },
                Err(e) => {
                    error = Some(e);
                    Ordering::Less
                }
            }
        });
        if let Some(e) = error {
            return Err(e);
        }
        let mut res = Vec::with_capacity(diff.len());
        // FIX: no need to clone `diff`; it is not used afterwards.
        for eh in diff.into_iter() {
            let is_valid_event = {
                let event = remote_hg.get(&eh)?;
                self.is_valid_event(&eh, event)
            }?;
            if is_valid_event {
                self.add_event(remote_hg.get(&eh)?.clone())?;
                res.push(eh);
            } else {
                // FIX: the message said "Error {:?}" but prints an event hash.
                warn!(
                    "[Node {:?}] Event {:?} isn't valid",
                    self.get_id().printable_hash(),
                    eh.printable_hash()
                );
            }
        }
        Ok(res)
    }

    /// If the remote head is valid, creates a new head whose parents are our
    /// current head and the remote head.
    #[inline]
    fn maybe_change_head(
        &self,
        remote_head: EventHash,
        remote_hg: H,
    ) -> Result<Option<EventHash>, Error> {
        let remote_head_event = remote_hg.get(&remote_head)?.clone();

        if self.is_valid_event(&remote_head, &remote_head_event)? {
            let current_head = self.get_head()?;
            let parents = ParentsPair(current_head, remote_head);
            Ok(Some(self.create_new_head(Some(parents), None)?))
        } else {
            Ok(None)
        }
    }

    /// Signature/hash validity plus hashgraph-level ancestry validity.
    #[inline]
    fn is_valid_event(
        &self,
        event_hash: &EventHash,
        event: &Event<ParentsPair>,
    ) -> Result<bool, Error> {
        event.is_valid(event_hash).and_then(|b| {
            if !b {
                Ok(false)
            } else {
                let hashgraph = get_from_mutex!(self.hashgraph, ResourceHashgraphPoisonError)?;
                hashgraph.is_valid_event(event)
            }
        })
    }

    /// Uniformly picks a peer to gossip with.
    #[inline]
    fn select_peer<R: Rng>(&self, rng: &mut R) -> Result<Arc<P>, Error> {
        let state = get_from_mutex!(self.state, ResourceNodeInternalStatePoisonError)?;
        state
            .network
            .values()
            .choose(rng)
            .ok_or(Error::from(NodeError::new(NodeErrorType::EmptyNetwork)))
            .map(|p| p.clone())
    }

    /// Creates, signs and stores a new event, then makes it the head.
    fn create_new_head(
        &self,
        parents: Option<ParentsPair>,
        round: Option<usize>,
    ) -> Result<EventHash, Error> {
        let (event, hash) = self.create_event(parents, round)?;
        self.add_event(event)?;
        let mut current_head = get_from_mutex!(self.head, ResourceHeadPoisonError)?;
        *current_head = Some(hash.clone());
        // FIX: `hash` is already owned here; the extra clone was dead weight.
        Ok(hash)
    }

    /// Builds and signs a new event carrying the queued transactions.
    #[inline]
    fn create_event(
        &self,
        parents: Option<ParentsPair>,
        round:
Option, 982 | ) -> Result<(Event, EventHash), Error> { 983 | let mut state = get_from_mutex!(self.state, ResourceNodeInternalStatePoisonError)?; 984 | let mut event = Event::new( 985 | state.transactions.clone(), 986 | parents, 987 | self.pk.public_key_bytes().to_vec(), 988 | ); 989 | state.transactions = Vec::new(); 990 | if event.is_root() { 991 | event.set_timestamp(get_current_timestamp()) 992 | } 993 | round.iter().for_each(|r| event.set_round(r.clone())); 994 | let hash = event.hash()?; 995 | let signature = self.pk.sign(hash.as_ref()); 996 | event.sign(EventSignature::new(signature.as_ref())); 997 | Ok((event, hash)) 998 | } 999 | 1000 | #[inline] 1001 | fn add_event(&self, e: Event) -> Result<(), Error> { 1002 | let hash = e.hash()?; 1003 | self.add_pending_event(hash.clone())?; 1004 | let mut hashgraph = get_from_mutex!(self.hashgraph, ResourceHashgraphPoisonError)?; 1005 | Ok(hashgraph.insert(hash, e)) 1006 | } 1007 | 1008 | #[inline] 1009 | fn add_pending_event(&self, e: EventHash) -> Result<(), Error> { 1010 | let mut state = get_from_mutex!(self.state, ResourceNodeInternalStatePoisonError)?; 1011 | state.pending_events.insert(e); 1012 | Ok(()) 1013 | } 1014 | } 1015 | 1016 | impl, H: Hashgraph + Clone + fmt::Debug> Node for Swirlds { 1017 | type D = HashgraphWire; 1018 | type P = ParentsPair; 1019 | fn run(&self, rng: &mut R) -> Result<(), Error> { 1020 | let (head, hg) = { 1021 | let peer = self.select_peer(rng)?; 1022 | peer.get_sync(self.pk.public_key_bytes().to_vec(), None)? 
1023 | }; 1024 | let new_events = self.sync(head, hg)?; 1025 | self.divide_rounds(new_events)?; 1026 | let new_consensus = self.decide_fame()?; 1027 | self.find_order(new_consensus)?; 1028 | self.update_order()?; 1029 | Ok(()) 1030 | } 1031 | 1032 | fn respond_message( 1033 | &self, 1034 | _k: Option, 1035 | ) -> Result<(EventHash, HashgraphWire), Error> { 1036 | let head = self.get_head()?; 1037 | let hashgraph = self.get_hashgraph()?; 1038 | let wire = hashgraph.wire(); 1039 | Ok((head, wire)) 1040 | } 1041 | 1042 | fn add_transaction(&self, msg: Vec) -> Result<(), Error> { 1043 | let mut state = get_from_mutex!(self.state, ResourceNodeInternalStatePoisonError)?; 1044 | state.transactions.push(msg); 1045 | Ok(()) 1046 | } 1047 | 1048 | fn get_ordered_events(&self) -> Result>, Error> { 1049 | let state = get_from_mutex!(self.state, ResourceNodeInternalStatePoisonError)?; 1050 | Ok(state.ordered_events.clone()) 1051 | } 1052 | } 1053 | 1054 | #[cfg(test)] 1055 | mod tests { 1056 | use super::Swirlds; 1057 | use crate::event::{ 1058 | event_hash::EventHash, event_signature::EventSignature, parents::ParentsPair, Event, 1059 | }; 1060 | use crate::hashgraph::*; 1061 | use crate::peer::{Peer, PeerId}; 1062 | use ring::digest::{digest, SHA256}; 1063 | use ring::{rand, signature}; 1064 | use std::collections::HashSet; 1065 | use std::iter::FromIterator; 1066 | use std::sync::Arc; 1067 | 1068 | fn create_node() -> Swirlds { 1069 | let rng = rand::SystemRandom::new(); 1070 | let pkcs8_bytes = signature::Ed25519KeyPair::generate_pkcs8(&rng).unwrap(); 1071 | let kp = 1072 | signature::Ed25519KeyPair::from_pkcs8(untrusted::Input::from(&pkcs8_bytes)).unwrap(); 1073 | let hashgraph = BTreeHashgraph::new(); 1074 | Swirlds::new(kp, hashgraph).unwrap() 1075 | } 1076 | 1077 | fn create_useless_peer(id: PeerId) -> Arc { 1078 | let digest = digest(&SHA256, b"42"); 1079 | let event = EventHash::new(digest.as_ref()); 1080 | Arc::new(TestDummyPeer { 1081 | hashgraph: 
BTreeHashgraph::new(), 1082 | head: event, 1083 | id, 1084 | }) 1085 | } 1086 | 1087 | #[derive(Clone)] 1088 | struct TestDummyPeer { 1089 | hashgraph: BTreeHashgraph, 1090 | head: EventHash, 1091 | id: PeerId, 1092 | } 1093 | 1094 | impl Peer for TestDummyPeer { 1095 | fn get_sync( 1096 | &self, 1097 | _pk: PeerId, 1098 | _h: Option<&BTreeHashgraph>, 1099 | ) -> Result<(EventHash, BTreeHashgraph), failure::Error> { 1100 | Ok((self.head.clone(), self.hashgraph.clone())) 1101 | } 1102 | fn address(&self) -> String { 1103 | String::from_utf8(self.id.clone()).unwrap() 1104 | } 1105 | fn id(&self) -> &PeerId { 1106 | &self.id 1107 | } 1108 | } 1109 | 1110 | #[test] 1111 | fn it_should_calculate_super_majority_correctly() { 1112 | let node = create_node(); 1113 | let peer1 = create_useless_peer(vec![1]); 1114 | let peer2 = create_useless_peer(vec![2]); 1115 | let peer3 = create_useless_peer(vec![3]); 1116 | let peer4 = create_useless_peer(vec![4]); 1117 | assert_eq!(node.get_super_majority().unwrap(), 0); 1118 | node.add_node(peer1).unwrap(); 1119 | assert_eq!(node.get_super_majority().unwrap(), 0); 1120 | node.add_node(peer2).unwrap(); 1121 | assert_eq!(node.get_super_majority().unwrap(), 1); 1122 | node.add_node(peer3).unwrap(); 1123 | assert_eq!(node.get_super_majority().unwrap(), 2); 1124 | node.add_node(peer4).unwrap(); 1125 | assert_eq!(node.get_super_majority().unwrap(), 2); 1126 | } 1127 | 1128 | #[test] 1129 | fn it_should_add_event_correctly() { 1130 | let event = Event::new(vec![], None, vec![2]); 1131 | let hash = event.hash().unwrap(); 1132 | let node = create_node(); 1133 | let head = node.head.lock().unwrap().clone().unwrap().clone(); 1134 | node.add_event(event.clone()).unwrap(); 1135 | let state = node.state.lock().unwrap(); 1136 | assert_eq!( 1137 | state.pending_events, 1138 | HashSet::from_iter(vec![head, hash.clone()].into_iter()) 1139 | ); 1140 | let hashgraph = node.hashgraph.lock().unwrap(); 1141 | assert!(hashgraph.contains_key(&hash)); 1142 | 
assert_eq!(hashgraph.get(&hash).unwrap(), &event); 1143 | } 1144 | 1145 | #[test] 1146 | fn it_should_create_a_new_head() { 1147 | let node = create_node(); 1148 | let prev_head = node.head.lock().unwrap().clone().unwrap().clone(); 1149 | node.create_new_head( 1150 | Some(ParentsPair(prev_head.clone(), prev_head.clone())), 1151 | None, 1152 | ) 1153 | .unwrap(); 1154 | let head = node.head.lock().unwrap().clone().unwrap().clone(); 1155 | assert_ne!(head, prev_head); 1156 | let hashgraph = node.hashgraph.lock().unwrap(); 1157 | let head_event = hashgraph.get(&head).unwrap(); 1158 | assert!(head_event.is_valid(&head).unwrap()); 1159 | assert_eq!( 1160 | head_event.parents(), 1161 | &Some(ParentsPair(prev_head.clone(), prev_head.clone())) 1162 | ); 1163 | } 1164 | 1165 | #[test] 1166 | fn root_event_should_be_valid_in_node() { 1167 | let node = create_node(); 1168 | let head = node.head.lock().unwrap().clone().unwrap().clone(); 1169 | let event = { 1170 | let hashgraph = node.hashgraph.lock().unwrap(); 1171 | hashgraph.get(&head).unwrap().clone() 1172 | }; 1173 | assert!(node.is_valid_event(&head, &event).unwrap()); 1174 | } 1175 | 1176 | #[test] 1177 | fn invalid_event_should_be_invalid_in_node() { 1178 | let node = create_node(); 1179 | let head = node.head.lock().unwrap().clone().unwrap().clone(); 1180 | let event = { 1181 | let hashgraph = node.hashgraph.lock().unwrap(); 1182 | hashgraph.get(&head).unwrap().clone() 1183 | }; 1184 | use ring::digest::{digest, SHA256}; 1185 | let real_hash = EventHash::new(digest(&SHA256, &vec![1]).as_ref()); 1186 | assert!(!node.is_valid_event(&real_hash, &event).unwrap()); 1187 | } 1188 | 1189 | #[test] 1190 | fn event_with_invalid_history_should_be_invalid_in_node() { 1191 | let node = create_node(); 1192 | let head = node.head.lock().unwrap().clone().unwrap().clone(); 1193 | let mut event = Event::new( 1194 | vec![], 1195 | Some(ParentsPair(head.clone(), head.clone())), 1196 | node.pk.public_key_bytes().to_vec(), 1197 | ); 1198 
| let hash = event.hash().unwrap(); 1199 | let signature = node.pk.sign(hash.as_ref()).as_ref().to_vec(); 1200 | event.sign(EventSignature::new(signature.as_ref())); 1201 | node.add_event(event.clone()).unwrap(); 1202 | assert!(!node.is_valid_event(&hash, &event).unwrap()); 1203 | } 1204 | 1205 | #[test] 1206 | fn it_should_create_a_head_with_head_and_remote_head_parents() { 1207 | let node = create_node(); 1208 | let remote_node = create_node(); 1209 | let head = node.head.lock().unwrap().clone().unwrap().clone(); 1210 | let remote_head = remote_node.head.lock().unwrap().clone().unwrap().clone(); 1211 | let remote_hashgraph = { 1212 | let mutex_guard = remote_node.hashgraph.lock().unwrap(); 1213 | (*mutex_guard).clone() 1214 | }; 1215 | node.maybe_change_head(remote_head.clone(), remote_hashgraph) 1216 | .unwrap(); 1217 | let new_head = node.head.lock().unwrap().clone().unwrap().clone(); 1218 | let hashgraph = node.hashgraph.lock().unwrap(); 1219 | let head_event = hashgraph.get(&new_head).unwrap(); 1220 | assert_eq!( 1221 | head_event.parents(), 1222 | &Some(ParentsPair(head.clone(), remote_head.clone())) 1223 | ); 1224 | } 1225 | 1226 | #[test] 1227 | #[should_panic(expected = "EventNotFound")] 1228 | fn it_shouldnt_create_a_head() { 1229 | let node = create_node(); 1230 | let remote_node = create_node(); 1231 | let remote_hashgraph = { 1232 | let mutex_guard = remote_node.hashgraph.lock().unwrap(); 1233 | (*mutex_guard).clone() 1234 | }; 1235 | use ring::digest::{digest, SHA256}; 1236 | let real_hash = EventHash::new(digest(&SHA256, &vec![1]).as_ref()); 1237 | node.maybe_change_head(real_hash.clone(), remote_hashgraph) 1238 | .unwrap(); 1239 | } 1240 | 1241 | #[test] 1242 | fn it_should_merge_the_hashgraph() { 1243 | println!("start"); 1244 | let node = create_node(); 1245 | let remote_node = create_node(); 1246 | println!("prev get heads"); 1247 | let head = node.head.lock().unwrap().clone().unwrap().clone(); 1248 | let remote_head = 
remote_node.head.lock().unwrap().clone().unwrap().clone(); 1249 | println!("prev remote"); 1250 | let remote_hashgraph = { 1251 | let mutex_guard = remote_node.hashgraph.lock().unwrap(); 1252 | (*mutex_guard).clone() 1253 | }; 1254 | println!("prev merge"); 1255 | node.merge_hashgraph(remote_hashgraph).unwrap(); 1256 | println!("post merge"); 1257 | let hashgraph = node.hashgraph.lock().unwrap(); 1258 | assert!(hashgraph.contains_key(&head)); 1259 | assert!(hashgraph.contains_key(&remote_head)); 1260 | } 1261 | } 1262 | -------------------------------------------------------------------------------- /lachesis-rs/src/tcp_server.rs: -------------------------------------------------------------------------------- 1 | use crate::event::event_hash::EventHash; 2 | use crate::hashgraph::{BTreeHashgraph, HashgraphWire}; 3 | use crate::lachesis::opera::{Opera, OperaWire}; 4 | use crate::lachesis::Lachesis; 5 | use crate::node::Node; 6 | use crate::peer::{Peer, PeerId}; 7 | use crate::swirlds::Swirlds; 8 | use bincode::serialize; 9 | use failure::Error; 10 | use ring::rand::SystemRandom; 11 | use ring::signature; 12 | use std::io::{Read, Write}; 13 | use std::net::{TcpListener, TcpStream}; 14 | use std::sync::Arc; 15 | use std::thread::{sleep, spawn, JoinHandle}; 16 | use std::time::Duration; 17 | 18 | fn create_lachesis_node(rng: &mut SystemRandom) -> Result, Error> { 19 | let pkcs8_bytes = signature::Ed25519KeyPair::generate_pkcs8(rng)?; 20 | let kp = signature::Ed25519KeyPair::from_pkcs8(untrusted::Input::from(&pkcs8_bytes))?; 21 | Ok(Lachesis::new(3, kp)) 22 | } 23 | 24 | fn create_swirlds_node(rng: &mut SystemRandom) -> Result, Error> { 25 | let hashgraph = BTreeHashgraph::new(); 26 | let pkcs8_bytes = signature::Ed25519KeyPair::generate_pkcs8(rng)?; 27 | let kp = signature::Ed25519KeyPair::from_pkcs8(untrusted::Input::from(&pkcs8_bytes))?; 28 | Swirlds::new(kp, hashgraph) 29 | } 30 | 31 | pub struct TcpNode { 32 | pub address: String, 33 | pub node: N, 34 | } 35 | 36 | 
impl TcpNode> { 37 | pub fn new_lachesis( 38 | rng: &mut SystemRandom, 39 | address: String, 40 | ) -> Result>, Error> { 41 | let node = create_lachesis_node(rng)?; 42 | Ok(TcpNode { address, node }) 43 | } 44 | } 45 | 46 | impl TcpNode> { 47 | pub fn new( 48 | rng: &mut SystemRandom, 49 | address: String, 50 | ) -> Result>, Error> { 51 | let node = create_swirlds_node(rng)?; 52 | Ok(TcpNode { address, node }) 53 | } 54 | } 55 | 56 | #[derive(Clone)] 57 | pub struct TcpPeer { 58 | pub address: String, 59 | pub id: PeerId, 60 | } 61 | 62 | impl Peer for TcpPeer { 63 | fn get_sync( 64 | &self, 65 | _pk: PeerId, 66 | _k: Option<&BTreeHashgraph>, 67 | ) -> Result<(EventHash, BTreeHashgraph), Error> { 68 | let mut buffer = Vec::new(); 69 | let mut stream = TcpStream::connect(&self.address.clone())?; 70 | let mut last_received = 0; 71 | while last_received == 0 { 72 | last_received = stream.read_to_end(&mut buffer)?; 73 | } 74 | let (eh, wire): (EventHash, HashgraphWire) = bincode::deserialize(&buffer)?; 75 | let hashgraph = BTreeHashgraph::from(wire); 76 | Ok((eh, hashgraph)) 77 | } 78 | fn address(&self) -> String { 79 | self.address.clone() 80 | } 81 | fn id(&self) -> &PeerId { 82 | &self.id 83 | } 84 | } 85 | 86 | impl Peer for TcpPeer { 87 | fn get_sync(&self, _pk: PeerId, _k: Option<&Opera>) -> Result<(EventHash, Opera), Error> { 88 | let mut buffer = Vec::new(); 89 | let mut stream = TcpStream::connect(&self.address.clone())?; 90 | let mut last_received = 0; 91 | while last_received == 0 { 92 | last_received = stream.read_to_end(&mut buffer)?; 93 | } 94 | let (eh, wire): (EventHash, OperaWire) = bincode::deserialize(&buffer)?; 95 | Ok((eh, wire.into_opera())) 96 | } 97 | fn address(&self) -> String { 98 | self.address.clone() 99 | } 100 | fn id(&self) -> &PeerId { 101 | &self.id 102 | } 103 | } 104 | 105 | pub struct TcpApp(Arc>>); 106 | 107 | impl TcpApp { 108 | pub fn new(n: Arc>>) -> TcpApp { 109 | TcpApp(n) 110 | } 111 | 112 | pub fn run(self) -> 
Result<(JoinHandle<()>, JoinHandle<()>), Error> { 113 | let answer_thread_node = self.0.clone(); 114 | let sync_thread_node = self.0.clone(); 115 | let answer_handle = spawn(move || { 116 | let listener = TcpListener::bind(&answer_thread_node.address).unwrap(); 117 | for stream_result in listener.incoming() { 118 | let mut stream = stream_result.unwrap(); 119 | let message = answer_thread_node.node.respond_message(None).unwrap(); 120 | let payload = serialize(&message).unwrap(); 121 | stream.write(&payload).unwrap(); 122 | } 123 | () 124 | }); 125 | let sync_handle = spawn(move || { 126 | let mut rng = rand::thread_rng(); 127 | let mut counter = 0usize; 128 | let node_id = sync_thread_node.node.get_id(); 129 | loop { 130 | if counter % 100 == 0 { 131 | let head = sync_thread_node.node.get_head().unwrap(); 132 | let (n_rounds, n_events) = sync_thread_node.node.get_stats().unwrap(); 133 | info!( 134 | "Node {:?}: Head {:?} Rounds {:?} Pending events {:?}", 135 | node_id, head, n_rounds, n_events 136 | ); 137 | } 138 | match sync_thread_node.node.run(&mut rng) { 139 | Ok(_) => {} 140 | Err(e) => panic!("Error! 
use std::env::args;
use std::process::exit;

// `&str` consts are implicitly `'static`; the explicit lifetime was
// redundant (clippy::redundant_static_lifetimes).
const USAGE: &str = "USAGE: llvm-vm-backend [llvm file] [output file]";

/// True when argv has exactly three entries: the program name, the LLVM
/// input file, and the output file.
fn has_expected_arg_count(argc: usize) -> bool {
    argc == 3
}

fn main() {
    // Only argument validation is implemented so far; compilation of the
    // LLVM input is still to come.
    if !has_expected_arg_count(args().len()) {
        eprintln!("{}", USAGE);
        exit(1);
    }
}