├── codeowners
├── img
│   └── evaluate_result.png
├── .gitignore
├── .github
│   ├── workflows
│   │   ├── security_audit.yml
│   │   ├── commitlint.yml
│   │   ├── bump_version.yml
│   │   ├── github_release.yml
│   │   ├── auto_merge_prs.yml
│   │   ├── master.yml
│   │   ├── tag_release.yml
│   │   └── pr.yml
│   └── ISSUE_TEMPLATE
│       ├── feature_request.md
│       └── bug_report.md
├── CHANGELOG.md
├── README.md
├── Cargo.toml
├── LICENSE-MIT
├── src
│   ├── error.rs
│   ├── id.rs
│   ├── lib.rs
│   ├── messages.rs
│   ├── rumor_state.rs
│   ├── gossip.rs
│   └── node.rs
├── LICENSE-BSD
└── examples
    └── network.rs
/codeowners:
--------------------------------------------------------------------------------
1 | * @maidsafe/backend_codeowners
2 |
--------------------------------------------------------------------------------
/img/evaluate_result.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/maidsafe-archive/sn_gossip/HEAD/img/evaluate_result.png
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | *.exe
2 | *.lock
3 | *.rsproj
4 | *.rs.bk
5 | *.sln
6 | *.sublime-*
7 | *.swp
8 | */.vs
9 | tags*
10 | build/
11 | target
12 | /.project
13 | /bin
14 | .cargo/
15 | .DS_Store
16 |
--------------------------------------------------------------------------------
/.github/workflows/security_audit.yml:
--------------------------------------------------------------------------------
1 | name: Security audit
2 | on:
3 | schedule:
4 | - cron: '0 0 * * *'
5 | jobs:
6 | audit:
7 | runs-on: ubuntu-latest
8 | steps:
9 | - uses: actions/checkout@v2
10 | - uses: actions-rs/audit-check@v1
11 | with:
12 | token: ${{ secrets.GITHUB_TOKEN }}
13 |
--------------------------------------------------------------------------------
/.github/workflows/commitlint.yml:
--------------------------------------------------------------------------------
1 | name: Commitlint
2 | on: [pull_request]
3 |
4 | jobs:
5 | lint:
6 | runs-on: ubuntu-latest
7 | env:
8 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
9 | steps:
10 | - uses: actions/checkout@v2
11 | with:
12 | fetch-depth: 0
13 | - uses: wagoid/commitlint-github-action@f114310111fdbd07e99f47f9ca13d62b3ec98372
14 |
--------------------------------------------------------------------------------
/CHANGELOG.md:
--------------------------------------------------------------------------------
1 | # Changelog
2 |
3 | All notable changes to this project will be documented in this file. See [standard-version](https://github.com/conventional-changelog/standard-version) for commit guidelines.
4 |
5 | ### [0.1.2](https://github.com/maidsafe/sn_gossip/compare/v0.1.1...v0.1.2) (2020-11-23)
6 |
7 | ### [0.1.1](https://github.com/maidsafe/sn_gossip/compare/v0.1.0...v0.1.1) (2020-09-29)
8 |
9 |
10 | ### Bug Fixes
11 |
12 | * **clippy:** fix clippy warnings and errors ([812b78b](https://github.com/maidsafe/sn_gossip/commit/812b78bfa5014e087397b778bb4219ed97733923))
13 |
14 | ### [0.1.0](https://github.com/maidsafe/sn_gossip/compare/v0.1.0...v0.1.0) (2018-02-28)
15 | * Initial implementation
16 |
--------------------------------------------------------------------------------
/.github/workflows/bump_version.yml:
--------------------------------------------------------------------------------
1 | name: Version bump and create PR for changes
2 |
3 | on:
4 | # Trigger the workflow on push only for the master branch
5 | push:
6 | branches:
7 | - master
8 |
9 | env:
10 | NODE_ENV: 'development'
11 |
12 | jobs:
13 | update_changelog:
14 | runs-on: ubuntu-20.04
15 | # Don't run if we're on a release commit
16 | if: "!startsWith(github.event.head_commit.message, 'chore(release):')"
17 | steps:
18 | - uses: actions/checkout@v2
19 | with:
20 | fetch-depth: '0'
21 | - name: Bump Version
22 | uses: maidsafe/rust-version-bump-branch-creator@v2
23 | with:
24 | token: ${{ secrets.BRANCH_CREATOR_TOKEN }}
25 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/feature_request.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: Feature request
3 | about: Suggest an idea for this project
4 | title: ''
5 | labels: enhancement
6 | assignees: ''
7 |
8 | ---
9 |
10 | Thank you for contributing to the project!
11 | We recommend you check out our ["Contributing to the SAFE Network"](https://github.com/maidsafe/QA/blob/master/CONTRIBUTING.md) guide if you haven't already.
12 |
13 | **Is your feature request related to a problem? Please describe.**
14 | A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]
15 |
16 | **Describe the solution you'd like**
17 | A clear and concise description of what you want to happen.
18 |
19 | **Describe alternatives you've considered**
20 | A clear and concise description of any alternative solutions or features you've considered.
21 |
22 | **Additional context**
23 | Add any other context or screenshots about the feature request here.
24 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # sn_gossip
2 |
3 | An implementation of a push-pull gossip protocol described in [Randomized Rumor Spreading - Karp et al. [FOCS 2000]](http://zoo.cs.yale.edu/classes/cs426/2013/bib/karp00randomized.pdf).
4 |
5 | ## Evaluation Result
6 |
7 | ![Evaluation Result](img/evaluate_result.png)
8 |
9 |
10 |
11 | ## License
12 | This library is dual-licensed under the Modified BSD ([LICENSE-BSD](https://opensource.org/licenses/BSD-3-Clause)) or the MIT license ([LICENSE-MIT](http://opensource.org/licenses/MIT)), at your option.
13 |
14 | ## Contributing
15 |
16 | Want to contribute? Great :tada:
17 |
18 | There are many ways to give back to the project, whether it be writing new code, fixing bugs, or just reporting errors. All forms of contributions are encouraged!
19 |
20 | For instructions on how to contribute, see our [Guide to contributing](https://github.com/maidsafe/QA/blob/master/CONTRIBUTING.md).
21 |
--------------------------------------------------------------------------------
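The README stops short of a usage example. Below is a minimal sketch of how the crate's public API (re-exported from src/lib.rs and implemented in src/node.rs) fits together; the two-node, in-memory wiring and the fixed number of rounds are illustrative choices, not something the README or the crate prescribes.

```rust
// Minimal sketch: two in-memory nodes exchange one rumor via the public API.
use sn_gossip::{Error, Node};

fn main() -> Result<(), Error> {
    let mut alice = Node::default();
    let mut bob = Node::default();

    // Every node must learn about all peers before gossiping starts.
    alice.add_peer(bob.id())?;
    bob.add_peer(alice.id())?;

    // Alice starts a rumor; any serde-`Serialize` value is accepted.
    alice.initiate_rumor(&"hello world".to_string())?;

    // Drive a few rounds: each round pushes to one randomly chosen peer, and
    // the receiver may answer with a Pull gossip.
    for _ in 0..4 {
        let (dst_id, push) = alice.next_round()?;
        if let Some(push_bytes) = push {
            // With only two nodes, the chosen destination is always Bob.
            assert_eq!(dst_id, bob.id());
            if let Some(pull_bytes) = bob.receive_gossip(&alice.id(), &push_bytes) {
                let _ = alice.receive_gossip(&bob.id(), &pull_bytes);
            }
        }
    }

    println!("Bob now holds {} rumor(s)", bob.rumors().len());
    Ok(())
}
```

In a real deployment the serialised Push/Pull payloads returned by `next_round()` and `receive_gossip()` would travel over an actual transport, as in the TCP-based examples/network.rs.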
/Cargo.toml:
--------------------------------------------------------------------------------
1 | [package]
2 | authors = [ "MaidSafe Developers " ]
3 | description = "An implementation of a push-pull gossip protocol."
4 | homepage = "https://maidsafe.net"
5 | license = "MIT OR BSD-3-Clause"
6 | name = "sn_gossip"
7 | readme = "README.md"
8 | repository = "https://github.com/maidsafe/sn_gossip"
9 | version = "0.1.2"
10 | edition = "2018"
11 |
12 | [dependencies]
13 | bytes = "~0.4.11"
14 | futures = "~0.1.25"
15 | log = "~0.4.8"
16 | quick-error = "1.2.3"
17 | serde = "1.0.104"
18 | serde_derive = "1.0.104"
19 | unwrap = "1.2.1"
20 | bincode = "1.2.1"
21 | err-derive = "~0.2.2"
22 | rand = "~0.7.3"
23 | rand_core = "~0.5.1"
24 |
25 | [dependencies.ed25519]
26 | package = "tmp-ed25519"
27 | version = "1.0.0-pre.3"
28 | features = [ "serde" ]
29 |
30 | [dependencies.tiny-keccak]
31 | version = "2.0.0"
32 | features = [ "sha3" ]
33 |
34 | [dev-dependencies]
35 | futures-cpupool = "~0.1.8"
36 | itertools = "~0.8.2"
37 | tokio = "~0.1.1"
38 | tokio-io = "~0.1.5"
39 |
--------------------------------------------------------------------------------
/LICENSE-MIT:
--------------------------------------------------------------------------------
1 | Copyright 2018 MaidSafe.net limited.
2 |
3 | Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
4 |
5 | The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
6 |
7 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
8 |
--------------------------------------------------------------------------------
/src/error.rs:
--------------------------------------------------------------------------------
1 | // Copyright 2018 MaidSafe.net limited.
2 | //
3 | // This SAFE Network Software is licensed to you under the MIT license <LICENSE-MIT
4 | // http://opensource.org/licenses/MIT> or the Modified BSD license <LICENSE-BSD
5 | // https://opensource.org/licenses/BSD-3-Clause>, at your option. This file may not be copied,
6 | // modified, or distributed except according to those terms. Please review the Licences for the
7 | // specific language governing permissions and limitations relating to use of the SAFE Network
8 | // Software.
9 |
10 | use err_derive::Error;
11 |
12 | /// Node error variants.
13 | #[derive(Debug, Error)]
14 | #[allow(missing_docs)]
15 | pub enum Error {
16 | #[error(display = "Gossip group empty")]
17 | NoPeers,
18 | #[error(display = "Already started gossiping.")]
19 | AlreadyStarted,
20 | #[error(display = "Failed to verify signature.")]
21 | SigFailure,
22 | #[error(display = "IO error")]
23 | Io(#[error(cause)] ::std::io::Error),
24 | #[error(display = "Serialisation Error.")]
25 | Serialisation(#[error(cause)] Box<bincode::ErrorKind>),
26 | }
27 |
--------------------------------------------------------------------------------
/.github/workflows/github_release.yml:
--------------------------------------------------------------------------------
1 | name: Create GitHub Release
2 |
3 |
4 | on:
5 | push:
6 | tags:
7 | - 'v*'
8 |
9 | jobs:
10 | release:
11 | # only if we have a tag
12 | name: Release
13 | runs-on: ubuntu-20.04
14 | if: "startsWith(github.event.head_commit.message, 'chore(release):')"
15 |
16 | steps:
17 | - uses: actions/checkout@v2
18 | with:
19 | fetch-depth: '0'
20 |
21 | - name: Set tag as env
22 | shell: bash
23 | run: echo "RELEASE_VERSION=$(echo ${GITHUB_REF:10})" >> $GITHUB_ENV
24 |
25 | - name: lets check tag
26 | shell: bash
27 | run: echo ${{ env.RELEASE_VERSION }}
28 |
29 | - name: Generate Changelog
30 | shell: bash
31 | run: awk '/# \[/{c++;p=1}{if(c==2){exit}}p;' CHANGELOG.md > RELEASE-CHANGELOG.txt
32 | - run: cat RELEASE-CHANGELOG.txt
33 | - name: Release generation
34 | uses: softprops/action-gh-release@91409e712cf565ce9eff10c87a8d1b11b81757ae
35 | env:
36 | GITHUB_TOKEN: ${{ secrets.MERGE_BUMP_BRANCH_TOKEN }}
37 | with:
38 | body_path: RELEASE-CHANGELOG.txt
39 |
--------------------------------------------------------------------------------
/src/id.rs:
--------------------------------------------------------------------------------
1 | // Copyright 2018 MaidSafe.net limited.
2 | //
3 | // This SAFE Network Software is licensed to you under the MIT license <LICENSE-MIT
4 | // http://opensource.org/licenses/MIT> or the Modified BSD license <LICENSE-BSD
5 | // https://opensource.org/licenses/BSD-3-Clause>, at your option. This file may not be copied,
6 | // modified, or distributed except according to those terms. Please review the Licences for the
7 | // specific language governing permissions and limitations relating to use of the SAFE Network
8 | // Software.
9 |
10 | use ed25519::{PublicKey, PUBLIC_KEY_LENGTH};
11 | use std::convert::From;
12 | use std::fmt::{self, Debug, Formatter};
13 |
14 | /// The ID of a node - equivalent to its public key.
15 | #[derive(Clone, Copy, Ord, PartialOrd, Eq, PartialEq, Hash, Serialize, Deserialize)]
16 | pub struct Id(pub [u8; PUBLIC_KEY_LENGTH]);
17 |
18 | impl From<PublicKey> for Id {
19 | fn from(key: PublicKey) -> Self {
20 | Id(key.to_bytes())
21 | }
22 | }
23 |
24 | impl Debug for Id {
25 | fn fmt(&self, formatter: &mut Formatter<'_>) -> fmt::Result {
26 | write!(
27 | formatter,
28 | "{:02x}{:02x}{:02x}..",
29 | self.0[0], self.0[1], self.0[2]
30 | )
31 | }
32 | }
33 |
--------------------------------------------------------------------------------
/.github/workflows/auto_merge_prs.yml:
--------------------------------------------------------------------------------
1 | # auto merge workflow.
2 | #
3 | # Auto merge PR if commit msg begins with `chore(release):`,
4 | # or if it has been raised by Dependabot.
5 | # Uses https://github.com/ridedott/merge-me-action.
6 |
7 | name: Merge Version Change and Dependabot PRs automatically
8 |
9 | on: pull_request
10 |
11 | jobs:
12 | merge:
13 | runs-on: ubuntu-20.04
14 | steps:
15 | - uses: actions/checkout@v2
16 | with:
17 | fetch-depth: '0'
18 |
19 | - name: get commit message
20 | run: |
21 | commitmsg=$(git log --format=%s -n 1 ${{ github.event.pull_request.head.sha }})
22 | echo "commitmsg=${commitmsg}" >> $GITHUB_ENV
23 |
24 | - name: show commit message
25 | run: echo $commitmsg
26 |
27 | - name: Merge Version change PR
28 | if: startsWith( env.commitmsg, 'chore(release):')
29 | uses: ridedott/merge-me-action@81667e6ae186ddbe6d3c3186d27d91afa7475e2c
30 | with:
31 | GITHUB_LOGIN: dirvine
32 | GITHUB_TOKEN: ${{ secrets.MERGE_BUMP_BRANCH_TOKEN }}
33 | MERGE_METHOD: REBASE
34 |
35 | - name: Dependabot Merge
36 | uses: ridedott/merge-me-action@master
37 | with:
38 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
39 | MERGE_METHOD: REBASE
40 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/bug_report.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: Bug report
3 | about: Create a report to help us improve
4 | title: ''
5 | labels: bug
6 | assignees: ''
7 |
8 | ---
9 |
10 | Thank you for contributing to the project!
11 | We recommend you check out our ["Contributing to the SAFE Network"](https://github.com/maidsafe/QA/blob/master/CONTRIBUTING.md) guide if you haven't already.
12 |
13 | **Describe the bug**
14 | A clear and concise description of what the bug is.
15 |
16 | **To Reproduce**
17 | Steps to reproduce the behavior, e.g.:
18 | 1. Go to '...'
19 | 2. Click on '....'
20 | 3. Scroll down to '....'
21 | 4. See error
22 |
23 | **Expected behavior**
24 | A clear and concise description of what you expected to happen.
25 |
26 | **Screenshots**
27 | If applicable, add screenshots to help explain your problem.
28 |
29 | **Desktop (please complete the following information where applicable):**
30 | - OS: [e.g. Ubuntu 18.04]
31 | - Shell: [e.g. Zsh]
32 | - Rust: [e.g. 1.42.0 stable]
33 | - Browser [e.g. chrome, safari]
34 | - Version [e.g. 22]
35 | - etc
36 |
37 | **Smartphone (please complete the following information where applicable):**
38 | - Device: [e.g. iPhone6]
39 | - OS: [e.g. iOS8.1]
40 | - Browser [e.g. stock browser, safari]
41 | - Version [e.g. 22]
42 | - etc
43 |
44 | **Additional context**
45 | Add any other context about the problem here.
46 |
--------------------------------------------------------------------------------
/.github/workflows/master.yml:
--------------------------------------------------------------------------------
1 | # Push to master workflow.
2 | #
3 | # Runs when a PR has been merged to the master branch.
4 | #
5 | # 1. Generates a release build.
6 | # 2. If the last commit is a version change, publish.
7 |
8 | name: Master
9 |
10 | on:
11 | push:
12 | branches:
13 | - master
14 |
15 | env:
16 | # Run all cargo commands with --verbose.
17 | CARGO_TERM_VERBOSE: true
18 | RUST_BACKTRACE: 1
19 |
20 | jobs:
21 | build:
22 | name: Build
23 | runs-on: ${{ matrix.os }}
24 | strategy:
25 | matrix:
26 | os: [ubuntu-latest, windows-latest, macOS-latest]
27 | steps:
28 | - uses: actions/checkout@v2
29 |
30 | - name: Install Rust
31 | uses: actions-rs/toolchain@v1
32 | with:
33 | profile: minimal
34 | toolchain: stable
35 | override: true
36 |
37 | # Generate Cargo.lock, needed for the cache.
38 | - name: Generate lockfile
39 | run: cargo generate-lockfile
40 |
41 | # Cache.
42 | - name: Cargo cache registry, index and build
43 | uses: actions/cache@v2
44 | with:
45 | path: |
46 | ~/.cargo/registry
47 | ~/.cargo/git
48 | target
49 | key: ${{ runner.os }}-cargo-cache-${{ hashFiles('**/Cargo.lock') }}
50 |
51 | # Make sure the code builds.
52 | - name: Run cargo build
53 | run: cargo build --release --workspace
54 |
--------------------------------------------------------------------------------
/LICENSE-BSD:
--------------------------------------------------------------------------------
1 | Copyright 2018 MaidSafe.net limited.
2 |
3 | Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
4 |
5 | 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
6 |
7 | 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
8 |
9 | 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
10 |
11 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
12 |
--------------------------------------------------------------------------------
/.github/workflows/tag_release.yml:
--------------------------------------------------------------------------------
1 | name: Tag release commit
2 |
3 | on:
4 | # Trigger the workflow on push only for the master branch
5 | push:
6 | branches:
7 | - master
8 |
9 | env:
10 | NODE_ENV: 'development'
11 | GITHUB_TOKEN: ${{ secrets.BRANCH_CREATOR_TOKEN }}
12 |
13 | jobs:
14 | tag:
15 | runs-on: ubuntu-latest
16 | # Only run on a release commit
17 | if: "startsWith(github.event.head_commit.message, 'chore(release):')"
18 | steps:
19 | - uses: actions/checkout@v2
20 | with:
21 | fetch-depth: '0'
22 | token: ${{ secrets.BRANCH_CREATOR_TOKEN }}
23 | - run: echo "RELEASE_VERSION=$(git log -1 --pretty=%s)" >> $GITHUB_ENV
24 | # parse out non-tag text
25 | - run: echo "RELEASE_VERSION=$( echo $RELEASE_VERSION | sed 's/chore(release)://' )" >> $GITHUB_ENV
26 | # remove spaces, but add back in `v` to tag, which is needed for standard-version
27 | - run: echo "RELEASE_VERSION=v$(echo $RELEASE_VERSION | tr -d '[:space:]')" >> $GITHUB_ENV
28 | - run: echo $RELEASE_VERSION
29 | - run: git tag $RELEASE_VERSION
30 |
31 | - name: Setup git for push
32 | run: |
33 | git remote add github "$REPO"
34 | git config --local user.email "action@github.com"
35 | git config --local user.name "GitHub Action"
36 | - name: Push tags to master
37 | run: git push "https://$GITHUB_ACTOR:$GITHUB_TOKEN@github.com/$GITHUB_REPOSITORY" HEAD:master --tags
38 |
--------------------------------------------------------------------------------
/src/lib.rs:
--------------------------------------------------------------------------------
1 | // Copyright 2018 MaidSafe.net limited.
2 | //
3 | // This SAFE Network Software is licensed to you under the MIT license <LICENSE-MIT
4 | // http://opensource.org/licenses/MIT> or the Modified BSD license <LICENSE-BSD
5 | // https://opensource.org/licenses/BSD-3-Clause>, at your option. This file may not be copied,
6 | // modified, or distributed except according to those terms. Please review the Licences for the
7 | // specific language governing permissions and limitations relating to use of the SAFE Network
8 | // Software.
9 |
10 | //! An implementation of a push-pull gossip protocol.
11 |
12 | #![doc(
13 | html_logo_url = "https://raw.githubusercontent.com/maidsafe/QA/master/Images/maidsafe_logo.png",
14 | html_favicon_url = "https://maidsafe.net/img/favicon.ico",
15 | html_root_url = "https://docs.rs/sn_gossip"
16 | )]
17 | #![forbid(
18 | arithmetic_overflow,
19 | mutable_transmutes,
20 | no_mangle_const_items,
21 | unknown_crate_types,
22 | warnings
23 | )]
24 | #![deny(
25 | bad_style,
26 | deprecated,
27 | improper_ctypes,
28 | missing_docs,
29 | non_shorthand_field_patterns,
30 | overflowing_literals,
31 | stable_features,
32 | unconditional_recursion,
33 | unknown_lints,
34 | unsafe_code,
35 | unused_allocation,
36 | unused_attributes,
37 | unused_comparisons,
38 | unused_features,
39 | unused_parens,
40 | while_true,
41 | unused
42 | )]
43 | #![warn(
44 | trivial_casts,
45 | trivial_numeric_casts,
46 | unused_extern_crates,
47 | unused_import_braces,
48 | unused_qualifications,
49 | unused_results
50 | )]
51 | #![allow(missing_copy_implementations, missing_debug_implementations)]
52 |
53 | #[macro_use]
54 | extern crate log;
55 | #[macro_use]
56 | extern crate serde_derive;
57 | #[cfg(test)]
58 | #[macro_use]
59 | extern crate unwrap;
60 |
61 | mod error;
62 | mod gossip;
63 | mod id;
64 | mod messages;
65 | mod node;
66 | mod rumor_state;
67 |
68 | pub use crate::error::Error;
69 | pub use crate::gossip::Statistics;
70 | pub use crate::id::Id;
71 | pub use crate::node::Node;
72 |
--------------------------------------------------------------------------------
/src/messages.rs:
--------------------------------------------------------------------------------
1 | // Copyright 2018 MaidSafe.net limited.
2 | //
3 | // This SAFE Network Software is licensed to you under the MIT license <LICENSE-MIT
4 | // http://opensource.org/licenses/MIT> or the Modified BSD license <LICENSE-BSD
5 | // https://opensource.org/licenses/BSD-3-Clause>, at your option. This file may not be copied,
6 | // modified, or distributed except according to those terms. Please review the Licences for the
7 | // specific language governing permissions and limitations relating to use of the SAFE Network
8 | // Software.
9 |
10 | use super::gossip::Rumor;
11 | use crate::error::Error;
12 | use bincode::{deserialize, serialize};
13 | use ed25519::{Keypair, PublicKey, Signature};
14 |
15 | /// Messages sent via a direct connection, wrapper of gossip protocol requests.
16 | #[derive(Serialize, Debug, Deserialize)]
17 | pub struct Message(pub Vec<u8>, pub Signature);
18 |
19 | #[cfg(not(test))]
20 | impl Message {
21 | pub fn serialise(gossip: &Gossip, keys: &Keypair) -> Result<Vec<u8>, Error> {
22 | let serialised_msg = serialize(gossip)?;
23 | let sig: Signature = keys.sign(&serialised_msg);
24 | Ok(serialize(&Message(serialised_msg, sig))?)
25 | }
26 |
27 | pub fn deserialise(serialised_msg: &[u8], key: &PublicKey) -> Result<Gossip, Error> {
28 | let msg: Message = deserialize(serialised_msg)?;
29 | if key.verify(&msg.0, &msg.1).is_ok() {
30 | Ok(deserialize(&msg.0)?)
31 | } else {
32 | Err(Error::SigFailure)
33 | }
34 | }
35 | }
36 |
37 | #[cfg(test)]
38 | impl Message {
39 | pub fn serialise(gossip: &Gossip, _keys: &Keypair) -> Result<Vec<u8>, Error> {
40 | Ok(serialize(gossip)?)
41 | }
42 |
43 | pub fn deserialise(serialised_msg: &[u8], _key: &PublicKey) -> Result<Gossip, Error> {
44 | Ok(deserialize(serialised_msg)?)
45 | }
46 | }
47 |
48 | /// Gossip with rumors
49 | #[derive(Debug, Serialize, Deserialize)]
50 | pub enum Gossip {
51 | /// Sent from Node A to Node B to push a rumor.
52 | Push(Vec<Rumor>),
53 | /// Sent from Node B to Node A as a reaction to receiving a push rumor from A.
54 | Pull(Vec<Rumor>),
55 | }
56 |
--------------------------------------------------------------------------------
/.github/workflows/pr.yml:
--------------------------------------------------------------------------------
1 | # PR workflow.
2 | #
3 | # Runs full suite of checks, with warnings treated as errors.
4 | # Gather code coverage stats and publish them on coveralls.io.
5 |
6 | name: PR
7 |
8 | on: pull_request
9 |
10 | env:
11 | # Run all cargo commands with --verbose.
12 | CARGO_TERM_VERBOSE: true
13 | RUST_BACKTRACE: 1
14 | # Deny all compiler warnings.
15 | RUSTFLAGS: "-D warnings"
16 |
17 | jobs:
18 | checks:
19 | name: Clippy & fmt
20 | runs-on: ubuntu-latest
21 | steps:
22 | - uses: actions/checkout@v2
23 |
24 | # Install Rust and required components
25 | - uses: actions-rs/toolchain@v1
26 | with:
27 | profile: minimal
28 | toolchain: stable
29 | override: true
30 | components: rustfmt, clippy
31 |
32 | # Generate Cargo.lock, needed for the cache.
33 | - name: Generate Cargo.lock
34 | run: cargo generate-lockfile
35 |
36 | # Cache.
37 | - name: Cargo cache registry, index and build
38 | uses: actions/cache@v2
39 | with:
40 | path: |
41 | ~/.cargo/registry
42 | ~/.cargo/git
43 | target
44 | key: ${{ runner.os }}-cargo-cache-${{ hashFiles('**/Cargo.lock') }}
45 |
46 | # Check if the code is formatted correctly.
47 | - name: Check formatting
48 | run: cargo fmt --all -- --check
49 |
50 | # Run Clippy.
51 | - name: Clippy checks
52 | run: cargo clippy --release
53 |
54 | coverage:
55 | name: Code coverage check
56 | runs-on: ubuntu-latest
57 | steps:
58 | - uses: actions/checkout@v2
59 | # Install Rust and required components
60 | - uses: actions-rs/toolchain@v1
61 | with:
62 | profile: minimal
63 | toolchain: stable
64 | override: true
65 |
66 | # Generate Cargo.lock, needed for the cache.
67 | - name: Generate Cargo.lock
68 | run: cargo generate-lockfile
69 |
70 | # Cache.
71 | - name: Cargo cache registry, index and build
72 | uses: actions/cache@v2
73 | with:
74 | path: |
75 | ~/.cargo/registry
76 | ~/.cargo/git
77 | target
78 | key: ${{ runner.os }}-cargo-cache-${{ hashFiles('**/Cargo.lock') }}
79 |
80 | # Run cargo tarpaulin & push result to coveralls.io
81 | - name: rust-tarpaulin code coverage check
82 | uses: actions-rs/tarpaulin@master
83 | with:
84 | args: '-v -t 500 --release --out Lcov'
85 | - name: Push code coverage results to coveralls.io
86 | uses: coverallsapp/github-action@master
87 | with:
88 | github-token: ${{ secrets.GITHUB_TOKEN }}
89 | parallel: true
90 | path-to-lcov: ./lcov.info
91 | - name: Coveralls Finished
92 | uses: coverallsapp/github-action@master
93 | with:
94 | github-token: ${{ secrets.GITHUB_TOKEN }}
95 | parallel-finished: true
96 |
97 | cargo-udeps:
98 | name: Unused dependency check
99 | runs-on: ubuntu-latest
100 | steps:
101 | - uses: actions/checkout@v2
102 | # Install Rust and required components
103 | - uses: actions-rs/toolchain@v1
104 | with:
105 | profile: minimal
106 | toolchain: nightly
107 | override: true
108 |
109 | # Install and run cargo udeps to find unused cargo dependencies
110 | - name: cargo-udeps duplicate dependency check
111 | run: |
112 | cargo install cargo-udeps --locked
113 | cargo +nightly udeps --all-targets
114 |
115 | tests:
116 | name: Test
117 | runs-on: ubuntu-latest
118 | steps:
119 | - uses: actions/checkout@v2
120 |
121 | # Install Rust
122 | - uses: actions-rs/toolchain@v1
123 | with:
124 | profile: minimal
125 | toolchain: stable
126 | override: true
127 |
128 | # Generate Cargo.lock, needed for the cache.
129 | - name: Generate Cargo.lock
130 | run: cargo generate-lockfile
131 |
132 | # Cache.
133 | - name: Cargo cache registry, index and build
134 | uses: actions/cache@v2
135 | with:
136 | path: |
137 | ~/.cargo/registry
138 | ~/.cargo/git
139 | target
140 | key: ${{ runner.os }}-cargo-cache-${{ hashFiles('**/Cargo.lock') }}
141 |
142 | # Run tests.
143 | - name: Cargo test
144 | run: cargo test --release
145 |
--------------------------------------------------------------------------------
/src/rumor_state.rs:
--------------------------------------------------------------------------------
1 | // Copyright 2018 MaidSafe.net limited.
2 | //
3 | // This SAFE Network Software is licensed to you under the MIT license <LICENSE-MIT
4 | // http://opensource.org/licenses/MIT> or the Modified BSD license <LICENSE-BSD
5 | // https://opensource.org/licenses/BSD-3-Clause>, at your option. This file may not be copied,
6 | // modified, or distributed except according to those terms. Please review the Licences for the
7 | // specific language governing permissions and limitations relating to use of the SAFE Network
8 | // Software.
9 |
10 | use crate::id::Id;
11 | use std::collections::btree_map::Entry;
12 | use std::collections::{BTreeMap, BTreeSet};
13 |
14 | /// This represents the state of a single rumor from this node's perspective.
15 | #[derive(Debug, Clone, Serialize, Deserialize)]
16 | pub enum RumorState {
17 | /// Exponential-growth phase.
18 | B {
19 | /// The round number for this rumor. This is not a globally-synchronised variable, rather
20 | /// it is set to 0 when we first receive a copy of this rumor and is incremented every
21 | /// time `next_round()` is called.
22 | round: Round,
23 | /// Our age for this rumor. This may increase by 1 during a single round or may
24 | /// remain the same depending on the ages attached to incoming copies of this rumor.
25 | rumor_age: Age,
26 | /// The map of <peer ID, age>s which have sent us this rumor during this round.
27 | peer_ages: BTreeMap<Id, Age>,
28 | },
29 | /// Quadratic-shrinking phase.
30 | C {
31 | /// The number of rounds performed by the node while the rumor was in state B.
32 | rounds_in_state_b: Round,
33 | /// The round number for this rumor while in state C.
34 | round: Round,
35 | },
36 | /// Propagation complete.
37 | D,
38 | }
39 |
40 | impl Default for RumorState {
41 | fn default() -> Self {
42 | Self::new()
43 | }
44 | }
45 |
46 | impl RumorState {
47 | /// Construct a new `RumorState` where we're the initial node for the rumor. We start in
48 | /// state B with `rumor_age` set to `1`.
49 | pub fn new() -> Self {
50 | RumorState::B {
51 | round: Round::from(0),
52 | rumor_age: Age::from(1),
53 | peer_ages: BTreeMap::new(),
54 | }
55 | }
56 |
57 | /// Construct a new `RumorState` where we've received the rumor from a peer. If that peer
58 | /// is in state B (`age < max_b_age`) we start in state B with `rumor_age` set to `1`.
59 | /// If the peer is in state C, we start in state C too.
60 | pub fn new_from_peer(age: Age, max_b_age: Age) -> Self {
61 | if age < max_b_age {
62 | return RumorState::B {
63 | round: Round::from(0),
64 | rumor_age: Age::from(1),
65 | peer_ages: BTreeMap::new(),
66 | };
67 | }
68 | RumorState::C {
69 | rounds_in_state_b: Round::from(0),
70 | round: Round::from(0),
71 | }
72 | }
73 |
74 | /// Receive a copy of this rumor from `peer_id` with `age`.
75 | pub fn receive(&mut self, peer_id: Id, age: Age) {
76 | if let RumorState::B {
77 | ref mut peer_ages, ..
78 | } = *self
79 | {
80 | if peer_ages.insert(peer_id, age).is_some() {
81 | debug!("Received the same rumor more than once this round from a given peer");
82 | }
83 | }
84 | }
85 |
86 | /// Increment `round` value, consuming `self` and returning the new state.
87 | pub fn next_round(
88 | self,
89 | max_b_age: Age,
90 | max_c_rounds: Round,
91 | max_rounds: Round,
92 | peers_in_this_round: &BTreeSet<Id>,
93 | ) -> RumorState {
94 | match self {
95 | RumorState::B {
96 | mut round,
97 | mut rumor_age,
98 | mut peer_ages,
99 | } => {
100 | round += Round::from(1);
101 | // If we've hit the maximum permitted number of rounds, transition to state D
102 | if round >= max_rounds {
103 | return RumorState::D;
104 | }
105 |
106 | // For any `peers_in_this_round` which aren't accounted for in `peer_ages`, add
107 | // an age of `0` for them to indicate they're in state A (i.e. they didn't have
108 | // the rumor).
109 | for peer in peers_in_this_round {
110 | if let Entry::Vacant(entry) = peer_ages.entry(*peer) {
111 | let _ = entry.insert(Age::from(0));
112 | }
113 | }
114 |
115 | // Apply the median rule, but if any peer's age >= `max_b_age` (i.e. that peer
116 | // is in state C), transition to state C.
117 | let mut less = 0;
118 | let mut greater_or_equal = 0;
119 | for peer_age in peer_ages.values() {
120 | if *peer_age < rumor_age {
121 | less += 1;
122 | } else if *peer_age >= max_b_age {
123 | return RumorState::C {
124 | rounds_in_state_b: round,
125 | round: Round::from(0),
126 | };
127 | } else {
128 | greater_or_equal += 1;
129 | }
130 | }
131 | if greater_or_equal > less {
132 | rumor_age += Age::from(1);
133 | }
134 |
135 | // If our age has reached `max_b_age`, transition to state C, otherwise remain
136 | // in state B.
137 | if rumor_age >= max_b_age {
138 | return RumorState::C {
139 | rounds_in_state_b: round,
140 | round: Round::from(0),
141 | };
142 | }
143 | RumorState::B {
144 | round,
145 | rumor_age,
146 | peer_ages: BTreeMap::new(),
147 | }
148 | }
149 | RumorState::C {
150 | rounds_in_state_b,
151 | mut round,
152 | } => {
153 | round += Round::from(1);
154 | // If we've hit the maximum permitted number of rounds, transition to state D
155 | if round + rounds_in_state_b >= max_rounds {
156 | return RumorState::D;
157 | }
158 |
159 | // If we've hit the maximum rounds for remaining in state C, transition to state D.
160 | if round >= max_c_rounds {
161 | return RumorState::D;
162 | }
163 |
164 | // Otherwise remain in state C.
165 | RumorState::C {
166 | rounds_in_state_b,
167 | round,
168 | }
169 | }
170 | RumorState::D => RumorState::D,
171 | }
172 | }
173 |
174 | /// We only need to push and pull this rumor if we're in states B or C, hence this returns
175 | /// `None` if we're in state D. State C is indicated by returning a value > `max_b_age`.
176 | pub fn rumor_age(&self) -> Option<Age> {
177 | match *self {
178 | RumorState::B { rumor_age, .. } => Some(rumor_age),
179 | RumorState::C { .. } => Some(Age::max()),
180 | RumorState::D => None,
181 | }
182 | }
183 | }
184 |
185 | #[derive(Copy, Clone, Serialize, Debug, Deserialize, PartialEq, PartialOrd)]
186 | pub struct Age {
187 | pub value: u8,
188 | }
189 |
190 | impl Age {
191 | pub fn from(value: u8) -> Self {
192 | Self { value }
193 | }
194 | pub fn max() -> Self {
195 | Self {
196 | value: u8::max_value(),
197 | }
198 | }
199 | }
200 |
201 | impl std::ops::AddAssign for Age {
202 | fn add_assign(&mut self, rhs: Self) {
203 | self.value += rhs.value;
204 | }
205 | }
206 |
207 | #[derive(Default, Copy, Clone, Serialize, Debug, Deserialize, PartialEq, PartialOrd)]
208 | pub struct Round {
209 | pub value: u8,
210 | }
211 |
212 | impl Round {
213 | pub fn from(value: u8) -> Self {
214 | Self { value }
215 | }
216 | }
217 |
218 | impl std::ops::Add for Round {
219 | type Output = Round;
220 | fn add(self, rhs: Self) -> Round {
221 | Round::from(self.value + rhs.value)
222 | }
223 | }
224 |
225 | impl std::ops::AddAssign for Round {
226 | fn add_assign(&mut self, rhs: Self) {
227 | self.value += rhs.value;
228 | }
229 | }
230 |
--------------------------------------------------------------------------------
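The B-state transition in `next_round` above hinges on the median-counter rule: compare our current age for the rumor against the ages attached to the copies received this round, and only advance when strictly more peers are at our age or older than are behind us. Below is a standalone sketch of just that comparison (simplified and hypothetical: ages are bare `u8`s here, and the `max_b_age` / state-C checks are left out).

```rust
// Simplified illustration of the median-counter rule from `RumorState::next_round`.
fn next_rumor_age(our_age: u8, peer_ages: &[u8]) -> u8 {
    let less = peer_ages.iter().filter(|&&age| age < our_age).count();
    let greater_or_equal = peer_ages.len() - less;
    // Advance only if strictly more peers are at our age or older than are
    // behind us (an age of 0 means the peer did not know the rumor at all).
    if greater_or_equal > less {
        our_age + 1
    } else {
        our_age
    }
}

fn main() {
    // Our age is 2. Two peers sent age 0 (they did not know the rumor) and two
    // sent age 3: the counts are tied at 2 vs 2, so our age stays at 2.
    assert_eq!(next_rumor_age(2, &[0, 0, 3, 3]), 2);
    // Three of the four peers are at our age or older, so the age advances to 3.
    assert_eq!(next_rumor_age(2, &[0, 2, 3, 3]), 3);
}
```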
/src/gossip.rs:
--------------------------------------------------------------------------------
1 | // Copyright 2018 MaidSafe.net limited.
2 | //
3 | // This SAFE Network Software is licensed to you under the MIT license <LICENSE-MIT
4 | // http://opensource.org/licenses/MIT> or the Modified BSD license <LICENSE-BSD
5 | // https://opensource.org/licenses/BSD-3-Clause>, at your option. This file may not be copied,
6 | // modified, or distributed except according to those terms. Please review the Licences for the
7 | // specific language governing permissions and limitations relating to use of the SAFE Network
8 | // Software.
9 |
10 | use crate::id::Id;
11 | use crate::messages::Gossip;
12 | use crate::rumor_state::RumorState;
13 | use crate::rumor_state::{Age, Round};
14 | use std::collections::btree_map::Entry;
15 | use std::collections::{BTreeMap, BTreeSet};
16 | use std::fmt::{self, Debug, Formatter};
17 | use std::{cmp, mem, u64};
18 | use tiny_keccak::{Hasher, Sha3};
19 |
20 | #[derive(Debug, Clone, Serialize, Deserialize)]
21 | pub struct Content(pub Vec<u8>);
22 |
23 | #[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)]
24 | pub struct ContentHash(pub [u8; 32]);
25 |
26 | impl From<Content> for ContentHash {
27 | fn from(content: Content) -> Self {
28 | let mut hasher = Sha3::v256();
29 | let mut out = [0u8; 32];
30 | hasher.update(content.0.as_slice());
31 | hasher.finalize(&mut out);
32 | Self(out)
33 | }
34 | }
35 |
36 | #[derive(Debug, Clone, Serialize, Deserialize)]
37 | pub struct Rumor {
38 | pub content: Content,
39 | pub state: RumorState,
40 | }
41 |
42 | /// The gossip state of a node instance.
43 | pub struct GossipState {
44 | rumors: BTreeMap<ContentHash, Rumor>,
45 | network_size: f64,
46 | // When in state B, if our age for a Rumor is incremented to this value, the state
47 | // transitions to C. Specified in the paper as `O(ln ln n)`.
48 | max_b_age: Age,
49 | // The maximum number of rounds to remain in state C for a given rumor. Specified in the
50 | // paper as `O(ln ln n)`.
51 | max_c_rounds: Round,
52 | // The maximum total number of rounds for a rumor to remain in states B or C. This is a
53 | // failsafe to allow the definite termination of a rumor being propagated. Specified in the
54 | // paper as `O(ln n)`.
55 | max_rounds: Round,
56 | // All peers with which we communicated during this round.
57 | peers_in_this_round: BTreeSet<Id>,
58 | // Statistics
59 | statistics: Statistics,
60 | }
61 |
62 | impl GossipState {
63 | pub fn new() -> Self {
64 | GossipState {
65 | rumors: BTreeMap::new(),
66 | network_size: 1.0,
67 | max_b_age: Age::from(0),
68 | max_c_rounds: Round::from(0),
69 | max_rounds: Round::from(0),
70 | peers_in_this_round: BTreeSet::new(),
71 | statistics: Statistics::default(),
72 | }
73 | }
74 |
75 | pub fn add_peer(&mut self) {
76 | self.network_size += 1.0;
77 | self.max_b_age = Age::from(cmp::max(1, self.network_size.ln().ln().ceil() as u8));
78 | self.max_c_rounds = Round::from(cmp::max(1, self.network_size.ln().ln().ceil() as u8));
79 | self.max_rounds = Round::from(cmp::max(1, self.network_size.ln().ceil() as u8));
80 | }
81 |
82 | pub fn rumors(&self) -> Vec<Rumor> {
83 | self.rumors.values().cloned().collect()
84 | }
85 |
86 | /// Start gossiping a new rumor from this node.
87 | pub fn initiate_rumor(&mut self, content: Content) {
88 | if self
89 | .rumors
90 | .insert(
91 | ContentHash::from(content.clone()),
92 | Rumor {
93 | content,
94 | state: RumorState::new(),
95 | },
96 | )
97 | .is_some()
98 | {
99 | error!("New rumors should be unique.");
100 | }
101 | }
102 |
103 | /// Trigger the end of this round. Returns a list of Push gossips to be sent to a single random
104 | /// peer during this new round.
105 | pub fn next_round(&mut self) -> Option<Gossip> {
106 | self.statistics.rounds += 1;
107 | let mut rumors_to_push = vec![];
108 | let rumors = mem::replace(&mut self.rumors, BTreeMap::new());
109 | self.rumors = rumors
110 | .into_iter()
111 | .map(|(hash, mut rumor)| {
112 | rumor.state = rumor.state.next_round(
113 | self.max_b_age,
114 | self.max_c_rounds,
115 | self.max_rounds,
116 | &self.peers_in_this_round,
117 | );
118 | // Filter out any for which `rumor_age()` is `None`.
119 | if rumor.state.rumor_age().is_some() {
120 | rumors_to_push.push(rumor.clone());
121 | }
122 | (hash, rumor)
123 | })
124 | .collect();
125 | self.peers_in_this_round.clear();
126 | self.statistics.sent_rumors += rumors_to_push.len() as u64;
127 | if !rumors_to_push.is_empty() {
128 | Some(Gossip::Push(rumors_to_push))
129 | } else {
130 | None
131 | }
132 | }
133 |
134 | /// We've received `gossip` from `peer_id`. If this is a Push gossip and we've not already heard from
135 | /// `peer_id` in this round, this returns the list of Pull gossips which should be sent back to
136 | /// `peer_id`.
137 | pub fn receive(&mut self, peer_id: Id, gossip: Gossip) -> Option<Gossip> {
138 | let (is_push, received_rumors) = match gossip {
139 | Gossip::Push(received_rumors) => (true, received_rumors),
140 | Gossip::Pull(received_rumors) => (false, received_rumors),
141 | };
142 |
143 | // Collect any responses required.
144 | let is_new_this_round = self.peers_in_this_round.insert(peer_id);
145 | let response = if is_new_this_round && is_push {
146 | let response_rumors: Vec<Rumor> = self
147 | .rumors
148 | .iter()
149 | .filter_map(|(_, rumor)| {
150 | // Filter out any for which `rumor_age()` is `None`.
151 | rumor.state.rumor_age().map(|_| rumor.clone())
152 | })
153 | .collect();
154 | self.statistics.sent_rumors += response_rumors.len() as u64;
155 | let response_gossip = Gossip::Pull(response_rumors);
156 | Some(response_gossip)
157 | } else {
158 | None
159 | };
160 |
161 | for rumor in received_rumors {
162 | self.statistics.received_rumors += 1;
163 | // Add or update the entry for this rumor.
164 | let age = rumor.state.rumor_age().unwrap_or_else(Age::max);
165 | match self.rumors.entry(ContentHash::from(rumor.content.clone())) {
166 | Entry::Occupied(mut entry) => entry.get_mut().state.receive(peer_id, age),
167 | Entry::Vacant(entry) => {
168 | let _ = entry.insert(Rumor {
169 | content: rumor.content,
170 | state: RumorState::new_from_peer(age, self.max_b_age),
171 | });
172 | }
173 | }
174 | }
175 |
176 | response
177 | }
178 |
179 | #[cfg(test)]
180 | /// Clear the cache.
181 | pub fn clear(&mut self) {
182 | self.statistics = Statistics::default();
183 | self.rumors.clear();
184 | self.peers_in_this_round.clear();
185 | }
186 |
187 | /// Returns the statistics.
188 | pub fn statistics(&self) -> Statistics {
189 | self.statistics
190 | }
191 | }
192 |
193 | impl Debug for GossipState {
194 | fn fmt(&self, formatter: &mut Formatter<'_>) -> fmt::Result {
195 | write!(formatter, "GossipState {{ rumors: {{ ")?;
196 | for rumor in (&self.rumors).values() {
197 | write!(
198 | formatter,
199 | "{:02x}{:02x}{:02x}{:02x}: {:?}, ",
200 | rumor.content.0[0],
201 | rumor.content.0[1],
202 | rumor.content.0[2],
203 | rumor.content.0[3],
204 | rumor.state
205 | )?;
206 | }
207 | write!(formatter, "}}, network_size: {}, ", self.network_size)?;
208 | write!(formatter, "max_b_age: {}, ", self.max_b_age.value)?;
209 | write!(formatter, "max_c_rounds: {}, ", self.max_c_rounds.value)?;
210 | write!(formatter, "max_rounds: {}, ", self.max_rounds.value)?;
211 | write!(
212 | formatter,
213 | "peers_in_this_round: {:?} }}",
214 | self.peers_in_this_round
215 | )
216 | }
217 | }
218 |
219 | /// Statistics on each node.
220 | #[derive(Clone, Copy, Default)]
221 | pub struct Statistics {
222 | /// Total rounds experienced (each push_tick is considered as one round).
223 | pub rounds: u64,
224 | /// Total rumors sent from this node.
225 | pub sent_rumors: u64,
226 | /// Total rumors this node received.
227 | pub received_rumors: u64,
228 | }
229 |
230 | impl Statistics {
231 | /// Create a default with u64::MAX
232 | pub fn new_max() -> Self {
233 | Statistics {
234 | rounds: u64::MAX,
235 | sent_rumors: u64::MAX,
236 | received_rumors: u64::MAX,
237 | }
238 | }
239 |
240 | /// Add the value of other into self
241 | pub fn add(&mut self, other: &Statistics) {
242 | self.rounds += other.rounds;
243 | self.sent_rumors += other.sent_rumors;
244 | self.received_rumors += other.received_rumors;
245 | }
246 |
247 | /// Update self with the min of self and other
248 | pub fn min(&mut self, other: &Statistics) {
249 | self.rounds = cmp::min(self.rounds, other.rounds);
250 | self.sent_rumors = cmp::min(self.sent_rumors, other.sent_rumors);
251 | self.received_rumors = cmp::min(self.received_rumors, other.received_rumors);
252 | }
253 |
254 | /// Update self with the max of self and other
255 | pub fn max(&mut self, other: &Statistics) {
256 | self.rounds = cmp::max(self.rounds, other.rounds);
257 | self.sent_rumors = cmp::max(self.sent_rumors, other.sent_rumors);
258 | self.received_rumors = cmp::max(self.received_rumors, other.received_rumors);
259 | }
260 | }
261 |
262 | impl Debug for Statistics {
263 | fn fmt(&self, formatter: &mut Formatter<'_>) -> fmt::Result {
264 | write!(
265 | formatter,
266 | "rounds: {}, rumors sent: {}, \n
267 | rumors received: {}",
268 | self.rounds, self.sent_rumors, self.received_rumors
269 | )
270 | }
271 | }
272 |
--------------------------------------------------------------------------------
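To make the `O(ln ln n)` / `O(ln n)` parameter comments in `GossipState` concrete, the sketch below repeats the arithmetic performed by `add_peer` for a couple of network sizes (the helper `gossip_parameters` is illustrative and not part of the crate).

```rust
// Recompute the gossip parameters the way `GossipState::add_peer` does,
// for a network of `n` nodes.
fn gossip_parameters(n: f64) -> (u8, u8, u8) {
    let max_b_age = 1u8.max(n.ln().ln().ceil() as u8); // state-B age limit
    let max_c_rounds = 1u8.max(n.ln().ln().ceil() as u8); // rounds allowed in state C
    let max_rounds = 1u8.max(n.ln().ceil() as u8); // hard cap across states B and C
    (max_b_age, max_c_rounds, max_rounds)
}

fn main() {
    // 20 nodes: ln 20 ≈ 3.00, ln ln 20 ≈ 1.10, so the limits are (2, 2, 3).
    assert_eq!(gossip_parameters(20.0), (2, 2, 3));
    // 2000 nodes: ln 2000 ≈ 7.60, ln ln 2000 ≈ 2.03, so the limits are (3, 3, 8).
    assert_eq!(gossip_parameters(2000.0), (3, 3, 8));
}
```

Even for a 2000-node network a rumor therefore spends at most a handful of rounds in states B and C before the failsafe `max_rounds` forces it into state D.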
/src/node.rs:
--------------------------------------------------------------------------------
1 | // Copyright 2018 MaidSafe.net limited.
2 | //
3 | // This SAFE Network Software is licensed to you under the MIT license <LICENSE-MIT
4 | // http://opensource.org/licenses/MIT> or the Modified BSD license <LICENSE-BSD
5 | // https://opensource.org/licenses/BSD-3-Clause>, at your option. This file may not be copied,
6 | // modified, or distributed except according to those terms. Please review the Licences for the
7 | // specific language governing permissions and limitations relating to use of the SAFE Network
8 | // Software.
9 |
10 | use super::gossip::{Content, GossipState, Rumor, Statistics};
11 | use super::messages::{Gossip, Message};
12 | use crate::error::Error;
13 | use crate::id::Id;
14 | use bincode::serialize;
15 | use ed25519::{Keypair, PublicKey};
16 | use rand::seq::SliceRandom;
17 | use rand_core::OsRng;
18 | use serde::ser::Serialize;
19 | use std::fmt::{self, Debug, Formatter};
20 |
21 | /// An entity on the network which will gossip rumors.
22 | pub struct Node {
23 | keys: Keypair,
24 | peers: Vec<Id>,
25 | gossip: GossipState,
26 | }
27 |
28 | impl Node {
29 | /// The ID of this `Node`, i.e. its public key.
30 | pub fn id(&self) -> Id {
31 | self.keys.public.into()
32 | }
33 |
34 | /// Add the ID of another node on the network. This will fail if `initiate_rumor()` has already been
35 | /// called since this `Node` needs to know about all other nodes in the network before
36 | /// starting to gossip rumors.
37 | pub fn add_peer(&mut self, peer_id: Id) -> Result<(), Error> {
38 | if !self.gossip.rumors().is_empty() {
39 | return Err(Error::AlreadyStarted);
40 | }
41 | self.peers.push(peer_id);
42 | self.gossip.add_peer();
43 | Ok(())
44 | }
45 |
46 | /// Initiate a new rumor starting at this `Node`.
47 | pub fn initiate_rumor<T: Serialize>(&mut self, rumor: &T) -> Result<(), Error> {
48 | if self.peers.is_empty() {
49 | return Err(Error::NoPeers);
50 | }
51 | self.gossip.initiate_rumor(Content(serialize(rumor)?));
52 | Ok(())
53 | }
54 |
55 | /// Start a new round. Returns a Push gossip with rumors to be sent to the given peer.
56 | ///
57 | /// These should all be given to just a single peer to avoid triggering a flood of Pull gossips in
58 | /// response. For example, if we have 100 Push gossips to send here and we send them all to a
59 | /// single peer, we only receive a single tranche of Pull gossips in responses (the tranche
60 | /// comprising several rumors). However, if we send each Push gossip to a different peer, we'd
61 | /// receive 100 tranches of Pull gossips.
62 | pub fn next_round(&mut self) -> Result<(Id, Option<Vec<u8>>), Error> {
63 | let mut rng = rand::thread_rng();
64 | let peer_id = match self.peers.choose(&mut rng) {
65 | Some(id) => *id,
66 | None => return Err(Error::NoPeers),
67 | };
68 | if let Some(gossip) = self.gossip.next_round() {
69 | let serialized = self.serialise(gossip);
70 | debug!("{:?} Sending Push gossip to {:?}", self, peer_id);
71 | Ok((peer_id, Some(serialized)))
72 | } else {
73 | Ok((peer_id, None))
74 | }
75 | }
76 |
77 | /// Handles incoming gossip from peer.
78 | pub fn receive_gossip(&mut self, peer_id: &Id, serialised_gossip: &[u8]) -> Option<Vec<u8>> {
79 | debug!("{:?} handling gossip from {:?}", self, peer_id);
80 | let pub_key = if let Ok(pub_key) = PublicKey::from_bytes(&peer_id.0) {
81 | pub_key
82 | } else {
83 | return None;
84 | };
85 | let gossip = if let Ok(gossip) = Message::deserialise(serialised_gossip, &pub_key) {
86 | gossip
87 | } else {
88 | error!("Failed to deserialise gossip");
89 | return None;
90 | };
91 | // If this gossip is a Push from a peer we've not already heard from in this round, there could
92 | // be a Pull response to send back to that peer.
93 | if let Some(response) = self.gossip.receive(*peer_id, gossip) {
94 | Some(self.serialise(response))
95 | } else {
96 | None
97 | }
98 | }
99 |
100 | /// Returns the list of rumors this node is informed about so far.
101 | pub fn rumors(&self) -> Vec<Rumor> {
102 | self.gossip.rumors()
103 | }
104 |
105 | /// Returns the statistics of this node.
106 | pub fn statistics(&self) -> Statistics {
107 | self.gossip.statistics()
108 | }
109 |
110 | #[cfg(test)]
111 | /// Clear the statistics and gossip's cache.
112 | pub fn clear(&mut self) {
113 | self.gossip.clear();
114 | }
115 |
116 | fn serialise(&mut self, gossip: Gossip) -> Vec<u8> {
117 | if let Ok(serialised_msg) = Message::serialise(&gossip, &self.keys) {
118 | return serialised_msg;
119 | } else {
120 | error!("Failed to serialise {:?}", gossip);
121 | }
122 | vec![]
123 | }
124 | }
125 |
126 | impl Default for Node {
127 | fn default() -> Self {
128 | let keys = Keypair::generate(&mut OsRng);
129 | Node {
130 | keys,
131 | peers: vec![],
132 | gossip: GossipState::new(),
133 | }
134 | }
135 | }
136 |
137 | impl Debug for Node {
138 | fn fmt(&self, formatter: &mut Formatter<'_>) -> fmt::Result {
139 | write!(formatter, "{:?}", self.id())
140 | }
141 | }
142 |
143 | #[cfg(test)]
144 | mod tests {
145 | use super::*;
146 | use itertools::Itertools;
147 | use rand::seq::SliceRandom;
148 | use rand::{self, Rng};
149 | use std::collections::BTreeMap;
150 | use std::time::Instant;
151 | use std::{cmp, u64};
152 |
153 | fn create_network(node_count: u32) -> Vec<Node> {
154 | let mut nodes = std::iter::repeat_with(Node::default)
155 | .take(node_count as usize)
156 | .collect_vec();
157 | // Connect all the nodes.
158 | for i in 0..(nodes.len() - 1) {
159 | let lhs_id = nodes[i].id();
160 | for j in (i + 1)..nodes.len() {
161 | let rhs_id = nodes[j].id();
162 | let _ = nodes[j].add_peer(lhs_id);
163 | let _ = nodes[i].add_peer(rhs_id);
164 | }
165 | }
166 | nodes
167 | }
168 |
169 | fn send_rumors(nodes: &mut Vec<Node>, num_of_msgs: u32) -> (u64, u64, Statistics) {
170 | let mut rng = rand::thread_rng();
171 | let mut rumors: Vec<String> = Vec::new();
172 | for _ in 0..num_of_msgs {
173 | let mut raw = [0u8; 20];
174 | rng.fill(&mut raw[..]);
175 | rumors.push(String::from_utf8_lossy(&raw).to_string());
176 | }
177 |
178 | // Initiate the first rumor.
179 | {
180 | assert!(num_of_msgs >= 1);
181 | let node = unwrap!(nodes.choose_mut(&mut rng));
182 | let rumor = unwrap!(rumors.pop());
183 | let _ = node.initiate_rumor(&rumor);
184 | }
185 |
186 | // Polling
187 | let mut processed = true;
188 | while processed {
189 | processed = false;
190 | let mut gossips_to_push = BTreeMap::new();
191 | // Call `next_round()` on each node to gather a list of all Push gossips.
192 | for node in nodes.iter_mut() {
193 | if !rumors.is_empty() && rng.gen() {
194 | let rumor = unwrap!(rumors.pop());
195 | let _ = node.initiate_rumor(&rumor);
196 | }
197 | if let Ok((dst_id, Some(push_gossip))) = node.next_round() {
198 | processed = true;
199 | let _ = gossips_to_push.insert((node.id(), dst_id), push_gossip);
200 | }
201 | }
202 |
203 | // Send all Push gossips and the corresponding Pull gossips.
204 | for ((src_id, dst_id), push_gossip) in gossips_to_push {
205 | let dst = unwrap!(nodes.iter_mut().find(|node| node.id() == dst_id));
206 | let pull_gossip = dst.receive_gossip(&src_id, &push_gossip);
207 | let src = unwrap!(nodes.iter_mut().find(|node| node.id() == src_id));
208 | if let Some(gossip) = pull_gossip {
209 | assert!(src.receive_gossip(&dst_id, &gossip).is_none());
210 | }
211 | }
212 | }
213 |
214 | let mut statistics = Statistics::default();
215 | let mut nodes_missed = 0;
216 | let mut msgs_missed = 0;
217 | // Check which nodes missed the rumor, and clear the nodes for the next iteration.
218 | for node in nodes.iter_mut() {
219 | let stat = node.statistics();
220 | statistics.add(&stat);
221 | statistics.rounds = stat.rounds;
222 |
223 | if node.rumors().len() as u32 != num_of_msgs {
224 | nodes_missed += 1;
225 | msgs_missed += u64::from(num_of_msgs - node.rumors().len() as u32);
226 | }
227 | node.clear();
228 | }
229 |
230 | (nodes_missed, msgs_missed, statistics)
231 | }
232 |
233 | fn one_rumor_test(num_of_nodes: u32) {
234 | let mut nodes = create_network(num_of_nodes);
235 | println!("Network of {} nodes:", num_of_nodes);
236 | let iterations = 100;
237 | let mut metrics = Vec::new();
238 | for _ in 0..iterations {
239 | metrics.push(send_rumors(&mut nodes, 1));
240 | }
241 |
242 | let mut stats_avg = Statistics::default();
243 | let mut stats_max = Statistics::default();
244 | let mut stats_min = Statistics::new_max();
245 | let mut nodes_missed_avg = 0.0;
246 | let mut nodes_missed_max = 0;
247 | let mut nodes_missed_min = u64::MAX;
248 | let mut msgs_missed_avg = 0.0;
249 | let mut msgs_missed_max = 0;
250 | let mut msgs_missed_min = u64::MAX;
251 |
252 | for (nodes_missed, msgs_missed, stats) in metrics {
253 | nodes_missed_avg += nodes_missed as f64;
254 | nodes_missed_max = cmp::max(nodes_missed_max, nodes_missed);
255 | nodes_missed_min = cmp::min(nodes_missed_min, nodes_missed);
256 | msgs_missed_avg += msgs_missed as f64;
257 | msgs_missed_max = cmp::max(msgs_missed_max, msgs_missed);
258 | msgs_missed_min = cmp::min(msgs_missed_min, msgs_missed);
259 | stats_avg.add(&stats);
260 | stats_max.max(&stats);
261 | stats_min.min(&stats);
262 | }
263 | nodes_missed_avg /= iterations as f64;
264 | msgs_missed_avg /= iterations as f64;
265 | stats_avg.rounds /= iterations;
266 | stats_avg.sent_rumors /= iterations;
267 | stats_avg.received_rumors /= iterations;
268 |
269 | print!(" AVERAGE ---- ");
270 | print_metric(
271 | nodes_missed_avg,
272 | msgs_missed_avg,
273 | &stats_avg,
274 | num_of_nodes,
275 | 1,
276 | );
277 | print!(" MIN -------- ");
278 | print_metric(
279 | nodes_missed_min as f64,
280 | msgs_missed_min as f64,
281 | &stats_min,
282 | num_of_nodes,
283 | 1,
284 | );
285 | print!(" MAX -------- ");
286 | print_metric(
287 | nodes_missed_max as f64,
288 | msgs_missed_max as f64,
289 | &stats_max,
290 | num_of_nodes,
291 | 1,
292 | );
293 | }
294 |
295 | fn print_metric(
296 | nodes_missed: f64,
297 | msgs_missed: f64,
298 | stats: &Statistics,
299 | num_of_nodes: u32,
300 | num_of_msgs: u32,
301 | ) {
302 | println!(
303 | "rounds: {}, msgs_sent: {}, msgs_missed: {} \
304 | ({:.2}%), nodes_missed: {} ({:.2}%)",
305 | stats.rounds,
306 | stats.sent_rumors,
307 | msgs_missed,
308 | 100.0 * msgs_missed / f64::from(num_of_nodes) / f64::from(num_of_msgs),
309 | nodes_missed,
310 | 100.0 * nodes_missed / f64::from(num_of_nodes) / f64::from(num_of_msgs)
311 | );
312 | }
313 |
314 | #[test]
315 | fn one_rumor() {
316 | one_rumor_test(20);
317 | one_rumor_test(200);
318 | one_rumor_test(2000);
319 | }
320 |
321 | #[test]
322 | fn multiple_rumors() {
323 | let num_of_nodes: Vec<u32> = vec![20, 200, 2000];
324 | let num_of_msgs: Vec<u32> = vec![10, 100, 1000];
325 | for number in &num_of_nodes {
326 | for msgs in &num_of_msgs {
327 | print!(
328 | "Network of {} nodes, gossiping {} rumors:\n\t",
329 | number, msgs
330 | );
331 | let mut nodes = create_network(*number);
332 | let metric = send_rumors(&mut nodes, *msgs);
333 | print_metric(metric.0 as f64, metric.1 as f64, &metric.2, *number, *msgs);
334 | }
335 | }
336 | }
337 |
338 | #[test]
339 | fn avg_rounds_and_missed() {
340 | let num_nodes = 20;
341 | let num_msgs = 1;
342 | let iters = 100;
343 | let mut all_rounds = vec![];
344 | let mut all_missed = vec![];
345 | let mut total_rounds = 0;
346 | let mut total_missed = 0;
347 | let t = Instant::now();
348 | for _ in 0..iters {
349 | let (rounds, nodes_missed) = prove_of_stop(num_nodes, num_msgs);
350 | all_rounds.push(rounds);
351 | all_missed.push(nodes_missed);
352 | total_rounds += rounds;
353 | total_missed += nodes_missed;
354 | }
355 | println!("Elapsed time: {:?}", t.elapsed());
356 | all_rounds.sort();
357 | all_missed.sort();
358 | let avg_rounds = total_rounds / iters;
359 | let avg_missed = total_missed / iters;
360 | let median_rounds = all_rounds[iters / 2];
361 | let median_missed = all_missed[iters / 2];
362 |
363 | println!("Iters: {:?}", iters);
364 | println!("Avg rounds: {:?}", avg_rounds);
365 | println!("Median rounds: {:?}", median_rounds);
366 | println!(
367 | "Avg missed percent: {1:.*} %",
368 | 2,
369 | 100_f32 * (avg_missed as f32 / num_nodes as f32)
370 | );
371 | println!(
372 | "Median missed percent: {1:.*} %",
373 | 2,
374 | 100_f32 * (median_missed as f32 / num_nodes as f32)
375 | );
376 | }
377 |
378 | fn prove_of_stop(num_nodes: u32, num_msgs: u32) -> (usize, usize) {
379 | let mut nodes = create_network(num_nodes);
380 | let mut rng = rand::thread_rng();
381 | let mut rumors: Vec<String> = Vec::new();
382 | for _ in 0..num_msgs {
383 | let mut raw = [0u8; 20];
384 | rng.fill(&mut raw[..]);
385 | rumors.push(String::from_utf8_lossy(&raw).to_string());
386 | }
387 |
388 | let mut rounds = 0;
389 | // Polling
390 | let mut processed = true;
391 | while processed {
392 | rounds += 1;
393 | processed = false;
394 | let mut gossips_to_push = BTreeMap::new();
395 | // Call `next_round()` on each node to gather a list of all Push gossips.
396 | for node in nodes.iter_mut() {
397 | if !rumors.is_empty() && rng.gen() {
398 | let rumor = unwrap!(rumors.pop());
399 | let _ = node.initiate_rumor(&rumor);
400 | }
401 | if let Ok((dst_id, Some(push_gossip))) = node.next_round() {
402 | processed = true;
403 | let _ = gossips_to_push.insert((node.id(), dst_id), push_gossip);
404 | }
405 | }
406 |
407 | // Send all Push gossips and the corresponding Pull gossips.
408 | for ((src_id, dst_id), push_gossip) in gossips_to_push {
409 | let dst = unwrap!(nodes.iter_mut().find(|node| node.id() == dst_id));
410 | let pull_msgs = dst.receive_gossip(&src_id, &push_gossip);
411 | let src = unwrap!(nodes.iter_mut().find(|node| node.id() == src_id));
412 | if let Some(pull_msg) = pull_msgs {
413 | assert!(src.receive_gossip(&dst_id, &pull_msg).is_none());
414 | }
415 | }
416 | }
417 |
418 | let mut nodes_missed = 0;
419 | // Checking if nodes missed the rumor.
420 | for node in nodes.iter() {
421 | if node.rumors().len() as u32 != num_msgs {
422 | nodes_missed += 1;
423 | }
424 | }
425 |
426 | (rounds, nodes_missed)
427 | }
428 | }
429 |
--------------------------------------------------------------------------------
/examples/network.rs:
--------------------------------------------------------------------------------
1 | // Copyright 2018 MaidSafe.net limited.
2 | //
3 | // This SAFE Network Software is licensed to you under the MIT license <LICENSE-MIT
4 | // http://opensource.org/licenses/MIT> or the Modified BSD license <LICENSE-BSD
5 | // https://opensource.org/licenses/BSD-3-Clause>, at your option. This file may not be copied,
6 | // modified, or distributed except according to those terms. Please review the Licences for the
7 | // specific language governing permissions and limitations relating to use of the SAFE Network
8 | // Software.
9 |
10 | //! Run a local network of gossiping nodes.
11 |
12 | #![forbid(
13 | exceeding_bitshifts,
14 | mutable_transmutes,
15 | no_mangle_const_items,
16 | unknown_crate_types
17 | )]
18 | #![deny(
19 | bad_style,
20 | improper_ctypes,
21 | missing_docs,
22 | non_shorthand_field_patterns,
23 | overflowing_literals,
24 | stable_features,
25 | unconditional_recursion,
26 | unknown_lints,
27 | unsafe_code,
28 | unused_allocation,
29 | unused_attributes,
30 | unused_comparisons,
31 | unused_features,
32 | unused_parens,
33 | while_true,
34 | unused
35 | )]
36 | #![warn(
37 | trivial_casts,
38 | trivial_numeric_casts,
39 | unused_extern_crates,
40 | unused_import_braces,
41 | unused_qualifications,
42 | unused_results
43 | )]
44 | #![allow(
45 | box_pointers,
46 | missing_copy_implementations,
47 | missing_debug_implementations,
48 | variant_size_differences,
49 | non_camel_case_types
50 | )]
51 |
52 | #[macro_use]
53 | extern crate futures;
54 | use rand;
55 | #[macro_use]
56 | extern crate tokio_io;
57 | #[macro_use]
58 | extern crate unwrap;
59 | use bincode::{deserialize, serialize};
60 | use bytes::{BufMut, BytesMut};
61 | use futures::sync::mpsc;
62 | use futures::{Async, Future, Poll, Stream};
63 | use futures_cpupool::{CpuFuture, CpuPool};
64 | use itertools::Itertools;
65 | use rand::distributions::Alphanumeric;
66 | use rand::Rng;
67 | use sn_gossip::{Error, Id, Node, Statistics};
68 | use std::cell::RefCell;
69 | use std::collections::HashMap;
70 | use std::fmt::{self, Debug, Formatter};
71 | use std::io::Write;
72 | use std::mem;
73 | use std::rc::Rc;
74 | use std::thread;
75 | use tokio::executor::current_thread;
76 | use tokio::net::{TcpListener, TcpStream};
77 | use tokio_io::AsyncRead;
78 |
79 | /// TCP stream wrapper presenting a message-based read / write interface.
80 | #[derive(Debug)]
81 | struct MessageStream {
82 | tcp_stream: TcpStream,
83 | read_buffer: BytesMut,
84 | write_buffer: BytesMut,
85 | incoming_message_length: Option<usize>,
86 | }
87 |
88 | impl MessageStream {
89 | fn new(tcp_stream: TcpStream) -> Self {
90 | MessageStream {
91 | tcp_stream,
92 | read_buffer: BytesMut::new(),
93 | write_buffer: BytesMut::new(),
94 | incoming_message_length: None,
95 | }
96 | }
97 |
98 | /// Buffer `message` to an internal buffer. Calls to `poll_flush` will attempt to flush this
99 | /// buffer to the TCP stream. The size of `message` as a `u32` is added to the buffer first so
100 | /// that the correct size can be read by the receiver before it tries to retrieve the actual
101 | /// message.
102 | fn buffer(&mut self, message: &[u8]) {
103 | let serialised_length = unwrap!(serialize(&(message.len() as u32)));
104 | if self.write_buffer.remaining_mut() < serialised_length.len() + message.len() {
105 | self.write_buffer.extend_from_slice(&serialised_length);
106 | self.write_buffer.extend_from_slice(message);
107 | } else {
108 | self.write_buffer.put(&serialised_length);
109 | self.write_buffer.put(message);
110 | }
111 | }
112 |
113 | /// Flush the write buffer to the TCP stream.
114 | fn poll_flush(&mut self) -> Poll<(), Error> {
115 | while !self.write_buffer.is_empty() {
116 | // `try_nb` is kind of like `try_ready`, but for operations that return `io::Result`
117 | // instead of `Async`. In the case of `io::Result`, an error of `WouldBlock` is
118 | // equivalent to `Async::NotReady`.
119 | let num_bytes = try_nb!(self.tcp_stream.write(&self.write_buffer));
120 | assert!(num_bytes > 0);
121 | // Discard the first `num_bytes` bytes of the buffer.
122 | let _ = self.write_buffer.split_to(num_bytes);
123 | }
124 |
125 | Ok(Async::Ready(()))
126 | }
127 |
128 | /// Read data from the TCP stream. This only returns `Ready` when the socket has closed.
129 | fn fill_read_buffer(&mut self) -> Poll<(), Error> {
130 | loop {
131 | self.read_buffer.reserve(1024);
132 | let num_bytes = try_ready!(self.tcp_stream.read_buf(&mut self.read_buffer));
133 | if num_bytes == 0 {
134 | return Ok(Async::Ready(()));
135 | }
136 | }
137 | }
138 | }
139 |
140 | impl Stream for MessageStream {
141 | type Item = BytesMut;
142 | type Error = Error;
143 |
144 | fn poll(&mut self) -> Poll<Option<Self::Item>, Self::Error> {