├── .circleci
├── config.yml
├── publish
└── removeScalaStandardLibrary
├── .github
├── ISSUE_TEMPLATE
│ └── bug_report.md
└── PULL_REQUEST_TEMPLATE.md
├── .gitignore
├── .scalafmt.conf
├── LICENSE
├── NOTICE
├── README.md
├── build.sc
├── coursier
├── doc-resources
└── sample-configurations.png
├── docker
├── installBazel
└── installMill
├── install-scalafmt.sh
├── pre-commit
├── scalafmt
├── scalanet
├── discovery
│ ├── it
│ │ ├── resources
│ │ │ └── logback-test.xml
│ │ └── src
│ │ │ └── io
│ │ │ └── iohk
│ │ │ └── scalanet
│ │ │ ├── discovery
│ │ │ └── ethereum
│ │ │ │ └── v4
│ │ │ │ └── DiscoveryKademliaIntegrationSpec.scala
│ │ │ └── kademlia
│ │ │ ├── KRouterKademliaIntegrationSpec.scala
│ │ │ └── KademliaIntegrationSpec.scala
│ ├── src
│ │ └── io
│ │ │ └── iohk
│ │ │ └── scalanet
│ │ │ ├── discovery
│ │ │ ├── Tagger.scala
│ │ │ ├── crypto
│ │ │ │ ├── SigAlg.scala
│ │ │ │ └── package.scala
│ │ │ ├── ethereum
│ │ │ │ ├── EthereumNodeRecord.scala
│ │ │ │ ├── KeyValueTag.scala
│ │ │ │ ├── Node.scala
│ │ │ │ ├── codecs
│ │ │ │ │ └── DefaultCodecs.scala
│ │ │ │ └── v4
│ │ │ │ │ ├── DiscoveryConfig.scala
│ │ │ │ │ ├── DiscoveryNetwork.scala
│ │ │ │ │ ├── DiscoveryRPC.scala
│ │ │ │ │ ├── DiscoveryService.scala
│ │ │ │ │ ├── KBucketsWithSubnetLimits.scala
│ │ │ │ │ ├── Packet.scala
│ │ │ │ │ └── Payload.scala
│ │ │ └── hash
│ │ │ │ ├── Keccak256.scala
│ │ │ │ └── package.scala
│ │ │ └── kademlia
│ │ │ ├── KBuckets.scala
│ │ │ ├── KMessage.scala
│ │ │ ├── KNetwork.scala
│ │ │ ├── KRouter.scala
│ │ │ ├── TimeSet.scala
│ │ │ ├── Xor.scala
│ │ │ ├── XorOrdering.scala
│ │ │ └── codec
│ │ │ └── DefaultCodecs.scala
│ └── ut
│ │ ├── resources
│ │ └── logback-test.xml
│ │ └── src
│ │ └── io
│ │ └── iohk
│ │ └── scalanet
│ │ ├── discovery
│ │ ├── crypto
│ │ │ └── SigAlgSpec.scala
│ │ ├── ethereum
│ │ │ ├── EthereumNodeRecordSpec.scala
│ │ │ ├── NodeSpec.scala
│ │ │ └── v4
│ │ │ │ ├── DiscoveryNetworkSpec.scala
│ │ │ │ ├── DiscoveryServiceSpec.scala
│ │ │ │ ├── KBucketsWithSubnetLimitsSpec.scala
│ │ │ │ ├── PacketSpec.scala
│ │ │ │ └── mocks
│ │ │ │ ├── MockPeerGroup.scala
│ │ │ │ └── MockSigAlg.scala
│ │ └── hash
│ │ │ └── Keccak256Spec.scala
│ │ └── kademlia
│ │ ├── Generators.scala
│ │ ├── KBucketsSpec.scala
│ │ ├── KNetworkRequestProcessing.scala
│ │ ├── KNetworkSpec.scala
│ │ ├── KRouterSpec.scala
│ │ ├── TimeSetSpec.scala
│ │ ├── XorOrderingSpec.scala
│ │ └── XorSpec.scala
├── examples
│ ├── readme.md
│ ├── resources
│ │ ├── kconsole-sample.conf
│ │ └── logback.xml
│ ├── src
│ │ └── io
│ │ │ └── iohk
│ │ │ └── scalanet
│ │ │ └── kconsole
│ │ │ ├── App.scala
│ │ │ ├── AppContext.scala
│ │ │ ├── CommandParser.scala
│ │ │ ├── ConsoleLoop.scala
│ │ │ ├── PureConfigReadersAndWriters.scala
│ │ │ └── Utils.scala
│ └── ut
│ │ └── src
│ │ └── io
│ │ └── iohk
│ │ └── scalanet
│ │ └── kconsole
│ │ └── CommandParserSpec.scala
├── src
│ └── io
│ │ └── iohk
│ │ └── scalanet
│ │ ├── codec
│ │ └── DefaultCodecs.scala
│ │ ├── crypto
│ │ └── CryptoUtils.scala
│ │ └── peergroup
│ │ ├── BufferConversionOps.scala
│ │ ├── CloseableQueue.scala
│ │ ├── ControlEvent.scala
│ │ ├── ExternalAddressResolver.scala
│ │ ├── InetAddressOps.scala
│ │ ├── InetMultiAddress.scala
│ │ ├── NettyFutureUtils.scala
│ │ ├── PeerGroup.scala
│ │ ├── ReqResponseProtocol.scala
│ │ ├── dynamictls
│ │ ├── ChannelAwareQueue.scala
│ │ ├── CustomHandlers.scala
│ │ ├── CustomTlsValidator.scala
│ │ ├── DynamicTLSExtension.scala
│ │ ├── DynamicTLSPeerGroup.scala
│ │ ├── DynamicTLSPeerGroupInternals.scala
│ │ └── DynamicTLSPeerGroupUtils.scala
│ │ ├── implicits.scala
│ │ ├── package.scala
│ │ └── udp
│ │ ├── DynamicUDPPeerGroup.scala
│ │ └── StaticUDPPeerGroup.scala
└── ut
│ ├── resources
│ ├── alice.pem
│ ├── bob.pem
│ ├── create-keystore.sh
│ ├── keystore.jks
│ ├── keystore.p12
│ ├── logback-test.xml
│ ├── truststore.jks
│ └── truststore.p12
│ └── src
│ └── io
│ └── iohk
│ └── scalanet
│ ├── NetUtils.scala
│ ├── PortForward.scala
│ ├── TaskValues.scala
│ ├── crypto
│ └── SignatureVerificationSpec.scala
│ ├── dynamictls
│ ├── CustomTlsValidatorSpec.scala
│ └── SignedKeyExtensionSpec.scala
│ ├── peergroup
│ ├── CloseableQueueSpec.scala
│ ├── DynamicTLSPeerGroupSpec.scala
│ ├── ExternalAddressResolverSpec.scala
│ ├── InetAddressOpsSpec.scala
│ ├── PeerUtils.scala
│ ├── StandardTestPack.scala
│ ├── TestMessage.scala
│ ├── ThrottlingIpFilterSpec.scala
│ ├── TransportPeerGroupAsyncSpec.scala
│ └── udp
│ │ ├── DynamicUDPPeerGroupSpec.scala
│ │ ├── StaticUDPPeerGroupSpec.scala
│ │ └── UDPPeerGroupSpec.scala
│ └── testutils
│ └── GeneratorUtils.scala
└── versionFile
└── version
/.circleci/config.yml:
--------------------------------------------------------------------------------
1 | version: 2.1
2 | jobs:
3 | build:
4 | docker:
5 | - image: openjdk:8
6 | working_directory: ~/repo
7 | steps:
8 | - checkout
9 | - run:
10 | name: install coursier
11 | command: curl -L -o coursier https://git.io/vgvpD && chmod +x coursier
12 |
13 | # The install started failing on CircleCI, it gets some HTML content instead of the package.
14 | - run:
15 | name: install scalafmt
16 | command: test -f scalafmt || ./coursier bootstrap org.scalameta:scalafmt-cli_2.12:2.7.4 -f -r sonatype:snapshots --main org.scalafmt.cli.Cli --standalone -o scalafmt
17 |
18 | - run:
19 | name: install mill
20 | command: sh -c '(echo "#!/usr/bin/env sh" && curl -L https://github.com/lihaoyi/mill/releases/download/0.8.0/0.8.0) > /usr/local/bin/mill && chmod +x /usr/local/bin/mill'
21 |
22 | - run:
23 | name: unit and integration tests
24 | #command: mill scalanet.test && mill scalanet.scoverage.htmlReport
25 | command: mill __.test
26 |
27 | - run:
28 | name: check that the code is formatted properly
29 | # if/else tolerates a missing scalafmt binary (the install is flaky, see above)
30 | # but still fails the build on formatting errors; the previous
31 | # `test -f scalafmt && ./scalafmt --test || echo ...` swallowed scalafmt failures too.
32 | command: if [ -f scalafmt ]; then ./scalafmt --test; else echo "scalafmt not installed"; fi
30 |
31 | - when:
32 | condition:
33 | or:
34 | - equal: [ master, << pipeline.git.branch >> ]
35 | - equal: [ develop, << pipeline.git.branch >> ]
36 | steps:
37 | - run:
38 | name: install gpg2
39 | # GPG in docker needs to be run with some additional flags
40 | # and we are not able to change how mill uses it
41 | # this is why we're creating wrapper that adds the flags
42 | command: sh -c "apt update && apt install -y gnupg2 && mv /usr/bin/gpg /usr/bin/gpg-vanilla && echo '#!/bin/sh\n\n/usr/bin/gpg-vanilla --no-tty --pinentry loopback \$@' > /usr/bin/gpg && chmod 755 /usr/bin/gpg && cat /usr/bin/gpg"
43 |
44 | - run:
45 | name: install base64
46 | command: apt update && apt install -y cl-base64
47 |
48 | - run:
49 | name: publish
50 | command: .circleci/publish
51 | no_output_timeout: 30m
52 |
53 | # scoverage disabled
54 | #- store_artifacts:
55 | #path: out/scalanet/scoverage/htmlReport/
56 |
57 | workflows:
58 | build_and_publish:
59 | jobs:
60 | - build
61 |
--------------------------------------------------------------------------------
/.circleci/publish:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | set -euv
4 |
5 | echo $GPG_KEY | base64 --decode | gpg --batch --import
6 |
7 | gpg --passphrase $GPG_PASSPHRASE --batch --yes -a -b LICENSE
8 |
9 | if [[ "$CIRCLE_BRANCH" == "develop" ]]; then
10 |
11 | mill mill.scalalib.PublishModule/publishAll \
12 | __.publishArtifacts \
13 | "$OSS_USERNAME":"$OSS_PASSWORD" \
14 | --gpgArgs --passphrase="$GPG_PASSPHRASE",--batch,--yes,-a,-b
15 |
16 | elif [[ "$CIRCLE_BRANCH" == "master" ]]; then
17 |
18 | mill versionFile.setReleaseVersion
19 | mill mill.scalalib.PublishModule/publishAll \
20 | __.publishArtifacts \
21 | "$OSS_USERNAME":"$OSS_PASSWORD" \
22 | --gpgArgs --passphrase="$GPG_PASSPHRASE",--batch,--yes,-a,-b \
23 | --readTimeout 600000 \
24 | --awaitTimeout 600000 \
25 | --release true
26 |
27 | else
28 |
29 | echo "Skipping publish step"
30 |
31 | fi
32 |
--------------------------------------------------------------------------------
/.circleci/removeScalaStandardLibrary:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | if [ ! $# -eq 1 ]
4 | then
5 | echo "USAGE: removeScalaStandardLibrary file.jar"
6 | exit 1
7 | fi
8 |
9 | TARGET_JAR="$1"
10 |
11 | if [ ! -f "$TARGET_JAR" ]
12 | then
13 | echo "USAGE: removeScalaStandardLibrary file.jar"
14 | echo
15 | echo " ERROR, '$TARGET_JAR' is not a file or can not be read"
16 | exit 1
17 | fi
18 |
19 | function performExtraction() {
20 | zip -d $TARGET_JAR "scala/*" "rootdoc.txt" "library.properties" "reflect.properties"
21 | }
22 |
23 | if [ ! -w "$TARGET_JAR" ]
24 | then
25 | chmod u+w "$TARGET_JAR"
26 | performExtraction
27 | EXIT_CODE="$?"
28 | chmod u-w "$TARGET_JAR"
29 | exit "$EXIT_CODE"
30 | else
31 | performExtraction
32 | fi
33 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/bug_report.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: Bug report
3 | about: Create a report to help us improve
4 |
5 | ---
6 |
7 | # Describe the bug
8 | A clear and concise description of what the bug is.
9 |
10 | # To Reproduce
11 | Steps to reproduce the behavior:
12 |
13 | 1. Go to '...'
14 | 2. Run `command`
15 | 3. See error
16 |
17 | # Expected behavior
18 | A clear and concise description of what you expected to happen.
19 |
20 | # Desktop (please complete the following information):
21 |
22 | - OS: [e.g. Ubuntu 4.15.0-20-generic]
23 | - Version: [e.g v0.4.0]
24 | - Configuration: [e.g default configuration]
25 |
26 | # Additional context
27 | Add any other context about the problem here, include screenshots, logs, provide links, propose solution, network topology, etc.
28 |
--------------------------------------------------------------------------------
/.github/PULL_REQUEST_TEMPLATE.md:
--------------------------------------------------------------------------------
1 | # Description
2 |
3 | _A clear and concise description of what this pull request does or fixes._
4 |
5 | # Proposed Solution
6 |
 7 | _**Optional** Explain how this PR solves the problem stated in [Description](#Description). You can also enumerate different alternatives considered while approaching this task and references to specifications._
8 |
9 | # Important Changes Introduced
10 |
11 | _**Optional** Notify reviewers about important changes that were introduced while developing this task_
12 |
13 | # Testing
14 |
15 | _**Optional** Leave any recommendations that may be useful to reviewers while they are testing this PR_
16 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | out/
2 | .idea/
3 | .idea_modules/
4 | .ijwb/
5 | target/
6 | *.log
7 | *.iml
8 | .DS_Store
9 | .metals
10 | .bloop
11 | mill.isGc
12 | .vscode
13 |
--------------------------------------------------------------------------------
/.scalafmt.conf:
--------------------------------------------------------------------------------
1 | version = "2.0.0"
2 | project.git = true
3 | maxColumn = 120
4 | align=none
5 |
--------------------------------------------------------------------------------
/NOTICE:
--------------------------------------------------------------------------------
1 | Copyright 2019 Input Output (HK) Ltd.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0.txt
4 |
5 | Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
6 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # CI status
2 | [](https://circleci.com/gh/input-output-hk/scalanet)
3 |
4 | # scalanet
5 |
6 | ### Summary
7 |
8 | Scalanet is an asynchronous, strongly typed, resource-managed networking library, written in Scala with support for a variety of network technologies.
9 | What does all that mean?
10 | * Resource managed. Scalanet makes it as easy as possible to send and receive messages without having to open or close connections.
11 | * Asynchronous. Scalanet is non-blocking. In this regard, it is like netty, however, unlike netty, Scalanet uses *reactive*
12 | programming idioms.
13 | * Technology support. Out of the box, Scalanet supports TCP and UDP (with other internet and non-internet technologies to come) but through an abstraction called the _Peer Group_, allows for the addition of other transports or more complex p2p overlays (kademlia, ethereum, etc). The _Peer Group_ provides a consistent interface whatever your networking approach.
14 |
15 | It is well suited to peer-to-peer apps but supports client-server too.
16 |
17 | ### Peer groups
18 | As mentioned, the foundation of Scalanet is the notion of a _Peer Group_. From a practical standpoint, a peer group
19 | allows an application to use a variety of network technologies with a consistent interface. More abstractly, it defines
20 | * an address space and
21 | * a context in which communication can happen.
22 | * TODO: quality of service
23 |
24 | A Peer Group could be something like scalanet's `UDPPeerGroup` where the addresses are IP:port combos and the set of
25 | peers allowed to communicate is basically anybody on the IP network in question (the internet, an office network, etc).
26 | Equally, a Peer Group could be something like an Ethereum network where addresses are public keys and the peers
27 | are anybody who talks the RLPx protocol. Equally, a peer group could be an integration test with the address space {Alice, Bob, Charlie}
28 | and the peers are all in the same JVM. Scalanet will not limit you in this regard.
29 |
30 | Peer groups can implement arbitrary enrolment and encryption schemes, so are suitable for implementing secure messaging overlays.
31 | Typically, on the internet, limiting the context of communication (aka _zoning_) is performed by firewalls. The idea
32 | is so ubiquitous that it may not have struck you that this is a hack. Peer groups are designed to support more elegant
33 | solutions, generally using cryptography instead of firewall provisioning.
34 |
35 | ### Structure of the library
36 | Here is a picture of the structure of the library for a few sample applications:
37 | 
38 |
39 | ### Getting started
40 | The easiest way to get started is to send and receive data over TCP, using the library just like netty. Have a look at
41 | the [TCPPeerGroupSpec](core/io/iohk/scalanet/test/peergroup/TCPPeerGroupSpec.scala) test case or the following code.
42 |
43 | ```scala
44 | // import some peer group classes
45 | import io.iohk.scalanet.peergroup._
46 |
47 | // message sending can be controlled using either
48 | // monix, cat-effect, cats EitherT or scala Futures
49 | // depending on your taste.
50 | import io.iohk.scalanet.peergroup.future._
51 | import scala.concurrent.Future
52 |
53 | import java.net.InetSocketAddress
54 | import java.nio.ByteBuffer
55 |
56 | val config = TCPPeerGroup.Config(new InetSocketAddress(???))
57 |
58 | val tcp = TCPPeerGroup.createOrThrow(config)
59 |
60 | // send a message
61 | val messageF: Future[Unit] = tcp.sendMessage(new InetSocketAddress("example.com", 80), ByteBuffer.wrap("Hello!".getBytes))
62 |
63 | // receive messages
64 | tcp.messageStream.foreach((b: ByteBuffer) => ())
65 |
66 | ```
67 |
68 | # Contributing
69 |
70 | ### Branches
71 |
72 | Two main branches are maintained: `develop` and `master`.
73 | `master` contains the latest stable version of the library.
74 | `develop` is the place you want to merge to if submitting PRs.
75 |
76 | ### Building the codebase
77 | Scalanet is capable of building against Scala 2.12.10 and 2.13.4
 78 | This guide shows each command for the Scala 2.13.4 build (`mill csm[2.13.4]...`) alongside the cross-version multi-build form (`mill __.`).
79 |
80 | To build the codebase, we use [mill](http://www.lihaoyi.com/mill). Assuming you have mill installed correctly, you can build and test the codebase with
81 | ```bash
82 | mill csm[2.13.4].__.test -or-
83 | mill __.test
84 | ```
85 |
86 | A single test suite can be executed with the `single` helper command, for example:
87 | ```bash
88 | mill csm[2.13.4].scalanet.ut.single io.iohk.scalanet.crypto.SignatureVerificationSpec -or-
89 | mill __.scalanet.ut.single io.iohk.scalanet.crypto.SignatureVerificationSpec
90 | ```
91 |
92 | ### Publishing
93 |
94 | Have a look [here](http://www.lihaoyi.com/mill/page/common-project-layouts.html#publishing) for how to publish multiple modules.
95 | The latest build on the `develop` branch is always published to [Sonatype](https://oss.sonatype.org/) according to the [Circle CI config](./.circleci/config.yml).
96 | To use it in a downstream project add the snapshots to the resolvers, e.g. in `build.sbt`:
97 |
98 | ```
99 | resolvers += "Sonatype OSS Snapshots" at "https://oss.sonatype.org/content/repositories/snapshots"
100 | ```
101 |
102 | To publish new release to maven central:
103 | 1. Create release branch `release/versionFromVersionFile`
104 | 2. Create a PR from release branch to `master` branch
105 | 3. Merge release PR to `master`, tag merge commit and push it
106 | 4. Create merge back PR from `master` to `develop` bumping appropriate version in `versionFile/version`,
107 | e.g. `mill versionFile.setNextVersion --bump minor`, to make sure no more updates are sent to the released snapshot
108 |
109 |
110 | ### Formatting the codebase
111 | In order to keep the code format consistent, we use scalafmt.
112 |
113 | The CI build will fail if code is not formatted, but the project contains a githook that means you do not have to think
114 | about it. To set this up:
115 | - Install [coursier](https://github.com/coursier/coursier#command-line), the `coursier` command must work.
116 | - `./install-scalafmt.sh` (might require sudo).
117 | - `cp pre-commit .git/hooks/pre-commit`
118 |
119 | ### Reporting problems
120 | You can also create issues in github at https://github.com/input-output-hk/scalanet/issues.
121 |
--------------------------------------------------------------------------------
/coursier:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/input-output-hk/scalanet/fce50a156a5be213093cc8dec65cc6d6e9af5a6e/coursier
--------------------------------------------------------------------------------
/doc-resources/sample-configurations.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/input-output-hk/scalanet/fce50a156a5be213093cc8dec65cc6d6e9af5a6e/doc-resources/sample-configurations.png
--------------------------------------------------------------------------------
/docker/installBazel:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | export BAZEL_VERSION=0.24.0
4 |
5 | apt-get update && apt-get install -y --no-install-recommends git ca-certificates curl gnupg \
6 | && apt-get install -y --no-install-recommends bash-completion g++ unzip zlib1g-dev \
7 | && curl -LO "https://github.com/bazelbuild/bazel/releases/download/${BAZEL_VERSION}/bazel_${BAZEL_VERSION}-linux-x86_64.deb" \
8 | && dpkg -i bazel_*.deb \
9 | && apt-get purge --auto-remove -y curl gnupg
10 |
--------------------------------------------------------------------------------
/docker/installMill:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | apt-get update && apt-get install -y --no-install-recommends curl \
4 | && sh -c '(echo "#!/usr/bin/env sh" && curl -L https://github.com/lihaoyi/mill/releases/download/0.4.0/0.4.0) > /usr/local/bin/mill && chmod +x /usr/local/bin/mill'
--------------------------------------------------------------------------------
/install-scalafmt.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | ./coursier bootstrap org.scalameta:scalafmt-cli_2.12:2.7.4 \
3 | -f \
4 | -r sonatype:snapshots --main org.scalafmt.cli.Cli \
5 | --standalone \
6 | -o scalafmt
7 |
--------------------------------------------------------------------------------
/pre-commit:
--------------------------------------------------------------------------------
1 | ./scalafmt --test
2 |
--------------------------------------------------------------------------------
/scalafmt:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/input-output-hk/scalanet/fce50a156a5be213093cc8dec65cc6d6e9af5a6e/scalafmt
--------------------------------------------------------------------------------
/scalanet/discovery/it/resources/logback-test.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 | %d{HH:mm:ss.SSS} %-5level %logger{36} %msg%n
7 |
8 |
9 |
10 |
11 |
12 |
13 |
14 |
15 |
16 |
17 |
18 |
19 |
--------------------------------------------------------------------------------
/scalanet/discovery/it/src/io/iohk/scalanet/discovery/ethereum/v4/DiscoveryKademliaIntegrationSpec.scala:
--------------------------------------------------------------------------------
1 | package io.iohk.scalanet.discovery.ethereum.v4
2 |
3 | import cats.effect.Resource
4 | import io.iohk.scalanet.discovery.crypto.{PublicKey, PrivateKey}
5 | import io.iohk.scalanet.discovery.crypto.SigAlg
6 | import io.iohk.scalanet.discovery.ethereum.Node
7 | import io.iohk.scalanet.discovery.ethereum.v4.mocks.MockSigAlg
8 | import io.iohk.scalanet.discovery.hash.Hash
9 | import io.iohk.scalanet.kademlia.KademliaIntegrationSpec
10 | import io.iohk.scalanet.kademlia.XorOrdering
11 | import io.iohk.scalanet.NetUtils
12 | import io.iohk.scalanet.peergroup.InetMultiAddress
13 | import io.iohk.scalanet.peergroup.udp.StaticUDPPeerGroup
14 | import java.net.InetSocketAddress
15 | import monix.eval.Task
16 | import scala.concurrent.duration._
17 | import scodec.bits.BitVector
18 |
19 | class DiscoveryKademliaIntegrationSpec extends KademliaIntegrationSpec("DiscoveryService with StaticUDPPeerGroup") { // Runs the shared Kademlia suite with DiscoveryService over StaticUDPPeerGroup.
20 | override type PeerRecord = Node
21 | 
22 | class DiscoveryTestNode(
23 | override val self: Node,
24 | service: DiscoveryService
25 | ) extends TestNode {
26 | override def getPeers: Task[Seq[Node]] = // Peers currently known to the discovery service.
27 | service.getNodes.map(_.toSeq)
28 | }
29 | 
30 | // Using fake crypto and scodec encoding instead of RLP.
31 | implicit val sigalg: SigAlg = new MockSigAlg()
32 | import io.iohk.scalanet.discovery.ethereum.codecs.DefaultCodecs._
33 | // Not dealing with non-conforming clients here.
34 | implicit val packetCoded = Packet.packetCodec(allowDecodeOverMaxPacketSize = false)
35 | 
36 | override def generatePeerRecordWithKey = {
37 | val address = NetUtils.aRandomAddress()
38 | val (publicKey, privateKey) = sigalg.newKeyPair
39 | val node = Node(publicKey, Node.Address(address.getAddress, address.getPort, address.getPort)) // Same random port reused for the UDP and TCP fields.
40 | node -> privateKey
41 | }
42 | 
43 | override def makeXorOrdering(nodeId: BitVector): Ordering[Node] =
44 | XorOrdering[Node, Hash](_.kademliaId)(Node.kademliaId(PublicKey(nodeId)))
45 | 
46 | override def startNode(
47 | selfRecordWithKey: (Node, PrivateKey),
48 | initialNodes: Set[Node],
49 | testConfig: TestNodeKademliaConfig
50 | ): Resource[Task, TestNode] = {
51 | val (selfNode, privateKey) = selfRecordWithKey
52 | for {
53 | peerGroup <- StaticUDPPeerGroup[Packet](
54 | StaticUDPPeerGroup.Config(
55 | bindAddress = nodeAddressToInetMultiAddress(selfNode.address).inetSocketAddress,
56 | receiveBufferSizeBytes = Packet.MaxPacketBitsSize / 8 * 2 // Room for two max-size packets.
57 | )
58 | )
59 | config = DiscoveryConfig.default.copy(
60 | requestTimeout = 500.millis,
61 | kademliaTimeout = 100.millis, // We won't get that many results and waiting for them is slow.
62 | kademliaAlpha = testConfig.alpha,
63 | kademliaBucketSize = testConfig.k,
64 | discoveryPeriod = testConfig.refreshRate,
65 | knownPeers = initialNodes,
66 | subnetLimitPrefixLength = 0 // Disable subnet limits: all test nodes share localhost.
67 | )
68 | network <- Resource.liftF {
69 | DiscoveryNetwork[InetMultiAddress](
70 | peerGroup,
71 | privateKey,
72 | localNodeAddress = selfNode.address,
73 | toNodeAddress = inetMultiAddressToNodeAddress,
74 | config = config
75 | )
76 | }
77 | service <- DiscoveryService[InetMultiAddress](
78 | privateKey,
79 | node = selfNode,
80 | config = config,
81 | network = network,
82 | toAddress = nodeAddressToInetMultiAddress
83 | )
84 | } yield new DiscoveryTestNode(selfNode, service)
85 | }
86 | 
87 | def inetMultiAddressToNodeAddress(address: InetMultiAddress): Node.Address = {
88 | val addr = address.inetSocketAddress
89 | Node.Address(addr.getAddress, addr.getPort, addr.getPort) // Same port for UDP and TCP.
90 | }
91 | 
92 | def nodeAddressToInetMultiAddress(address: Node.Address): InetMultiAddress = // Only the UDP endpoint is needed for discovery traffic.
93 | InetMultiAddress(new InetSocketAddress(address.ip, address.udpPort))
94 | }
95 |
--------------------------------------------------------------------------------
/scalanet/discovery/it/src/io/iohk/scalanet/kademlia/KRouterKademliaIntegrationSpec.scala:
--------------------------------------------------------------------------------
1 | package io.iohk.scalanet.kademlia
2 |
3 | import java.security.SecureRandom
4 | import cats.effect.Resource
5 | import io.iohk.scalanet.NetUtils
6 | import io.iohk.scalanet.kademlia.KNetwork.KNetworkScalanetImpl
7 | import io.iohk.scalanet.kademlia.KRouter.NodeRecord
8 | import io.iohk.scalanet.peergroup.InetMultiAddress
9 | import monix.eval.Task
10 | import io.iohk.scalanet.peergroup.PeerGroup
11 | import scodec.bits.BitVector
12 | import io.iohk.scalanet.discovery.crypto.PrivateKey
13 |
14 | abstract class KRouterKademliaIntegrationSpec(peerGroupName: String) // Kademlia suite parameterized by transport; subclasses supply the peer group.
15 | extends KademliaIntegrationSpec(s"KRouter and $peerGroupName") {
16 | 
17 | override type PeerRecord = NodeRecord[InetMultiAddress]
18 | 
19 | override def generatePeerRecordWithKey: (PeerRecord, PrivateKey) = {
20 | val randomGen = new SecureRandom()
21 | val testBitLength = 16 // Short 16-bit IDs keep the test keyspace small.
22 | val address = InetMultiAddress(NetUtils.aRandomAddress())
23 | val id = KBuckets.generateRandomId(testBitLength, randomGen)
24 | val privateKey = PrivateKey(BitVector.empty) // Not using cryptography.
25 | NodeRecord(id, address, address) -> privateKey
26 | }
27 | 
28 | override def makeXorOrdering(baseId: BitVector): Ordering[NodeRecord[InetMultiAddress]] =
29 | XorNodeOrdering(baseId)
30 | 
31 | import io.iohk.scalanet.codec.DefaultCodecs._
32 | import io.iohk.scalanet.kademlia.codec.DefaultCodecs._
33 | implicit val codec = implicitly[scodec.Codec[KMessage[InetMultiAddress]]] // Summon the codec derived from the imported DefaultCodecs.
34 | 
35 | class KRouterTestNode(
36 | override val self: PeerRecord,
37 | router: KRouter[InetMultiAddress]
38 | ) extends TestNode {
39 | override def getPeers: Task[Seq[NodeRecord[InetMultiAddress]]] = {
40 | router.nodeRecords.map(_.values.toSeq)
41 | }
42 | }
43 | 
44 | def makePeerGroup( // Implemented by subclasses to choose the transport.
45 | selfRecord: NodeRecord[InetMultiAddress]
46 | ): Resource[Task, PeerGroup[InetMultiAddress, KMessage[InetMultiAddress]]]
47 | 
48 | private def startRouter(
49 | selfRecord: NodeRecord[InetMultiAddress],
50 | routerConfig: KRouter.Config[InetMultiAddress]
51 | ): Resource[Task, KRouter[InetMultiAddress]] = {
52 | for {
53 | peerGroup <- makePeerGroup(selfRecord)
54 | kademliaNetwork = new KNetworkScalanetImpl(peerGroup)
55 | router <- Resource.liftF(KRouter.startRouterWithServerPar(routerConfig, kademliaNetwork))
56 | } yield router
57 | }
58 | 
59 | override def startNode(
60 | selfRecordWithKey: (PeerRecord, PrivateKey),
61 | initialNodes: Set[PeerRecord],
62 | testConfig: TestNodeKademliaConfig
63 | ): Resource[Task, TestNode] = {
64 | val (selfRecord, _) = selfRecordWithKey // Private key unused: KRouter does not sign messages.
65 | val routerConfig = KRouter.Config(
66 | selfRecord,
67 | initialNodes,
68 | alpha = testConfig.alpha,
69 | k = testConfig.k,
70 | serverBufferSize = testConfig.serverBufferSize,
71 | refreshRate = testConfig.refreshRate
72 | )
73 | for {
74 | router <- startRouter(selfRecord, routerConfig)
75 | } yield new KRouterTestNode(selfRecord, router)
76 | }
77 | 
78 | }
79 |
80 | class StaticUDPKRouterKademliaIntegrationSpec extends KRouterKademliaIntegrationSpec("StaticUDP") { // Runs the suite over StaticUDPPeerGroup.
81 | import io.iohk.scalanet.peergroup.udp.StaticUDPPeerGroup
82 | 
83 | override def makePeerGroup(
84 | selfRecord: NodeRecord[InetMultiAddress]
85 | ) = {
86 | val udpConfig = StaticUDPPeerGroup.Config(selfRecord.routingAddress.inetSocketAddress, channelCapacity = 100) // Bounded per-channel buffering for the test.
87 | StaticUDPPeerGroup[KMessage[InetMultiAddress]](udpConfig)
88 | }
89 | }
90 |
91 | class DynamicUDPKRouterKademliaIntegrationSpec extends KRouterKademliaIntegrationSpec("DynamicUDP") { // Runs the suite over DynamicUDPPeerGroup.
92 | import io.iohk.scalanet.peergroup.udp.DynamicUDPPeerGroup
93 | 
94 | override def makePeerGroup(
95 | selfRecord: NodeRecord[InetMultiAddress]
96 | ) = {
97 | val udpConfig = DynamicUDPPeerGroup.Config(selfRecord.routingAddress.inetSocketAddress, channelCapacity = 100) // Bounded per-channel buffering for the test.
98 | DynamicUDPPeerGroup[KMessage[InetMultiAddress]](udpConfig)
99 | }
100 | }
101 |
--------------------------------------------------------------------------------
/scalanet/discovery/src/io/iohk/scalanet/discovery/Tagger.scala:
--------------------------------------------------------------------------------
1 | package io.iohk.scalanet.discovery
2 |
3 | import shapeless.tag, tag.@@
4 |
5 | /** Helper class to make it easier to tag raw types such as BitVector
6 | * to specializations so that the compiler can help make sure we are
 7 | * passing the right values to methods.
8 | *
9 | * Using it like so:
10 | *
11 | * ```
12 | * trait MyTypeTag
13 | * object MyType extends Tagger[ByteVector, MyTypeTag]
14 | * type MyType = MyType.Tagged
15 | *
16 | * val myThing = MyType(ByteVector.empty)
17 | * ```
18 | *
19 | */
20 | trait Tagger[U, T] { // U = underlying raw type, T = phantom tag type (never instantiated).
21 | type Tagged = U @@ T // A U the compiler treats as a distinct type via shapeless tagging.
22 | def apply(underlying: U): Tagged = // Zero-cost: tagging is a cast, no wrapper allocated.
23 | tag[T][U](underlying)
24 | }
25 |
--------------------------------------------------------------------------------
/scalanet/discovery/src/io/iohk/scalanet/discovery/crypto/SigAlg.scala:
--------------------------------------------------------------------------------
1 | package io.iohk.scalanet.discovery.crypto
2 |
3 | import scodec.bits.BitVector
4 | import scodec.Attempt
5 |
 6 | trait SigAlg { // Pluggable signature algorithm (e.g. Secp256k1 in production, a mock in tests).
 7 | def name: String // Human-readable algorithm name.
 8 | 
 9 | def PrivateKeyBytesSize: Int // Sizes, in bytes, of the raw serialized forms.
10 | def PublicKeyBytesSize: Int
11 | def SignatureBytesSize: Int
12 | 
13 | def newKeyPair: (PublicKey, PrivateKey)
14 | 
15 | /** In the context of Secp256k1, produce a 65 byte signature
16 | * as the concatenation of `r`, `s` and the recovery ID `v`. */
17 | def sign(privateKey: PrivateKey, data: BitVector): Signature
18 | 
19 | /** In the context of Secp256k1, remove the `v` recovery ID. */
20 | def removeRecoveryId(signature: Signature): Signature
21 | 
22 | /** Verify that a signature is correct. It may or may not have a recovery ID. */
23 | def verify(publicKey: PublicKey, signature: Signature, data: BitVector): Boolean
24 | 
25 | /** Reverse engineer the public key from a signature, given the data that was signed.
26 | * It can fail if the signature is incorrect.
27 | */
28 | def recoverPublicKey(signature: Signature, data: BitVector): Attempt[PublicKey]
29 | 
30 | /** Produce the public key based on the private key. */
31 | def toPublicKey(privateKey: PrivateKey): PublicKey
32 | 
33 | /** In the context of Secp256k1, the uncompressed public key consists of a prefix byte
34 | * followed by an `x` and `y` coordinate. Remove `y` and adjust the prefix
35 | * to compress.
36 | *
37 | * See https://davidederosa.com/basic-blockchain-programming/elliptic-curve-keys
38 | */
39 | def compressPublicKey(publicKey: PublicKey): PublicKey
40 | }
41 |
--------------------------------------------------------------------------------
/scalanet/discovery/src/io/iohk/scalanet/discovery/crypto/package.scala:
--------------------------------------------------------------------------------
1 | package io.iohk.scalanet.discovery
2 |
3 | import scodec.bits.BitVector
4 |
package object crypto {

  // Phantom tag types: they carry no data and exist only so that the
  // tagged BitVector aliases below are distinct types to the compiler,
  // preventing e.g. a Signature being passed where a PublicKey is expected.
  sealed trait PrivateKeyTag
  sealed trait PublicKeyTag
  sealed trait SignatureTag

  // Raw private key bytes, tagged. No structural validation is done here.
  object PrivateKey extends Tagger[BitVector, PrivateKeyTag]
  type PrivateKey = PrivateKey.Tagged

  // Raw public key bytes, tagged.
  object PublicKey extends Tagger[BitVector, PublicKeyTag]
  type PublicKey = PublicKey.Tagged

  // Raw signature bytes, tagged.
  object Signature extends Tagger[BitVector, SignatureTag]
  type Signature = Signature.Tagged
}
20 |
--------------------------------------------------------------------------------
/scalanet/discovery/src/io/iohk/scalanet/discovery/ethereum/EthereumNodeRecord.scala:
--------------------------------------------------------------------------------
1 | package io.iohk.scalanet.discovery.ethereum
2 |
3 | import scodec.bits.ByteVector
4 | import scala.collection.SortedMap
5 | import scala.math.Ordering.Implicits._
6 | import java.nio.charset.StandardCharsets.UTF_8
7 | import io.iohk.scalanet.discovery.crypto.{Signature, PrivateKey, PublicKey, SigAlg}
8 | import scodec.{Codec, Attempt}
9 | import java.net.Inet6Address
10 |
/** ENR corresponding to https://github.com/ethereum/devp2p/blob/master/enr.md */
case class EthereumNodeRecord(
    // Signature over the record contents: [seq, k0, v0, k1, v1, ...];
    // produced and checked against the codec-encoded `content` (see companion object).
    signature: Signature,
    // The signed payload: sequence number plus the sorted attribute map.
    content: EthereumNodeRecord.Content
)
17 |
object EthereumNodeRecord {

  // Ordering for the SortedMap of attributes; compares byte-by-byte,
  // which matches the ENR requirement that keys are sorted.
  implicit val byteVectorOrdering: Ordering[ByteVector] =
    Ordering.by[ByteVector, Seq[Byte]](_.toSeq)

  /** The part of the record that gets signed. */
  case class Content(
      // Nodes should increment this number whenever their properties change, like their address, and re-publish.
      seq: Long,
      // Normally clients treat the values as RLP, however we don't have access to the RLP types here, hence it's just bytes.
      attrs: SortedMap[ByteVector, ByteVector]
  )
  object Content {
    /** Convenience constructor building the sorted attribute map from varargs. */
    def apply(seq: Long, attrs: (ByteVector, ByteVector)*): Content =
      Content(seq, SortedMap(attrs: _*))
  }

  /** Well-known attribute keys from the ENR specification. */
  object Keys {
    /** Turn a string key into its UTF-8 byte representation. */
    def key(k: String): ByteVector =
      ByteVector(k.getBytes(UTF_8))

    /** name of identity scheme, e.g. "v4" */
    val id = key("id")

    /** compressed secp256k1 public key, 33 bytes */
    val secp256k1 = key("secp256k1")

    /** IPv4 address, 4 bytes */
    val ip = key("ip")

    /** TCP port, big endian integer */
    val tcp = key("tcp")

    /** UDP port, big endian integer */
    val udp = key("udp")

    /** IPv6 address, 16 bytes */
    val ip6 = key("ip6")

    /** IPv6-specific TCP port, big endian integer */
    val tcp6 = key("tcp6")

    /** IPv6-specific UDP port, big endian integer */
    val udp6 = key("udp6")

    /** The keys above have pre-defined meaning, but there can be arbitrary entries in the map. */
    val Predefined: Set[ByteVector] = Set(id, secp256k1, ip, tcp, udp, ip6, tcp6, udp6)
  }

  /** Assemble a record from an already computed signature and attributes. */
  def apply(signature: Signature, seq: Long, attrs: (ByteVector, ByteVector)*): EthereumNodeRecord =
    EthereumNodeRecord(
      signature,
      EthereumNodeRecord.Content(seq, attrs: _*)
    )

  /** Build and sign a record: the content is encoded with the implicit codec,
    * signed with the private key, and the recovery ID is stripped from the
    * signature (ENR signatures don't carry `v`).
    * Fails if the content cannot be encoded.
    */
  def apply(privateKey: PrivateKey, seq: Long, attrs: (ByteVector, ByteVector)*)(
      implicit sigalg: SigAlg,
      codec: Codec[Content]
  ): Attempt[EthereumNodeRecord] = {
    val content = EthereumNodeRecord.Content(seq, attrs: _*)
    codec.encode(content).map { data =>
      val sig = sigalg.removeRecoveryId(sigalg.sign(privateKey, data))
      EthereumNodeRecord(sig, content)
    }
  }

  /** Build and sign a record describing the given node's own address,
    * picking IPv6 or IPv4 specific keys based on the address type.
    * Custom attributes may be added but cannot shadow pre-defined keys.
    */
  def fromNode(node: Node, privateKey: PrivateKey, seq: Long, customAttrs: (ByteVector, ByteVector)*)(
      implicit sigalg: SigAlg,
      codec: Codec[Content]
  ): Attempt[EthereumNodeRecord] = {
    val (ipKey, tcpKey, udpKey) =
      if (node.address.ip.isInstanceOf[Inet6Address])
        (Keys.ip6, Keys.tcp6, Keys.udp6)
      else
        (Keys.ip, Keys.tcp, Keys.udp)

    val standardAttrs = List(
      Keys.id -> ByteVector("v4".getBytes(UTF_8)),
      Keys.secp256k1 -> sigalg.compressPublicKey(sigalg.toPublicKey(privateKey)).toByteVector,
      ipKey -> ByteVector(node.address.ip.getAddress),
      tcpKey -> ByteVector.fromInt(node.address.tcpPort),
      udpKey -> ByteVector.fromInt(node.address.udpPort)
    )

    // Make sure a custom attribute doesn't overwrite a pre-defined one.
    val attrs = standardAttrs ++ customAttrs.filterNot(kv => Keys.Predefined(kv._1))

    apply(privateKey, seq, attrs: _*)
  }

  /** Check that the record's signature matches its content when signed by
    * the owner of `publicKey`. Fails only if the content can't be encoded;
    * an invalid signature yields `Attempt.successful(false)`.
    */
  def validateSignature(
      enr: EthereumNodeRecord,
      publicKey: PublicKey
  )(implicit sigalg: SigAlg, codec: Codec[Content]): Attempt[Boolean] = {
    codec.encode(enr.content).map { data =>
      sigalg.verify(publicKey, enr.signature, data)
    }
  }
}
116 |
--------------------------------------------------------------------------------
/scalanet/discovery/src/io/iohk/scalanet/discovery/ethereum/KeyValueTag.scala:
--------------------------------------------------------------------------------
1 | package io.iohk.scalanet.discovery.ethereum
2 |
3 | import cats.implicits._
4 | import java.nio.charset.StandardCharsets.UTF_8
5 | import scodec.bits.ByteVector
6 | import scala.util.{Try, Success, Failure}
7 |
/** Key value pairs that get added to the local ENR record as well as used
  * as criteria for accepting remote ENRs.
  */
trait KeyValueTag {

  /** Add a key-value pair to the outgoing ENR record.
    * Return None if this tag is used only for filtering.
    */
  def toAttr: Option[(ByteVector, ByteVector)]

  /** Apply a filter on incoming ENR records. */
  def toFilter: KeyValueTag.EnrFilter
}
21 |
object KeyValueTag {

  /** Return either a rejection message or unit, to accept the ENR. */
  type EnrFilter = EthereumNodeRecord => Either[String, Unit]

  /** Combine the filters of several tags into one filter that accepts an
    * ENR only if every individual filter accepts it (traverse over Either
    * stops at the first rejection).
    */
  def toFilter(tags: List[KeyValueTag]): EnrFilter = {
    val filters = tags.map(_.toFilter)
    enr => filters.traverse(_(enr)).void
  }

  /** Tag that publishes `key -> value` in the local ENR and rejects any
    * remote ENR whose `key` attribute is missing or different from `value`.
    */
  class StringEquals(key: String, value: String) extends KeyValueTag {
    private val keyBytes =
      EthereumNodeRecord.Keys.key(key)

    private val valueBytes =
      ByteVector(value.getBytes(UTF_8))

    override val toAttr =
      Some(keyBytes -> valueBytes)

    override val toFilter = enr =>
      enr.content.attrs.get(keyBytes) match {
        case Some(otherBytes) if otherBytes != valueBytes =>
          // Try to render the mismatching value as text for a readable error;
          // fall back to showing the raw bytes.
          // NOTE(review): `new String(bytes, UTF_8)` replaces malformed input
          // instead of throwing, so the Failure branch looks unreachable — confirm.
          Try(new String(otherBytes.toArray, UTF_8)) match {
            case Success(otherValue) =>
              Left(s"$key mismatch; $otherValue != $value")

            case Failure(_) =>
              Left(s"$key mismatch; $otherBytes != $valueBytes")
          }

        case Some(_) =>
          // Present and equal to the expected value: accept.
          Right(())

        case None =>
          Left(s"$key is missing; expected $value")
      }
  }

  /** Convenience tag requiring peers to advertise the same network ID. */
  object NetworkId {
    def apply(networkId: String) =
      new StringEquals("network-id", networkId)
  }
}
66 |
--------------------------------------------------------------------------------
/scalanet/discovery/src/io/iohk/scalanet/discovery/ethereum/Node.scala:
--------------------------------------------------------------------------------
1 | package io.iohk.scalanet.discovery.ethereum
2 |
3 | import io.iohk.scalanet.discovery.crypto.PublicKey
4 | import io.iohk.scalanet.discovery.hash.{Hash, Keccak256}
5 | import io.iohk.scalanet.peergroup.InetAddressOps._
6 | import java.net.InetAddress
7 | import scodec.bits.ByteVector
8 | import scala.util.Try
9 | import io.iohk.scalanet.peergroup.Addressable
10 |
/** A discoverable peer: its public-key identity plus its UDP/TCP address. */
case class Node(id: Node.Id, address: Node.Address) {
  // Cached Keccak256 of the ID; this is what the Kademlia XOR metric operates on.
  protected[discovery] lazy val kademliaId: Hash = Node.kademliaId(id)
}
14 |
object Node {

  /** 64 bit uncompressed Secp256k1 public key. */
  type Id = PublicKey

  /** The ID of the node is the 64 bit public key, but for the XOR distance we use its hash. */
  protected[discovery] def kademliaId(id: PublicKey): Hash =
    Keccak256(id)

  /** Network location of a node: one IP plus separate UDP and TCP ports. */
  case class Address(
      ip: InetAddress,
      udpPort: Int,
      tcpPort: Int
  ) {
    // Delegates to the companion check, extracting the sender's IP via Addressable.
    protected[discovery] def checkRelay[A: Addressable](sender: A): Boolean =
      Address.checkRelay(sender = Addressable[A].getAddress(sender).getAddress, address = ip)
  }
  object Address {
    /** Extract an address from an ENR, preferring the IPv6-specific keys
      * and falling back to the IPv4 ones; None if any component is
      * missing or unparseable.
      */
    def fromEnr(enr: EthereumNodeRecord): Option[Node.Address] = {
      import EthereumNodeRecord.Keys

      // Look up `key` in the record and parse it, swallowing parse errors.
      def attempt[T](key: ByteVector)(parse: ByteVector => T): Option[T] =
        for {
          raw <- enr.content.attrs.get(key)
          parsed <- Try(parse(raw)).toOption
        } yield parsed

      def ipFrom(key: ByteVector): Option[InetAddress] =
        attempt(key)(raw => InetAddress.getByAddress(raw.toArray))

      def portFrom(key: ByteVector): Option[Int] =
        attempt(key)(_.toInt(signed = false))

      for {
        ip <- ipFrom(Keys.ip6) orElse ipFrom(Keys.ip)
        udp <- portFrom(Keys.udp6) orElse portFrom(Keys.udp)
        tcp <- portFrom(Keys.tcp6) orElse portFrom(Keys.tcp)
      } yield Node.Address(ip, udpPort = udp, tcpPort = tcp)
    }

    /** Check that an address relayed by the sender is valid:
      * - Special and unspecified addresses are invalid.
      * - LAN/loopback addresses are valid if the sender is also LAN/loopback.
      * - Other addresses are valid.
      */
    def checkRelay(sender: InetAddress, address: InetAddress): Boolean = {
      val notSpecial = !(address.isSpecial || address.isUnspecified)
      val loopbackOk = !address.isLoopbackAddress || sender.isLoopbackAddress
      val lanOk = !address.isLAN || sender.isLAN
      notSpecial && loopbackOk && lanOk
    }
  }
}
67 |
--------------------------------------------------------------------------------
/scalanet/discovery/src/io/iohk/scalanet/discovery/ethereum/codecs/DefaultCodecs.scala:
--------------------------------------------------------------------------------
1 | package io.iohk.scalanet.discovery.ethereum.codecs
2 |
3 | import io.iohk.scalanet.discovery.hash.Hash
4 | import io.iohk.scalanet.discovery.crypto.{PublicKey, Signature}
5 | import io.iohk.scalanet.discovery.ethereum.{Node, EthereumNodeRecord}
6 | import io.iohk.scalanet.discovery.ethereum.v4.Payload
7 | import io.iohk.scalanet.discovery.ethereum.v4.Payload._
8 | import scodec.Codec
9 | import scodec.codecs.{Discriminated, Discriminator, uint4}
10 | import scodec.codecs.implicits._
11 | import scodec.bits.{BitVector, ByteVector}
12 | import scala.collection.SortedMap
13 | import scala.math.Ordering.Implicits._
14 | import java.net.InetAddress
15 |
/** scodec codecs for the discovery types.
  *
  * NOTE(review): the declaration order of the implicits matters for derivation;
  * each derived codec relies on the ones declared before it.
  */
object DefaultCodecs {

  // The tagged types (PublicKey, Signature, Hash) are all BitVectors underneath,
  // so their codecs just re-tag the decoded bits.
  implicit val publicKeyCodec: Codec[PublicKey] =
    implicitly[Codec[BitVector]].xmap(PublicKey(_), identity)

  implicit val signatureCodec: Codec[Signature] =
    implicitly[Codec[BitVector]].xmap(Signature(_), identity)

  implicit val hashCodec: Codec[Hash] =
    implicitly[Codec[BitVector]].xmap(Hash(_), identity)

  // IP addresses travel as their raw byte representation (4 or 16 bytes).
  implicit val inetAddressCodec: Codec[InetAddress] =
    implicitly[Codec[BitVector]]
      .xmap(bits => InetAddress.getByAddress(bits.toByteArray), ip => BitVector(ip.getAddress))

  implicit val addressCodec: Codec[Node.Address] =
    Codec.deriveLabelledGeneric

  implicit val nodeCodec: Codec[Node] =
    Codec.deriveLabelledGeneric

  // SortedMaps are encoded as a plain list of key-value pairs.
  implicit def sortedMapCodec[K: Codec: Ordering, V: Codec] =
    implicitly[Codec[List[(K, V)]]].xmap(
      (kvs: List[(K, V)]) => SortedMap(kvs: _*),
      (sm: SortedMap[K, V]) => sm.toList
    )

  // Byte-wise ordering for ENR attribute keys; must match the ordering used
  // when the records are built.
  implicit val byteVectorOrdering: Ordering[ByteVector] =
    Ordering.by[ByteVector, Seq[Byte]](_.toSeq)

  implicit val attrCodec: Codec[SortedMap[ByteVector, ByteVector]] =
    sortedMapCodec[ByteVector, ByteVector]

  implicit val enrContentCodec: Codec[EthereumNodeRecord.Content] =
    Codec.deriveLabelledGeneric

  implicit val enrCodec: Codec[EthereumNodeRecord] =
    Codec.deriveLabelledGeneric

  implicit val pingCodec: Codec[Ping] =
    Codec.deriveLabelledGeneric

  implicit val pongCodec: Codec[Pong] =
    Codec.deriveLabelledGeneric

  implicit val findNodeCodec: Codec[FindNode] =
    Codec.deriveLabelledGeneric

  implicit val neigbhorsCodec: Codec[Neighbors] =
    Codec.deriveLabelledGeneric

  implicit val enrRequestCodec: Codec[ENRRequest] =
    Codec.deriveLabelledGeneric

  implicit val enrResponseCodec: Codec[ENRResponse] =
    Codec.deriveLabelledGeneric

  // Payloads are discriminated by the 4-bit packet type; the numbers below
  // are the packet-type values from the discv4 wire specification.
  implicit val payloadDiscriminated =
    Discriminated[Payload, Int](uint4)

  implicit val pingDiscriminator =
    Discriminator[Payload, Ping, Int](1)

  implicit val pongDiscriminator =
    Discriminator[Payload, Pong, Int](2)

  implicit val findNodeDiscriminator =
    Discriminator[Payload, FindNode, Int](3)

  implicit val neighborsDiscriminator =
    Discriminator[Payload, Neighbors, Int](4)

  implicit val enrRequestDiscriminator =
    Discriminator[Payload, ENRRequest, Int](5)

  implicit val enrResponseDiscriminator =
    Discriminator[Payload, ENRResponse, Int](6)

  implicit val payloadCodec: Codec[Payload] =
    Codec.deriveCoproduct
}
97 |
--------------------------------------------------------------------------------
/scalanet/discovery/src/io/iohk/scalanet/discovery/ethereum/v4/DiscoveryConfig.scala:
--------------------------------------------------------------------------------
1 | package io.iohk.scalanet.discovery.ethereum.v4
2 |
3 | import io.iohk.scalanet.discovery.ethereum.Node
4 | import scala.concurrent.duration._
5 |
/** Tunable parameters of the discv4 discovery protocol. */
case class DiscoveryConfig(
    // How long in the future to set message expiration.
    messageExpiration: FiniteDuration,
    // Allow incoming messages to be expired by this amount, accounting for the fact
    // that the sender's clock might run late (or ours is early) and may have set the
    // expiry to what already seems like the past.
    maxClockDrift: FiniteDuration,
    // Timeout for individual requests.
    requestTimeout: FiniteDuration,
    // Timeout for collecting multiple potential Neighbors responses.
    kademliaTimeout: FiniteDuration,
    // Max number of neighbours to expect.
    kademliaBucketSize: Int,
    // Concurrency parameter 'alpha' for recursive Kademlia lookups.
    kademliaAlpha: Int,
    // Maximum time we consider a peer bonded without receiving a Pong response to a Ping.
    bondExpiration: FiniteDuration,
    // How often to look for new peers.
    discoveryPeriod: FiniteDuration,
    // Bootstrap nodes.
    knownPeers: Set[Node],
    // Limit the number of IPs from the same subnet, given by its prefix length, e.g. /24; 0 means no limit.
    subnetLimitPrefixLength: Int,
    // Limit the number of IPs from the same subnet in any given bucket; 0 means no limit.
    subnetLimitForBucket: Int,
    // Limit the number of IPs from the same subnet in the whole k-table; 0 means no limit.
    subnetLimitForTable: Int
)
34 |
object DiscoveryConfig {
  /** Default configuration; callers are expected to at least override `knownPeers`. */
  val default = DiscoveryConfig(
    messageExpiration = 60.seconds,
    maxClockDrift = Duration.Zero,
    requestTimeout = 3.seconds,
    kademliaTimeout = 7.seconds,
    kademliaBucketSize = 16,
    kademliaAlpha = 3,
    bondExpiration = 12.hours,
    discoveryPeriod = 15.minutes,
    knownPeers = Set.empty,
    subnetLimitPrefixLength = 24,
    subnetLimitForBucket = 2,
    subnetLimitForTable = 10
  )
}
51 |
--------------------------------------------------------------------------------
/scalanet/discovery/src/io/iohk/scalanet/discovery/ethereum/v4/DiscoveryRPC.scala:
--------------------------------------------------------------------------------
1 | package io.iohk.scalanet.discovery.ethereum.v4
2 |
3 | import io.iohk.scalanet.discovery.crypto.{PublicKey}
4 | import io.iohk.scalanet.discovery.ethereum.{Node, EthereumNodeRecord}
5 | import monix.eval.Task
6 |
/** The RPC methods comprising the Discovery protocol between peers.
  * `A` is the address type identifying the remote peer.
  */
trait DiscoveryRPC[A] {
  import DiscoveryRPC.{Call, Proc}

  /** Sends a Ping request to the node, waits for the correct Pong response,
    * and returns the ENR sequence, if the Pong had one.
    */
  def ping: Call[A, Proc.Ping]

  /** Sends a FindNode request to the node and collects Neighbours responses
    * until a timeout or if the maximum expected number of nodes are returned.
    */
  def findNode: Call[A, Proc.FindNode]

  /** Sends an ENRRequest to the node and waits for the correct ENRResponse,
    * returning the ENR from it.
    */
  def enrRequest: Call[A, Proc.ENRRequest]
}
26 |
object DiscoveryRPC {
  // ENR sequence number, as exchanged in Ping/Pong.
  type ENRSeq = Long

  /** Pair up requests with responses in the RPC. */
  sealed trait Proc {
    // Request payload type of the procedure.
    type Req
    // Response payload type of the procedure.
    type Res
  }
  object Proc {
    // Ping carries the caller's optional ENR sequence and returns the callee's.
    trait Ping extends Proc {
      type Req = Option[ENRSeq]
      type Res = Option[ENRSeq]
    }

    // FindNode asks for the peers closest to a target public key.
    trait FindNode extends Proc {
      type Req = PublicKey
      type Res = Seq[Node]
    }

    // ENRRequest takes no argument and returns the peer's current record.
    trait ENRRequest extends Proc {
      type Req = Unit
      type Res = EthereumNodeRecord
    }
  }

  /** Represents a request-response call to or from a remote peer.
    *
    * When remote, it returns None if the peer doesn't respond.
    * When local, returning None means ignoring the request.
    */
  type Call[A, P <: Proc] = A => P#Req => Task[Option[P#Res]]
}
59 |
--------------------------------------------------------------------------------
/scalanet/discovery/src/io/iohk/scalanet/discovery/ethereum/v4/KBucketsWithSubnetLimits.scala:
--------------------------------------------------------------------------------
1 | package io.iohk.scalanet.discovery.ethereum.v4
2 |
3 | import cats._
4 | import cats.implicits._
5 | import io.iohk.scalanet.discovery.hash.Hash
6 | import io.iohk.scalanet.discovery.ethereum.Node
7 | import io.iohk.scalanet.kademlia.{KBuckets, TimeSet}
8 | import io.iohk.scalanet.peergroup.Addressable
9 | import io.iohk.scalanet.peergroup.InetAddressOps._
10 | import java.net.InetAddress
11 |
/** A Kademlia K-table wrapper that enforces per-subnet limits on how many
  * peers may occupy a single bucket and the table as a whole, to make
  * eclipse-style attacks from one subnet harder.
  *
  * `tableLevelCounts` and `bucketLevelCounts` track how many entries each
  * (truncated) subnet address currently has; they are kept in sync with
  * `table` by `add`/`remove`.
  */
case class KBucketsWithSubnetLimits[A: Addressable](
    table: KBuckets[Hash],
    limits: KBucketsWithSubnetLimits.SubnetLimits,
    tableLevelCounts: KBucketsWithSubnetLimits.TableLevelCounts,
    bucketLevelCounts: KBucketsWithSubnetLimits.BucketLevelCounts
) {
  import DiscoveryNetwork.Peer
  import KBucketsWithSubnetLimits._

  /** True if the peer's Kademlia ID is already in the underlying table. */
  def contains(peer: Peer[A]): Boolean =
    table.contains(peer.kademliaId)

  def touch(peer: Peer[A]): KBucketsWithSubnetLimits[A] =
    // Note that `KBuckets.touch` also adds, so if the record
    // isn't in the table already then use `add` to maintain counts.
    if (contains(peer)) copy(table = table.touch(peer.kademliaId)) else add(peer)

  /** Add the peer to the underlying K-table unless doing so would violate some limit. */
  def add(peer: Peer[A]): KBucketsWithSubnetLimits[A] =
    if (contains(peer)) this
    else {
      val ip = subnet(peer)
      val idx = getBucket(peer)._1

      // Upsert the counts of the index and/or IP in the maps, so that we can check the limits on them.
      val tlc = incrementForTable(ip)
      val blc = incrementForBucket(idx, ip)

      val isOverAnyLimit =
        limits.isOverLimitForTable(tlc(ip)) ||
          limits.isOverLimitForBucket(blc(idx)(ip))

      // If over a limit, discard the candidate counts and keep the table as-is.
      if (isOverAnyLimit) this
      else {
        copy(
          table = table.add(peer.kademliaId),
          tableLevelCounts = tlc,
          bucketLevelCounts = blc
        )
      }
    }

  /** Remove the peer from the table and decrement the subnet counts. */
  def remove(peer: Peer[A]): KBucketsWithSubnetLimits[A] =
    if (!contains(peer)) this
    else {
      val ip = subnet(peer)
      val idx = getBucket(peer)._1

      val tlc = decrementForTable(ip)
      val blc = decrementForBucket(idx, ip)

      copy(table = table.remove(peer.kademliaId), tableLevelCounts = tlc, bucketLevelCounts = blc)
    }

  /** The `n` IDs in the table closest to the target by the XOR metric. */
  def closestNodes(targetKademliaId: Hash, n: Int): List[Hash] =
    table.closestNodes(targetKademliaId, n)

  /** Index and contents of the bucket this peer falls into. */
  def getBucket(peer: Peer[A]): (Int, TimeSet[Hash]) =
    table.getBucket(peer.kademliaId)

  // The peer's IP truncated to the configured prefix length; this is the subnet "ID".
  private def subnet(peer: Peer[A]): InetAddress =
    Addressable[A].getAddress(peer.address).getAddress.truncate(limits.prefixLength)

  /** Increase the table level count for the IP of a subnet. */
  // `|+|` is the cats Semigroup merge: counts for the same key are summed.
  private def incrementForTable(ip: InetAddress): TableLevelCounts =
    tableLevelCounts |+| Map(ip -> 1)

  /** Increase the bucket level count for the IP of a subnet. */
  private def incrementForBucket(idx: Int, ip: InetAddress): BucketLevelCounts =
    bucketLevelCounts |+| Map(idx -> Map(ip -> 1))

  /** Decrement the table level count for the IP of a subnet and remove the entry if it's zero. */
  private def decrementForTable(ip: InetAddress): TableLevelCounts =
    tableLevelCounts |+| Map(ip -> -1) match {
      case counts if counts(ip) <= 0 => counts - ip
      case counts => counts
    }

  /** Decrement the bucket level count for the IP of a subnet and remove the entry if it's zero
    * for the subnet itself, or the whole bucket.
    */
  private def decrementForBucket(idx: Int, ip: InetAddress): BucketLevelCounts =
    bucketLevelCounts |+| Map(idx -> Map(ip -> -1)) match {
      case counts if counts(idx)(ip) <= 0 && counts(idx).size > 1 =>
        // The subnet count in the bucket is zero, but there are other subnets in the bucket,
        // so keep the bucket level count and just remove the subnet from it.
        counts.updated(idx, counts(idx) - ip)
      case counts if counts(idx)(ip) <= 0 =>
        // The subnet count is zero, and it's the only subnet in the bucket, so remove the bucket.
        counts - idx
      case counts =>
        counts
    }
}
106 |
object KBucketsWithSubnetLimits {
  // Number of table entries per (truncated) subnet address.
  type SubnetCounts = Map[InetAddress, Int]
  // Counts across the whole table.
  type TableLevelCounts = SubnetCounts
  // Counts per bucket index.
  type BucketLevelCounts = Map[Int, SubnetCounts]

  /** Subnet occupancy limits; see `DiscoveryConfig` for the source values. */
  case class SubnetLimits(
      // Number of leftmost bits of the IP address that counts as a subnet, serving as its ID.
      prefixLength: Int,
      // Limit of nodes from the same subnet within any given bucket in the K-table.
      forBucket: Int,
      // Limit of nodes from the same subnet across all buckets in the K-table.
      forTable: Int
  ) {

    /** All limits can be disabled by setting the subnet prefix length to 0. */
    def isEnabled: Boolean = prefixLength > 0

    def isEnabledForBucket: Boolean = isEnabled && forBucket > 0

    def isEnabledForTable: Boolean = isEnabled && forTable > 0

    /** True when the bucket limit is active and `count` exceeds it. */
    def isOverLimitForBucket(count: Int): Boolean = isEnabledForBucket && count > forBucket

    /** True when the table limit is active and `count` exceeds it. */
    def isOverLimitForTable(count: Int): Boolean = isEnabledForTable && count > forTable
  }

  object SubnetLimits {
    /** No limits at all: prefix length 0 disables every check. */
    val Unlimited = SubnetLimits(0, 0, 0)

    def fromConfig(config: DiscoveryConfig): SubnetLimits =
      SubnetLimits(
        prefixLength = config.subnetLimitPrefixLength,
        forBucket = config.subnetLimitForBucket,
        forTable = config.subnetLimitForTable
      )
  }

  /** Start with an empty K-table keyed by the local node's Kademlia ID and zero counts. */
  def apply[A: Addressable](
      node: Node,
      limits: SubnetLimits
  ): KBucketsWithSubnetLimits[A] = {
    val emptyTable = new KBuckets[Hash](node.kademliaId, clock = java.time.Clock.systemUTC())
    KBucketsWithSubnetLimits[A](
      emptyTable,
      limits,
      tableLevelCounts = Map.empty,
      bucketLevelCounts = Map.empty
    )
  }
}
160 |
--------------------------------------------------------------------------------
/scalanet/discovery/src/io/iohk/scalanet/discovery/ethereum/v4/Packet.scala:
--------------------------------------------------------------------------------
1 | package io.iohk.scalanet.discovery.ethereum.v4
2 |
3 | import cats.Show
4 | import io.iohk.scalanet.discovery.hash.{Hash, Keccak256}
5 | import io.iohk.scalanet.discovery.crypto.{SigAlg, PrivateKey, PublicKey, Signature}
6 | import scodec.bits.BitVector
7 | import scodec.{Codec, Attempt, Decoder, Err, Encoder}
8 | import scodec.DecodeResult
9 |
/** Wire format from https://github.com/ethereum/devp2p/blob/master/discv4.md
  *
  * The packet type is included in the data.
  * */
case class Packet(
    // Keccak256 of `signature ++ data` (see `Packet.pack`).
    hash: Hash,
    // Signature over `data`, including the recovery ID.
    signature: Signature,
    // The encoded Payload, including its packet-type discriminator.
    data: BitVector
)
19 |
object Packet {
  // Fixed component sizes, in bits.
  val MacBitsSize = 32 * 8 // Keccak256
  val SigBitsSize = 65 * 8 // Secp256k1
  val MaxPacketBitsSize = 1280 * 8

  /** Decoder that consumes exactly `size` bits, failing with a structured
    * `InsufficientBits` error (the original consume error is discarded).
    */
  private def consumeNBits(context: String, size: Int) =
    Decoder[BitVector] { (bits: BitVector) =>
      bits.consumeThen(size)(
        err => Attempt.failure(Err.InsufficientBits(size, bits.size, List(context))),
        (range, remainder) => Attempt.successful(DecodeResult(range, remainder))
      )
    }

  // Decoder that takes everything left in the buffer as the payload data.
  private val consumeRemainingBits =
    Decoder[BitVector] { (bits: BitVector) =>
      Attempt.successful(DecodeResult(bits, BitVector.empty))
    }

  /** Decode hash, then signature, then the rest as data; optionally reject
    * inputs over the protocol's maximum packet size up front.
    */
  private def packetDecoder(allowDecodeOverMaxPacketSize: Boolean): Decoder[Packet] =
    for {
      _ <- Decoder { bits =>
        Attempt
          .guard(
            allowDecodeOverMaxPacketSize || bits.size <= MaxPacketBitsSize,
            "Packet to decode exceeds maximum size."
          )
          .map(_ => DecodeResult((), bits))
      }
      hash <- consumeNBits("Hash", MacBitsSize).map(Hash(_))
      signature <- consumeNBits("Signature", SigBitsSize).map(Signature(_))
      data <- consumeRemainingBits
    } yield Packet(hash, signature, data)

  /** Encode as the plain concatenation hash ++ signature ++ data, after
    * validating the component sizes and the overall size limit.
    */
  private val packetEncoder: Encoder[Packet] =
    Encoder[Packet] { (packet: Packet) =>
      for {
        _ <- Attempt.guard(packet.hash.size == MacBitsSize, "Unexpected hash size.")
        _ <- Attempt.guard(packet.signature.size == SigBitsSize, "Unexpected signature size.")
        bits <- Attempt.successful {
          packet.hash ++ packet.signature ++ packet.data
        }
        _ <- Attempt.guard(bits.size <= MaxPacketBitsSize, "Encoded packet exceeded maximum size.")
      } yield bits
    }

  /** Create a codec for packets. Some Ethereum clients don't respect the size limits;
    * for compatibility with them the check during decode can be turned off.
    */
  def packetCodec(allowDecodeOverMaxPacketSize: Boolean): Codec[Packet] =
    Codec[Packet](packetEncoder, packetDecoder(allowDecodeOverMaxPacketSize))

  /** Serialize the payload, sign the data and compute the hash. */
  def pack(
      payload: Payload,
      privateKey: PrivateKey
  )(implicit codec: Codec[Payload], sigalg: SigAlg): Attempt[Packet] =
    for {
      data <- codec.encode(payload)
      signature = sigalg.sign(privateKey, data)
      // The hash covers the signature as well, so it pins both fields.
      hash = Keccak256(signature ++ data)
    } yield Packet(hash, signature, data)

  /** Validate the hash, recover the public key by validating the signature, and deserialize the payload. */
  def unpack(packet: Packet)(implicit codec: Codec[Payload], sigalg: SigAlg): Attempt[(Payload, PublicKey)] =
    for {
      hash <- Attempt.successful(Keccak256(packet.signature ++ packet.data))
      _ <- Attempt.guard(hash == packet.hash, "Invalid hash.")
      publicKey <- sigalg.recoverPublicKey(packet.signature, packet.data)
      payload <- codec.decodeValue(packet.data)
    } yield (payload, publicKey)

  // Hex rendering for logs and error messages.
  implicit val show: Show[Packet] = Show.show[Packet] { p =>
    s"""Packet(hash = hex"${p.hash.toHex}", signature = hex"${p.signature.toHex}", data = hex"${p.data.toHex}")"""
  }
}
95 |
--------------------------------------------------------------------------------
/scalanet/discovery/src/io/iohk/scalanet/discovery/ethereum/v4/Payload.scala:
--------------------------------------------------------------------------------
1 | package io.iohk.scalanet.discovery.ethereum.v4
2 |
3 | import io.iohk.scalanet.discovery.hash.Hash
4 | import io.iohk.scalanet.discovery.crypto.PublicKey
5 | import io.iohk.scalanet.discovery.ethereum.{Node, EthereumNodeRecord}
6 |
/** Discovery protocol messages from https://github.com/ethereum/devp2p/blob/master/discv4.md
  *
  * Note that these case classes don't contain the packet-type, e.g. 0x01 for Ping,
  * because in our case that has to be handled by the Codec, so if it's RLP then
  * it has to correctly prepend the discriminant byte so that it can later deserialize
  * the data as well. Incidentally this works fine with the signing.
  */
sealed trait Payload
15 |
object Payload {
  // Messages we initiate vs. messages we receive as answers.
  sealed trait Request extends Payload
  sealed trait Response extends Payload

  /** Mixin for payloads carrying an expiration timestamp, so the
    * expiry can be (re)set generically when sending.
    */
  trait HasExpiration[T <: Payload] {
    // Absolute UNIX timestamp: seconds since epoch.
    def expiration: Long
    def withExpiration(at: Long): T
  }

  case class Ping(
      // Must be 4.
      version: Int,
      from: Node.Address,
      to: Node.Address,
      expiration: Long,
      // Current ENR sequence number of the sender.
      enrSeq: Option[Long]
  ) extends Request
      with HasExpiration[Ping] {
    def withExpiration(e: Long) = copy(expiration = e)
  }

  case class Pong(
      // Copy of `to` from the corresponding ping packet.
      to: Node.Address,
      // Hash of the corresponding ping packet.
      pingHash: Hash,
      expiration: Long,
      // Current ENR of the sender of Pong.
      enrSeq: Option[Long]
  ) extends Response
      with HasExpiration[Pong] {
    def withExpiration(e: Long) = copy(expiration = e)
  }

  case class FindNode(
      // 65-byte secp256k1 public key
      target: PublicKey,
      expiration: Long
  ) extends Request
      with HasExpiration[FindNode] {
    def withExpiration(e: Long) = copy(expiration = e)
  }

  case class Neighbors(
      nodes: List[Node],
      expiration: Long
  ) extends Response
      with HasExpiration[Neighbors] {
    def withExpiration(e: Long) = copy(expiration = e)
  }

  case class ENRRequest(
      expiration: Long
  ) extends Request
      with HasExpiration[ENRRequest] {
    def withExpiration(e: Long) = copy(expiration = e)
  }

  // ENRResponse has no expiration of its own; it echoes the request's hash.
  case class ENRResponse(
      requestHash: Hash,
      enr: EthereumNodeRecord
  ) extends Response
}
81 |
--------------------------------------------------------------------------------
/scalanet/discovery/src/io/iohk/scalanet/discovery/hash/Keccak256.scala:
--------------------------------------------------------------------------------
1 | package io.iohk.scalanet.discovery.hash
2 |
3 | import org.bouncycastle.crypto.digests.KeccakDigest
4 | import scodec.bits.BitVector
5 |
object Keccak256 {
  /** Hash the input bits with Keccak-256 and return the 32-byte digest as a tagged Hash. */
  def apply(data: BitVector): Hash = {
    val digest = new KeccakDigest(256)
    val message = data.toByteArray
    digest.update(message, 0, message.length)
    val out = Array.ofDim[Byte](32)
    digest.doFinal(out, 0)
    Hash(BitVector(out))
  }
}
16 |
--------------------------------------------------------------------------------
/scalanet/discovery/src/io/iohk/scalanet/discovery/hash/package.scala:
--------------------------------------------------------------------------------
1 | package io.iohk.scalanet.discovery
2 |
3 | import scodec.bits.BitVector
4 |
package object hash {

  // Phantom tag so that Hash is a distinct type from plain BitVector.
  sealed trait HashTag

  // A hash digest as tagged bits; produced e.g. by Keccak256.
  object Hash extends Tagger[BitVector, HashTag]
  type Hash = Hash.Tagged
}
12 |
--------------------------------------------------------------------------------
/scalanet/discovery/src/io/iohk/scalanet/kademlia/KMessage.scala:
--------------------------------------------------------------------------------
1 | package io.iohk.scalanet.kademlia
2 |
3 | import java.util.UUID
4 |
5 | import io.iohk.scalanet.kademlia.KRouter.NodeRecord
6 | import scodec.bits.BitVector
7 |
/** Base type of Kademlia wire messages. Every message carries the request ID
  * that pairs requests with responses and the sender's node record.
  */
sealed trait KMessage[A] {
  def requestId: UUID
  def nodeRecord: NodeRecord[A]
}
12 |
object KMessage {

  /** Messages that initiate an exchange and expect a [[KResponse]]. */
  sealed trait KRequest[A] extends KMessage[A]

  object KRequest {
    /** Ask a peer for the nodes it knows near `targetNodeId`. */
    case class FindNodes[A](requestId: UUID, nodeRecord: NodeRecord[A], targetNodeId: BitVector) extends KRequest[A]

    /** Liveness check; answered by a [[KResponse.Pong]]. */
    case class Ping[A](requestId: UUID, nodeRecord: NodeRecord[A]) extends KRequest[A]
  }

  /** Messages sent in reply to a [[KRequest]]. */
  sealed trait KResponse[A] extends KMessage[A]

  object KResponse {
    /** Reply to [[KRequest.FindNodes]] carrying the known node records. */
    case class Nodes[A](requestId: UUID, nodeRecord: NodeRecord[A], nodes: Seq[NodeRecord[A]]) extends KResponse[A]

    /** Reply to [[KRequest.Ping]]. */
    case class Pong[A](requestId: UUID, nodeRecord: NodeRecord[A]) extends KResponse[A]
  }
}
31 |
--------------------------------------------------------------------------------
/scalanet/discovery/src/io/iohk/scalanet/kademlia/KNetwork.scala:
--------------------------------------------------------------------------------
1 | package io.iohk.scalanet.kademlia
2 |
3 | import io.iohk.scalanet.kademlia.KMessage.{KRequest, KResponse}
4 | import io.iohk.scalanet.kademlia.KMessage.KRequest.{FindNodes, Ping}
5 | import io.iohk.scalanet.kademlia.KMessage.KResponse.{Nodes, Pong}
6 | import io.iohk.scalanet.kademlia.KRouter.NodeRecord
7 | import io.iohk.scalanet.peergroup.implicits._
8 | import io.iohk.scalanet.peergroup.{Channel, PeerGroup}
9 | import io.iohk.scalanet.peergroup.Channel.MessageReceived
10 | import monix.eval.Task
11 | import monix.reactive.Observable
12 | import scala.util.control.NonFatal
13 |
/** Transport abstraction for Kademlia messaging over a generic address
  * type `A`: serves incoming requests and issues FIND_NODES / PING
  * requests to peers.
  */
trait KNetwork[A] {

  /**
    * Server side requests stream.
    * @return An Observable for receiving FIND_NODES and PING requests.
    *         Each element contains a tuple consisting of a request
    *         with a function for accepting the required response.
    *         With current conventions, it is mandatory to provide
    *         Some(response) or None for all request types, in order that the
    *         implementation can close the channel.
    */
  def kRequests: Observable[(KRequest[A], Option[KResponse[A]] => Task[Unit])]

  /**
    * Send a FIND_NODES message to another peer.
    * @param to the peer to send the message to
    * @param request the FIND_NODES request
    * @return the future response
    */
  def findNodes(to: NodeRecord[A], request: FindNodes[A]): Task[Nodes[A]]

  /**
    * Send a PING message to another peer.
    * @param to the peer to send the message to
    * @param request the PING request
    * @return the future response
    */
  def ping(to: NodeRecord[A], request: Ping[A]): Task[Pong[A]]
}
43 |
object KNetwork {

  import scala.concurrent.duration._

  /** [[KNetwork]] implementation on top of a scalanet [[PeerGroup]].
    *
    * Server side: each incoming channel is expected to carry exactly one
    * request; the channel is released once the handler supplies either
    * Some(response) (which is sent back) or None.
    *
    * Client side: every request opens a fresh channel, sends the message
    * and waits for the first matching response; each step is bounded by
    * `requestTimeout`.
    */
  class KNetworkScalanetImpl[A](
      peerGroup: PeerGroup[A, KMessage[A]],
      requestTimeout: FiniteDuration = 3 seconds
  ) extends KNetwork[A] {

    override lazy val kRequests: Observable[(KRequest[A], Option[KResponse[A]] => Task[Unit])] = {
      peerGroup.serverEventObservable.collectChannelCreated
        .mergeMap {
          case (channel: Channel[A, KMessage[A]], release: Task[Unit]) =>
            // NOTE: We cannot use mapEval with a Task here, because that would hold up
            // the handling of further incoming requests. For example if instead of a
            // request we got an incoming "response" type message that the collect
            // discards, `headL` would eventually time out but while we wait for
            // that the next incoming channel would not be picked up.
            Observable.fromTask {
              channel.channelEventObservable
                .collect { case MessageReceived(req: KRequest[A]) => req }
                .headL
                .timeout(requestTimeout)
                .map { request =>
                  Some {
                    request -> { (maybeResponse: Option[KResponse[A]]) =>
                      // Send the response when one is given; release the
                      // channel in either case.
                      maybeResponse
                        .fold(Task.unit) { response =>
                          channel.sendMessage(response).timeout(requestTimeout)
                        }
                        .guarantee(release)
                    }
                  }
                }
                .onErrorHandleWith {
                  case NonFatal(_) =>
                    // Most likely it wasn't a request that initiated the channel.
                    release.as(None)
                }
            }
        }
        .collect { case Some(pair) => pair }
    }

    override def findNodes(to: NodeRecord[A], request: FindNodes[A]): Task[Nodes[A]] = {
      requestTemplate(to, request, { case n @ Nodes(_, _, _) => n })
    }

    override def ping(to: NodeRecord[A], request: Ping[A]): Task[Pong[A]] = {
      requestTemplate(to, request, { case p @ Pong(_, _) => p })
    }

    // Open a fresh client channel to the peer, run the request-response
    // exchange on it and close the channel afterwards (via Resource.use).
    private def requestTemplate[Request <: KRequest[A], Response <: KResponse[A]](
        to: NodeRecord[A],
        message: Request,
        pf: PartialFunction[KMessage[A], Response]
    ): Task[Response] = {
      peerGroup
        .client(to.routingAddress)
        .use { clientChannel =>
          sendRequest(message, clientChannel, pf)
        }
    }

    // Send the request and wait, up to `requestTimeout`, for the first
    // message on the channel matching the expected response shape.
    private def sendRequest[Request <: KRequest[A], Response <: KResponse[A]](
        message: Request,
        clientChannel: Channel[A, KMessage[A]],
        pf: PartialFunction[KMessage[A], Response]
    ): Task[Response] = {
      for {
        _ <- clientChannel.sendMessage(message).timeout(requestTimeout)
        // This assumes that `requestTemplate` always opens a new channel.
        response <- clientChannel.channelEventObservable
          .collect {
            case MessageReceived(m) if pf.isDefinedAt(m) => pf(m)
          }
          .headL
          .timeout(requestTimeout)
      } yield response
    }
  }
}
126 |
--------------------------------------------------------------------------------
/scalanet/discovery/src/io/iohk/scalanet/kademlia/TimeSet.scala:
--------------------------------------------------------------------------------
1 | package io.iohk.scalanet.kademlia
2 |
3 | import java.time.Clock
4 | import java.time.Clock.systemUTC
5 |
6 | import scala.collection.{Set, AbstractSet}
7 | import scala.collection.immutable.{HashMap, ListSet}
8 |
/** An immutable set that remembers, for each element, the clock reading
  * (in milliseconds) at which it was most recently added. Re-adding
  * ("touching") an element moves it to the end of the underlying ListSet
  * and refreshes its timestamp.
  */
class TimeSet[T] private (val clock: Clock, val timestamps: HashMap[T, Long], val underlying: ListSet[T])
    extends AbstractSet[T] {

  private def this(clock: Clock) = this(clock, HashMap.empty[T, Long], ListSet.empty[T])

  private def this() = this(systemUTC())

  override def toString(): String =
    underlying.map(e => s"($e, ${timestamps(e)})").mkString(", ")

  override def contains(elem: T): Boolean =
    underlying(elem)

  override def +(elem: T): TimeSet[T] =
    addAll(elem)

  override def -(elem: T): TimeSet[T] =
    new TimeSet[T](clock, timestamps - elem, underlying - elem)

  override def iterator: Iterator[T] =
    underlying.iterator

  /** Elements of this set that are not in `that`. */
  def diff(that: Set[T]): Set[T] =
    underlying &~ that

  /** Refresh the timestamp of `elem`, adding it if absent. */
  def touch(elem: T): TimeSet[T] =
    addAll(elem)

  private def addAll(elems: T*): TimeSet[T] = {
    val now = clock.millis()
    var acc: TimeSet[T] = this
    // Remove-then-add so a touched element moves to the end of the ListSet.
    for (e <- elems) {
      acc = new TimeSet[T](clock, acc.timestamps + (e -> now), (acc.underlying - e) + e)
    }
    acc
  }
}

object TimeSet {

  // One shared empty instance; safe because TimeSet is immutable.
  private val emptyInstance = new TimeSet[Any]()

  def empty[T]: TimeSet[T] = emptyInstance.asInstanceOf[TimeSet[T]]

  def apply[T](elems: T*): TimeSet[T] =
    new TimeSet[T]().addAll(elems: _*)

  def apply[T](clock: Clock, elems: T*): TimeSet[T] =
    new TimeSet[T](clock).addAll(elems: _*)
}
63 |
--------------------------------------------------------------------------------
/scalanet/discovery/src/io/iohk/scalanet/kademlia/Xor.scala:
--------------------------------------------------------------------------------
1 | package io.iohk.scalanet.kademlia
2 |
3 | import scodec.bits.BitVector
4 |
/** The XOR metric used by Kademlia. */
object Xor {

  /** Distance between two equal-length bit vectors: their XOR interpreted
    * as a non-negative big-endian integer.
    */
  def d(a: BitVector, b: BitVector): BigInt = {
    assert(a.length == b.length)
    val diff = a xor b
    // signum = 1 forces a non-negative interpretation of the magnitude.
    BigInt(1, padToByteBoundary(diff).toByteArray)
  }

  // Left-pad with zero bits up to a whole number of bytes, keeping the
  // significant bits right-aligned so toByteArray preserves the value.
  private def padToByteBoundary(bits: BitVector): BitVector =
    BitVector.low(nextMultipleOfEight(bits.length) - bits.length) ++ bits

  private def nextMultipleOfEight(n: Long): Long =
    n + (8 - n % 8) % 8
}
20 |
--------------------------------------------------------------------------------
/scalanet/discovery/src/io/iohk/scalanet/kademlia/XorOrdering.scala:
--------------------------------------------------------------------------------
1 | package io.iohk.scalanet.kademlia
2 |
3 | import cats.Order
4 | import io.iohk.scalanet.kademlia.KRouter.NodeRecord
5 | import scodec.bits.BitVector
6 |
/** Orders bit vectors by their XOR distance from a fixed `base` id.
  * All compared vectors must have the same bit length as `base`.
  */
class XorOrdering(val base: BitVector) extends Ordering[BitVector] {

  override def compare(lhs: BitVector, rhs: BitVector): Int = {
    if (lhs.length != base.length || rhs.length != base.length)
      throw new IllegalArgumentException(
        s"Unmatched bit lengths for bit vectors in XorOrdering. (base, lhs, rhs) = ($base, $lhs, $rhs)"
      )
    // BigInt.compare already yields -1/0/1, matching the manual branching.
    Xor.d(lhs, base) compare Xor.d(rhs, base)
  }
}
24 |
object XorOrdering {

  /** Create an ordering that uses the XOR distance as well as a unique
    * secondary index (based on the object hash) so values at the same
    * distance can still be distinguished from each other. This is required
    * for a SortedSet to work correctly, otherwise it just keeps one of the
    * values at any given distance.
    *
    * In practice it shouldn't matter since all keys are unique, therefore
    * they all have a different distance, but in pathological tests it's not
    * intuitive that sets of different nodes with the same ID but different
    * attributes disappear from the set.
    *
    * It could also be an attack vector if a malicious node deliberately
    * fabricates nodes that look like the target but with different ports
    * for example, so the SortedSet would keep a random instance.
    *
    * The method has a `B <: BitVector` generic parameter so the compiler
    * warns us if we're trying to compare different tagged types.
    */
  def apply[A, B <: BitVector](f: A => B)(base: B): Ordering[A] = {
    val xorOrdering = new XorOrdering(base)
    val tupleOrdering = Ordering.Tuple2(xorOrdering, Ordering.Int)
    Ordering.by[A, (BitVector, Int)] { x =>
      // Using hashCode to make them unique from each other within the same distance.
      // NOTE: hashCode is not strictly unique, so collisions are still
      // theoretically possible within a distance.
      f(x) -> x.hashCode
    }(tupleOrdering)
  }
}
54 |
/** Ordering of NodeRecords by the XOR distance of their ids from `base`. */
object XorNodeOrdering {
  def apply[A](base: BitVector): Ordering[NodeRecord[A]] =
    XorOrdering[NodeRecord[A], BitVector](_.id)(base)
}
59 |
/** cats.Order adapter delegating to [[XorNodeOrdering]] for the given base id. */
class XorNodeOrder[A](val base: BitVector) extends Order[NodeRecord[A]] {
  val xorNodeOrdering = XorNodeOrdering[A](base)

  override def compare(lhs: NodeRecord[A], rhs: NodeRecord[A]): Int =
    xorNodeOrdering.compare(lhs, rhs)
}
66 |
--------------------------------------------------------------------------------
/scalanet/discovery/src/io/iohk/scalanet/kademlia/codec/DefaultCodecs.scala:
--------------------------------------------------------------------------------
1 | package io.iohk.scalanet.kademlia.codec
2 |
3 | import io.iohk.scalanet.kademlia.KMessage
4 | import io.iohk.scalanet.kademlia.KMessage.KRequest.{FindNodes, Ping}
5 | import io.iohk.scalanet.kademlia.KMessage.KResponse.{Nodes, Pong}
6 | import scodec.codecs.{Discriminated, Discriminator, uint4}
7 | import scodec.Codec
8 |
9 | /** Encodings for scodec. */
/** Encodings for scodec. */
object DefaultCodecs extends DefaultCodecDerivations {
  /** Codec for any KMessage, given a codec for the address type. */
  implicit def kMessageCodec[A: Codec]: Codec[KMessage[A]] =
    deriveKMessageCodec[A]
}
14 |
/** Discriminators mapping each KMessage subtype to a 4-bit tag for the
  * derived coproduct codec: FindNodes=0, Ping=1, Nodes=2, Pong=3.
  * These values are part of the wire format — do not renumber.
  */
trait DefaultCodecDerivations {
  implicit def kMessageDiscriminator[A]: Discriminated[KMessage[A], Int] =
    Discriminated[KMessage[A], Int](uint4)

  implicit def findNodesDiscriminator[A]: Discriminator[KMessage[A], FindNodes[A], Int] =
    Discriminator[KMessage[A], FindNodes[A], Int](0)

  implicit def pingDiscriminator[A]: Discriminator[KMessage[A], Ping[A], Int] =
    Discriminator[KMessage[A], Ping[A], Int](1)

  implicit def nodesDiscriminator[A]: Discriminator[KMessage[A], Nodes[A], Int] =
    Discriminator[KMessage[A], Nodes[A], Int](2)

  implicit def pongDiscriminator[A]: Discriminator[KMessage[A], Pong[A], Int] =
    Discriminator[KMessage[A], Pong[A], Int](3)

  // Summons the KMessage codec via scodec's implicit derivation, using the
  // discriminators above plus the seqCoded and default codecs in scope.
  protected def deriveKMessageCodec[A: Codec]: Codec[KMessage[A]] = {
    import io.iohk.scalanet.codec.DefaultCodecs.seqCoded
    import scodec.codecs.implicits._
    implicitly[Codec[KMessage[A]]]
  }
}
37 |
--------------------------------------------------------------------------------
/scalanet/discovery/ut/resources/logback-test.xml:
--------------------------------------------------------------------------------
<!-- NOTE(review): the XML tags of this file were lost in extraction; only
     the encoder pattern survived. Reconstructed a minimal console logging
     config around it — verify against the original logback-test.xml. -->
<configuration>
  <appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
    <encoder>
      <pattern>%t %0logger %-5level %msg%n</pattern>
    </encoder>
  </appender>
  <root level="INFO">
    <appender-ref ref="STDOUT" />
  </root>
</configuration>
--------------------------------------------------------------------------------
/scalanet/discovery/ut/src/io/iohk/scalanet/discovery/crypto/SigAlgSpec.scala:
--------------------------------------------------------------------------------
1 | package io.iohk.scalanet.discovery.crypto
2 |
3 | import org.scalatest._
4 |
/** Placeholder spec for SigAlg implementations — no test cases yet.
  * The links below point at the material intended for the eventual tests.
  */
class SigAlgSpec extends FlatSpec with Matchers {
  // Use test vectors from https://wizardforcel.gitbooks.io/practical-cryptography-for-developers-book/content/digital-signatures/ecdsa-sign-verify-examples.html
  // Implement recovery based on https://github.com/ConsenSysMesh/cava/blob/master/crypto/src/main/java/net/consensys/cava/crypto/SECP256K1.java
}
9 |
--------------------------------------------------------------------------------
/scalanet/discovery/ut/src/io/iohk/scalanet/discovery/ethereum/EthereumNodeRecordSpec.scala:
--------------------------------------------------------------------------------
1 | package io.iohk.scalanet.discovery.ethereum
2 |
3 | import io.iohk.scalanet.discovery.ethereum.codecs.DefaultCodecs
4 | import io.iohk.scalanet.discovery.ethereum.v4.mocks.MockSigAlg
5 | import java.net.InetAddress
6 | import org.scalatest._
7 |
/** Checks that EthereumNodeRecord.fromNode emits the IPv4 ("ip"/"tcp"/"udp")
  * or IPv6 ("ip6"/"tcp6"/"udp6") attribute keys depending on the node's
  * address family, and that the address round-trips via fromEnr.
  * Uses MockSigAlg so no real cryptography is involved.
  */
class EthereumNodeRecordSpec extends FlatSpec with Matchers {
  import DefaultCodecs._
  import EthereumNodeRecord.Keys

  implicit val sigalg = new MockSigAlg()
  val (publicKey, privateKey) = sigalg.newKeyPair

  behavior of "fromNode"

  it should "use the right keys for IPv6 addresses" in {
    val addr = InetAddress.getByName("2001:0db8:85a3:0000:0000:8a2e:0370:7334")
    val node = Node(publicKey, Node.Address(addr, 30000, 40000))

    val enr = EthereumNodeRecord.fromNode(node, privateKey, seq = 1).require
    Inspectors.forAll(List(Keys.ip6, Keys.tcp6, Keys.udp6)) { k =>
      enr.content.attrs should contain key (k)
    }
    Inspectors.forAll(List(Keys.ip, Keys.tcp, Keys.udp)) { k =>
      enr.content.attrs should not contain key(k)
    }

    // The address must survive the Node -> ENR -> Node.Address round trip.
    val nodeAddress = Node.Address.fromEnr(enr)
    nodeAddress shouldBe Some(node.address)
  }

  it should "use the right keys for IPv4 addresses" in {
    val addr = InetAddress.getByName("127.0.0.1")
    val node = Node(publicKey, Node.Address(addr, 31000, 42000))

    val enr = EthereumNodeRecord.fromNode(node, privateKey, seq = 2).require
    Inspectors.forAll(List(Keys.ip6, Keys.tcp6, Keys.udp6)) { k =>
      enr.content.attrs should not contain key(k)
    }
    Inspectors.forAll(List(Keys.ip, Keys.tcp, Keys.udp)) { k =>
      enr.content.attrs should contain key (k)
    }

    // The address must survive the Node -> ENR -> Node.Address round trip.
    val nodeAddress = Node.Address.fromEnr(enr)
    nodeAddress shouldBe Some(node.address)
  }
}
49 |
--------------------------------------------------------------------------------
/scalanet/discovery/ut/src/io/iohk/scalanet/discovery/ethereum/NodeSpec.scala:
--------------------------------------------------------------------------------
1 | package io.iohk.scalanet.discovery.ethereum
2 |
3 | import org.scalatest._
4 | import java.net.InetAddress
5 | import org.scalatest.prop.TableDrivenPropertyChecks
6 | import io.iohk.scalanet.peergroup.InetMultiAddress
7 | import java.net.InetSocketAddress
8 |
/** Table-driven tests for Node.Address.checkRelay: whether an address
  * relayed by a given sender should be accepted (e.g. private/special
  * ranges are only acceptable from other local senders).
  */
class NodeSpec extends FlatSpec with Matchers with TableDrivenPropertyChecks {

  behavior of "Node.Address.checkRelay"

  // (sender IP, relayed IP, expected validity) triples; see individual rows
  // for the cases covered: loopback, private ranges, public ranges,
  // wildcard/broadcast and special-purpose (192.175.48.x) addresses.
  val cases = Table(
    ("sender", "relayed", "isValid"),
    ("localhost", "localhost", true),
    ("127.0.0.1", "192.168.1.2", true),
    ("127.0.0.1", "140.82.121.4", true),
    ("140.82.121.4", "192.168.1.2", false),
    ("140.82.121.4", "52.206.42.104", true),
    ("140.82.121.4", "0.0.0.0", false),
    ("140.82.121.4", "255.255.255.255", false),
    ("127.0.0.1", "0.0.0.0", false),
    ("127.0.0.1", "192.175.48.127", false)
  )

  it should "correctly calculate the flag for sender-address pairs" in {
    forAll(cases) {
      case (sender, relayed, isValid) =>
        withClue(s"$relayed from $sender") {
          val senderIP = InetAddress.getByName(sender)
          val relayedIP = InetAddress.getByName(relayed)

          Node.Address.checkRelay(sender = senderIP, address = relayedIP) shouldBe isValid
        }
    }
  }

  it should "work on the address instance" in {
    forAll(cases) {
      case (sender, relayed, isValid) =>
        withClue(s"$relayed from $sender") {
          val nodeAddress = Node.Address(InetAddress.getByName(relayed), 30000, 40000)
          val senderMulti = InetMultiAddress(new InetSocketAddress(InetAddress.getByName(sender), 50000))

          nodeAddress.checkRelay(senderMulti) shouldBe isValid
        }
    }
  }
}
50 |
--------------------------------------------------------------------------------
/scalanet/discovery/ut/src/io/iohk/scalanet/discovery/ethereum/v4/KBucketsWithSubnetLimitsSpec.scala:
--------------------------------------------------------------------------------
1 | package io.iohk.scalanet.discovery.ethereum.v4
2 |
3 | import org.scalatest._
4 | import java.net.InetSocketAddress
5 | import io.iohk.scalanet.discovery.ethereum.v4.KBucketsWithSubnetLimits.SubnetLimits
6 | import io.iohk.scalanet.discovery.ethereum.Node
7 | import io.iohk.scalanet.discovery.hash.Keccak256
8 | import java.net.InetAddress
9 | import io.iohk.scalanet.discovery.ethereum.v4.DiscoveryNetwork.Peer
10 | import scodec.bits.BitVector
11 | import io.iohk.scalanet.discovery.crypto.PublicKey
12 |
/** Tests for KBucketsWithSubnetLimits: k-buckets that additionally cap how
  * many peers from the same IP subnet (by prefix length) may occupy a
  * single bucket and the table as a whole.
  */
class KBucketsWithSubnetLimitsSpec extends FlatSpec with Matchers with Inspectors {

  // For the tests I only care about the IP addresses; a 1-to-1 mapping is convenient.
  def fakeNodeId(address: InetAddress): Node.Id =
    PublicKey(Keccak256(BitVector(address.getAddress)))

  def makeNode(address: InetSocketAddress) =
    Node(fakeNodeId(address.getAddress), Node.Address(address.getAddress, address.getPort, address.getPort))

  def makePeer(address: InetAddress, port: Int = 30303) =
    Peer[InetSocketAddress](id = fakeNodeId(address), address = new InetSocketAddress(address, port))

  def makeIp(name: String) = InetAddress.getByName(name)

  val localNode = makeNode(new InetSocketAddress("127.0.0.1", 30303))
  // /24 subnets, at most 2 peers per subnet per bucket, 10 per subnet per table.
  val defaultLimits = SubnetLimits(prefixLength = 24, forBucket = 2, forTable = 10)

  trait Fixture {
    lazy val limits = defaultLimits
    lazy val ips: Vector[String] = Vector.empty
    lazy val peers = ips.map(ip => makePeer(makeIp(ip)))
    // The buckets after adding every peer in `ips` order.
    lazy val kBuckets = peers.foldLeft(KBucketsWithSubnetLimits(localNode, limits = limits))(_.add(_))
  }

  behavior of "KBucketsWithSubnetLimits"

  it should "increment the count of the subnet after add" in new Fixture {
    override lazy val ips = Vector("5.67.8.9", "5.67.8.10", "5.67.1.2")
    val subnet = makeIp("5.67.8.0")
    val idx = kBuckets.getBucket(peers.head)._1
    kBuckets.tableLevelCounts(subnet) shouldBe 2
    kBuckets.tableLevelCounts.values should contain theSameElementsAs List(2, 1)
    kBuckets.bucketLevelCounts(idx)(subnet) shouldBe >=(1)
  }

  it should "not increment the count if the peer is already in the table" in new Fixture {
    override lazy val ips = Vector("5.67.8.9", "5.67.8.9", "5.67.8.9")
    val subnet = makeIp("5.67.8.0")
    val idx = kBuckets.getBucket(peers.head)._1
    kBuckets.tableLevelCounts(subnet) shouldBe 1
    kBuckets.bucketLevelCounts(idx)(subnet) shouldBe 1
  }

  it should "decrement the count after removal" in new Fixture {
    override lazy val ips = Vector("5.67.8.9", "5.67.8.10")

    val removed0 = kBuckets.remove(peers(0))
    removed0.tableLevelCounts.values.toList shouldBe List(1)
    removed0.bucketLevelCounts.values.toList shouldBe List(Map(makeIp("5.67.8.0") -> 1))

    // Removing the last peer of a subnet must drop the subnet entry entirely.
    val removed1 = removed0.remove(peers(1))
    removed1.tableLevelCounts shouldBe empty
    removed1.bucketLevelCounts shouldBe empty
  }

  it should "not decrement if the peer is not in the table" in new Fixture {
    override lazy val ips = Vector("1.2.3.4")
    // FIX: previously this test asserted on `kBuckets` (the pre-removal
    // state) and ignored `removed`, so it passed regardless of what
    // `remove` did. Assert that removing an absent peer leaves the counts
    // untouched.
    val removed = kBuckets.remove(makePeer(makeIp("1.2.3.5")))
    removed.tableLevelCounts shouldBe kBuckets.tableLevelCounts
    removed.bucketLevelCounts shouldBe kBuckets.bucketLevelCounts
    removed.tableLevelCounts should not be empty
    removed.bucketLevelCounts should not be empty
  }

  it should "not add IP if it violates the limits" in new Fixture {
    override lazy val ips = Vector.range(0, defaultLimits.forTable + 1).map(i => s"192.168.1.$i")

    forAll(peers.take(defaultLimits.forBucket)) { peer =>
      kBuckets.contains(peer) shouldBe true
    }

    forAtLeast(1, peers) { peer =>
      kBuckets.contains(peer) shouldBe false
    }

    forAll(peers) { peer =>
      val (_, bucket) = kBuckets.getBucket(peer)
      bucket.size shouldBe <=(defaultLimits.forBucket)
    }
  }

  it should "treat limits separately per subnet" in new Fixture {
    override lazy val ips = Vector.range(0, 256).map { i =>
      s"192.168.1.$i"
    } :+ "192.168.2.1"

    // The 192.168.2.0/24 subnet is empty, so its first peer must get in.
    kBuckets.contains(peers.last) shouldBe true
  }

  it should "add peers after removing previous ones" in new Fixture {
    override lazy val ips = Vector.range(0, 255).map(i => s"192.168.1.$i")

    kBuckets.tableLevelCounts.values.toList shouldBe List(defaultLimits.forTable)

    val peer = makePeer(makeIp("192.168.1.255"))
    kBuckets.add(peer).contains(peer) shouldBe false
    // Removing the very peer we're about to add frees no capacity.
    kBuckets.remove(peer).add(peer).contains(peer) shouldBe false
    kBuckets.remove(peers.head).add(peer).contains(peer) shouldBe true
  }

  it should "not use limits if the prefix is 0" in new Fixture {
    override lazy val limits = defaultLimits.copy(prefixLength = 0)
    override lazy val ips = Vector.range(0, 256).map(i => s"192.168.1.$i")

    kBuckets.tableLevelCounts.values.toList shouldBe List(256)
  }

  it should "not use limits if the table level limit is 0, but still apply the bucket limit" in new Fixture {
    override lazy val limits = defaultLimits.copy(forTable = 0)
    override lazy val ips = Vector.range(0, 256).map(i => s"192.168.1.$i")

    kBuckets.tableLevelCounts.values.toList.head shouldBe >(defaultLimits.forTable)
    forAll(peers) { peer =>
      val (i, _) = kBuckets.getBucket(peer)
      kBuckets.bucketLevelCounts(i).values.head shouldBe <=(defaultLimits.forBucket)
    }
  }

  it should "not limit buckets if the bucket level limit is 0" in new Fixture {
    override lazy val limits = defaultLimits.copy(forBucket = 0)
    override lazy val ips = Vector.range(0, 256).map(i => s"192.168.1.$i")

    kBuckets.tableLevelCounts.values.toList shouldBe List(limits.forTable)
    forAtLeast(1, peers) { peer =>
      val (i, _) = kBuckets.getBucket(peer)
      kBuckets.bucketLevelCounts(i).values.head shouldBe >(defaultLimits.forBucket)
    }
  }
}
140 |
--------------------------------------------------------------------------------
/scalanet/discovery/ut/src/io/iohk/scalanet/discovery/ethereum/v4/PacketSpec.scala:
--------------------------------------------------------------------------------
1 | package io.iohk.scalanet.discovery.ethereum.v4
2 |
3 | import io.iohk.scalanet.discovery.crypto.{Signature, PrivateKey, PublicKey}
4 | import io.iohk.scalanet.discovery.hash.{Hash, Keccak256}
5 | import io.iohk.scalanet.discovery.ethereum.codecs.DefaultCodecs
6 | import io.iohk.scalanet.discovery.ethereum.v4.mocks.MockSigAlg
7 | import org.scalatest._
8 | import scodec.{Attempt, Codec, Err}
9 | import scodec.bits.BitVector
10 | import scala.util.Random
11 |
/** Tests for the discovery v4 Packet codec (encode/decode size limits) and
  * the pack/unpack signing flow, using MockSigAlg in place of real
  * Secp256k1 cryptography.
  */
class PacketSpec extends FlatSpec with Matchers {

  import DefaultCodecs._
  implicit val sigalg = new MockSigAlg()

  implicit val packetCodec = Packet.packetCodec(allowDecodeOverMaxPacketSize = false)

  // Byte-size equivalents of the bit-size constants on Packet.
  val MaxPacketBytesSize = Packet.MaxPacketBitsSize / 8
  val MacBytesSize = Packet.MacBitsSize / 8
  val SigBytesSize = Packet.SigBitsSize / 8
  val MaxDataBytesSize = MaxPacketBytesSize - MacBytesSize - SigBytesSize

  // n random bytes as a BitVector.
  def nBytesAsBits(n: Int): BitVector = {
    val bytes = Array.ofDim[Byte](n)
    Random.nextBytes(bytes)
    BitVector(bytes)
  }

  // A syntactically well-formed (but unsigned/unhashed) random packet.
  def randomPacket(
      hashBytesSize: Int = MacBytesSize,
      sigBytesSize: Int = SigBytesSize,
      dataBytesSize: Int = MaxDataBytesSize
  ): Packet =
    Packet(
      hash = Hash(nBytesAsBits(hashBytesSize)),
      signature = Signature(nBytesAsBits(sigBytesSize)),
      data = nBytesAsBits(dataBytesSize)
    )

  // Assert the attempt failed with exactly the given error message.
  def expectFailure(msg: String)(attempt: Attempt[_]) = {
    attempt match {
      case Attempt.Successful(_) => fail(s"Expected to fail with $msg; got success.")
      case Attempt.Failure(err) => err.messageWithContext shouldBe msg
    }
  }

  behavior of "encode"

  it should "succeed on a random packet within size limits" in {
    Codec.encode(randomPacket()).isSuccessful shouldBe true
  }

  it should "fail if data exceeds the maximum size" in {
    expectFailure("Encoded packet exceeded maximum size.") {
      Codec.encode(randomPacket(dataBytesSize = MaxDataBytesSize + 1))
    }
  }

  it should "fail if the hash has wrong size" in {
    expectFailure("Unexpected hash size.") {
      Codec.encode(randomPacket(hashBytesSize = MacBytesSize * 2))
    }
  }

  it should "fail if the signature has wrong size" in {
    expectFailure("Unexpected signature size.") {
      Codec.encode(randomPacket(sigBytesSize = SigBytesSize - 1))
    }
  }

  behavior of "decode"

  it should "succeed with a packet size within the allowed maximum" in {
    Codec.decode[Packet](nBytesAsBits(MaxPacketBytesSize)).isSuccessful shouldBe true
  }

  it should "fail if the data exceeds the maximum size" in {
    expectFailure("Packet to decode exceeds maximum size.") {
      Codec.decode[Packet](nBytesAsBits(MaxPacketBytesSize + 1))
    }
  }

  it should "optionally allow the data to exceed the maximum size" in {
    val permissiblePacketCodec: Codec[Packet] = Packet.packetCodec(allowDecodeOverMaxPacketSize = true)
    Codec.decode[Packet](nBytesAsBits(MaxPacketBytesSize * 2))(permissiblePacketCodec).isSuccessful shouldBe true
  }

  it should "fail if there's less data than the hash size" in {
    expectFailure(
      s"Hash: cannot acquire ${Packet.MacBitsSize} bits from a vector that contains ${Packet.MacBitsSize - 8} bits"
    ) {
      Codec.decode[Packet](nBytesAsBits(MacBytesSize - 1))
    }
  }

  it should "fail if there's less data than the signature size" in {
    expectFailure(
      s"Signature: cannot acquire ${Packet.SigBitsSize} bits from a vector that contains ${Packet.SigBitsSize - 8} bits"
    ) {
      Codec.decode[Packet](nBytesAsBits(MacBytesSize + SigBytesSize - 1))
    }
  }

  // A payload packed with a random key pair, shared by the pack/unpack tests.
  trait PackFixture {
    val payload = Payload.FindNode(
      target = PublicKey(nBytesAsBits(sigalg.PublicKeyBytesSize)),
      expiration = System.currentTimeMillis
    )
    val privateKey = PrivateKey(nBytesAsBits(sigalg.PrivateKeyBytesSize))
    val publicKey = PublicKey(privateKey) // This is how the MockSignature will recover it.
    val packet = Packet.pack(payload, privateKey).require
  }

  behavior of "pack"

  it should "serialize the payload into the data" in new PackFixture {
    packet.data shouldBe Codec.encode[Payload](payload).require
    Codec[Payload].decodeValue(packet.data).require shouldBe payload
  }

  it should "calculate the signature based on the data" in new PackFixture {
    packet.signature shouldBe sigalg.sign(privateKey, packet.data)
  }

  it should "calculate the hash based on the signature and the data" in new PackFixture {
    packet.hash shouldBe Keccak256(packet.signature ++ packet.data)
  }

  behavior of "unpack"

  it should "deserialize the data into the payload" in new PackFixture {
    Packet.unpack(packet).require._1 shouldBe payload
  }

  it should "recover the public key" in new PackFixture {
    Packet.unpack(packet).require._2 shouldBe publicKey
  }

  it should "fail if the hash is incorrect" in new PackFixture {
    val corrupt = packet.copy(hash = Hash(nBytesAsBits(32)))

    expectFailure("Invalid hash.") {
      Packet.unpack(corrupt)
    }
  }

  it should "fail if the signature is incorrect" in new PackFixture {
    // Shadow the outer sigalg with one that rejects every signature, so
    // unpack fails at the recovery step even though the hash is valid.
    implicit val sigalg = new MockSigAlg {
      override def recoverPublicKey(signature: Signature, data: BitVector): Attempt[PublicKey] =
        Attempt.failure(Err("Invalid signature."))
    }
    val randomSig = Signature(nBytesAsBits(32))
    val corrupt = packet.copy(signature = randomSig, hash = Keccak256(randomSig ++ packet.data))

    expectFailure("Invalid signature.") {
      Packet.unpack(corrupt)
    }
  }
}
161 |
--------------------------------------------------------------------------------
/scalanet/discovery/ut/src/io/iohk/scalanet/discovery/ethereum/v4/mocks/MockPeerGroup.scala:
--------------------------------------------------------------------------------
1 | package io.iohk.scalanet.discovery.ethereum.v4.mocks
2 |
3 | import cats.effect.Resource
4 | import io.iohk.scalanet.peergroup.PeerGroup
5 | import io.iohk.scalanet.peergroup.PeerGroup.ServerEvent
6 | import io.iohk.scalanet.peergroup.PeerGroup.ServerEvent.ChannelCreated
7 | import io.iohk.scalanet.peergroup.Channel.{ChannelEvent, MessageReceived}
8 | import io.iohk.scalanet.peergroup.Channel
9 | import monix.eval.Task
10 | import monix.execution.{Scheduler, BufferCapacity}
11 | import scala.collection.concurrent.TrieMap
12 | import monix.execution.atomic.AtomicInt
13 | import monix.catnap.ConcurrentQueue
14 | import scala.concurrent.duration._
15 |
/** In-memory PeerGroup for unit tests: channels are backed by unbounded
  * queues, no real networking takes place, and tests can inject incoming
  * channels via [[createServerChannel]].
  */
class MockPeerGroup[A, M](
    override val processAddress: A
)(implicit val s: Scheduler)
    extends PeerGroup[A, M] {

  // Client channels are cached per target address and reused across calls.
  private val channels = TrieMap.empty[A, MockChannel[A, M]]
  private val serverEvents = ConcurrentQueue[Task].unsafe[ServerEvent[A, M]](BufferCapacity.Unbounded())

  // Intended for the System Under Test to read incoming channels.
  override def nextServerEvent: Task[Option[PeerGroup.ServerEvent[A, M]]] =
    serverEvents.poll.map(Some(_))

  // Intended for the System Under Test to open outgoing channels.
  override def client(to: A): Resource[Task, Channel[A, M]] = {
    Resource.make(
      for {
        channel <- getOrCreateChannel(to)
        _ <- Task(channel.refCount.increment())
      } yield channel
    ) { channel =>
      Task(channel.refCount.decrement())
    }
  }

  // Fetch the cached channel towards `to`, creating it on first use.
  def getOrCreateChannel(to: A): Task[MockChannel[A, M]] =
    Task(channels.getOrElseUpdate(to, new MockChannel[A, M](processAddress, to)))

  // Simulate an incoming connection from `from`: enqueue a ChannelCreated
  // event whose release action decrements the channel's refCount.
  def createServerChannel(from: A): Task[MockChannel[A, M]] =
    for {
      channel <- Task(new MockChannel[A, M](processAddress, from))
      _ <- Task(channel.refCount.increment())
      event = ChannelCreated(channel, Task(channel.refCount.decrement()))
      _ <- serverEvents.offer(event)
    } yield channel
}
51 |
// In-memory channel with two one-way queues: one for messages the SUT sends
// out, one for messages the test injects towards the SUT.
class MockChannel[A, M](
    override val from: A,
    override val to: A
)(implicit val s: Scheduler)
    extends Channel[A, M] {

  // In lieu of actually closing the channel,
  // just count how many times it was opened and released.
  val refCount = AtomicInt(0)

  // Messages the SUT has sent, awaiting inspection by the test.
  private val messagesFromSUT = ConcurrentQueue[Task].unsafe[ChannelEvent[M]](BufferCapacity.Unbounded())
  // Messages the test has injected, awaiting consumption by the SUT.
  private val messagesToSUT = ConcurrentQueue[Task].unsafe[ChannelEvent[M]](BufferCapacity.Unbounded())

  // True when every acquisition has been matched by a release.
  def isClosed: Boolean =
    refCount.get() == 0

  // Messages coming from the System Under Test.
  override def sendMessage(message: M): Task[Unit] =
    messagesFromSUT.offer(MessageReceived(message))

  // Messages consumed by the System Under Test; never yields None.
  override def nextChannelEvent: Task[Option[Channel.ChannelEvent[M]]] =
    messagesToSUT.poll.map(Some(_))

  // Send a message from the test.
  def sendMessageToSUT(message: M): Task[Unit] =
    messagesToSUT.offer(MessageReceived(message))

  // Await the next message sent by the SUT, or None after `timeout`, so
  // tests can also assert that nothing was sent.
  def nextMessageFromSUT(timeout: FiniteDuration = 250.millis): Task[Option[ChannelEvent[M]]] =
    messagesFromSUT.poll.map(Some(_)).timeoutTo(timeout, Task.pure(None))
}
83 |
--------------------------------------------------------------------------------
/scalanet/discovery/ut/src/io/iohk/scalanet/discovery/ethereum/v4/mocks/MockSigAlg.scala:
--------------------------------------------------------------------------------
1 | package io.iohk.scalanet.discovery.ethereum.v4.mocks
2 |
3 | import io.iohk.scalanet.discovery.crypto.{Signature, PublicKey, PrivateKey, SigAlg}
4 | import scodec.bits.BitVector
5 | import scodec.Attempt
6 | import scala.util.Random
7 |
/** Fake signature scheme for tests: the public key equals the private key,
  * and "signing" XORs the (padded) key with the data, so the key can be
  * recovered from a signature by XOR-ing with the same data again.
  * Deterministic and fast, but provides no security whatsoever.
  */
class MockSigAlg extends SigAlg {
  override val name = "MockSignature"

  // A Secp256k1 public key is 32 bytes compressed or 64 bytes uncompressed,
  // with a 1 byte prefix showing which version it is.
  // See https://davidederosa.com/basic-blockchain-programming/elliptic-curve-keys
  //
  // However in the discovery v4 protocol the prefix is omitted.
  override val PublicKeyBytesSize = 64
  // Normal Secp256k1 would be 32 bytes, but here we use the same value for
  // both public and private.
  override val PrivateKeyBytesSize = 64
  // A normal Secp256k1 signature consists of 2 bigints followed by a recovery ID,
  // but it can be just 64 bytes if that's omitted, like in the ENR.
  override val SignatureBytesSize = 65

  // For testing I'll use the same key for public and private,
  // so that I can recover the public key from the signature.
  override def newKeyPair: (PublicKey, PrivateKey) = {
    val bytes = Array.ofDim[Byte](PrivateKeyBytesSize)
    Random.nextBytes(bytes)
    val privateKey = PrivateKey(BitVector(bytes))
    val publicKey = PublicKey(privateKey)
    publicKey -> privateKey
  }

  // Sign by XOR-ing the private key with the (padded) data.
  override def sign(privateKey: PrivateKey, data: BitVector): Signature =
    Signature(xor(privateKey, data))

  // Mock signatures carry no recovery id, so this is the identity.
  override def removeRecoveryId(signature: Signature): Signature =
    signature

  // Valid iff recovering a key from the signature yields `publicKey`.
  override def verify(publicKey: PublicKey, signature: Signature, data: BitVector): Boolean =
    publicKey == recoverPublicKey(signature, data).require

  // XOR is an involution: signature ^ data gives back the (padded) key,
  // which is then truncated to the public key size. Never fails here.
  override def recoverPublicKey(signature: Signature, data: BitVector): Attempt[PublicKey] = {
    Attempt.successful(PublicKey(xor(signature, data).take(PublicKeyBytesSize * 8)))
  }

  override def toPublicKey(privateKey: PrivateKey): PublicKey =
    PublicKey(privateKey)

  // Keys are stored without a prefix byte already, so nothing to compress.
  override def compressPublicKey(publicKey: PublicKey): PublicKey =
    publicKey

  // Using XOR twice recovers the original data.
  // Pad the data so we don't lose the key if the data is shorter.
  private def xor(key: BitVector, data: BitVector): BitVector = {
    (pad(key) ^ pad(data)).take(SignatureBytesSize * 8)
  }

  // Right-pad with zero bits up to the signature size; longer inputs pass through.
  private def pad(bits: BitVector): BitVector =
    if (bits.length < SignatureBytesSize * 8) bits.padTo(SignatureBytesSize * 8) else bits
}
62 |
--------------------------------------------------------------------------------
/scalanet/discovery/ut/src/io/iohk/scalanet/discovery/hash/Keccak256Spec.scala:
--------------------------------------------------------------------------------
1 | package io.iohk.scalanet.discovery.hash
2 |
3 | import org.scalatest._
4 | import scodec.bits._
5 |
class Keccak256Spec extends FlatSpec with Matchers {
  behavior of "Keccak256"

  // Known-answer checks against published Keccak-256 test vectors.
  private def digest(input: String) =
    Keccak256(BitVector(input.getBytes)).toByteVector

  it should "hash empty data" in {
    digest("") shouldBe hex"c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"
  }

  it should "hash non-empty data" in {
    digest("abc") shouldBe hex"4e03657aea45a94fc7d47ba826c8d667c0d1e6e33a64a036ec44f58fa12d6c45"
  }
}
17 |
--------------------------------------------------------------------------------
/scalanet/discovery/ut/src/io/iohk/scalanet/kademlia/Generators.scala:
--------------------------------------------------------------------------------
1 | package io.iohk.scalanet.kademlia
2 |
3 | import io.iohk.scalanet.kademlia.KRouter.NodeRecord
4 | import org.scalacheck.Arbitrary.arbitrary
5 | import org.scalacheck.Gen
6 | import scodec.bits.BitVector
7 |
8 | import scala.collection.mutable.ListBuffer
9 | import scala.util.Random
10 |
object Generators {

  // Default id size used throughout the kademlia tests.
  val defaultBitLength = 16

  // Uniformly random bit vector of the given length.
  def genBitVector(bitLength: Int = defaultBitLength): Gen[BitVector] =
    for {
      bools <- Gen.listOfN(bitLength, arbitrary[Boolean])
    } yield BitVector.bits(bools)

  // Pair of independent random bit vectors of equal length.
  def genBitVectorPairs(
      bitLength: Int = defaultBitLength
  ): Gen[(BitVector, BitVector)] =
    for {
      v1 <- genBitVector(bitLength)
      v2 <- genBitVector(bitLength)
    } yield (v1, v2)

  // Triple of independent random bit vectors of equal length.
  def genBitVectorTrips(
      bitLength: Int = defaultBitLength
  ): Gen[(BitVector, BitVector, BitVector)] =
    for {
      v1 <- genBitVector(bitLength)
      v2 <- genBitVector(bitLength)
      v3 <- genBitVector(bitLength)
    } yield (v1, v2, v3)

  // All 2^bitLength bit vectors of the given length, visiting the cleared
  // bit before the set bit at each position.
  def genBitVectorExhaustive(
      bitLength: Int = defaultBitLength
  ): List[BitVector] = {
    def loop(acc: ListBuffer[BitVector], b: BitVector, i: Int, n: Int): Unit = {
      if (i == n) {
        acc.append(b)
      } else {
        loop(acc, b.clear(i), i + 1, n)
        loop(acc, b.set(i), i + 1, n)
      }
    }

    val l = ListBuffer[BitVector]()
    loop(l, BitVector.low(bitLength), 0, bitLength)
    l.toList
  }

  // All (2^bitLength)^3 triples of bit vectors of the given length.
  def genBitVectorTripsExhaustive(
      bitLength: Int
  ): List[(BitVector, BitVector, BitVector)] = {
    // Enumerate the 2^bitLength vectors once and reuse the list for every
    // position: the previous version re-ran the exhaustive enumeration for
    // each x and again for each (x, y), doing exponentially redundant work.
    // The resulting list and its order are unchanged.
    val vectors = genBitVectorExhaustive(bitLength)
    for {
      x <- vectors
      y <- vectors
      z <- vectors
    } yield (x, y, z)
  }

  // Non-Gen convenience: one random bit vector.
  def aRandomBitVector(bitLength: Int = defaultBitLength): BitVector =
    BitVector.bits(Range(0, bitLength).map(_ => Random.nextBoolean()))

  // Non-Gen convenience: a node record with a random id and random
  // 4-character alphanumeric addresses.
  def aRandomNodeRecord(
      bitLength: Int = defaultBitLength
  ): NodeRecord[String] = {
    NodeRecord(
      id = aRandomBitVector(bitLength),
      routingAddress = Random.alphanumeric.take(4).mkString,
      messagingAddress = Random.alphanumeric.take(4).mkString
    )
  }
}
77 |
--------------------------------------------------------------------------------
/scalanet/discovery/ut/src/io/iohk/scalanet/kademlia/KBucketsSpec.scala:
--------------------------------------------------------------------------------
1 | package io.iohk.scalanet.kademlia
2 |
3 | import java.security.SecureRandom
4 | import java.time.Clock
5 |
6 | import io.iohk.scalanet.kademlia.Generators._
7 | import io.iohk.scalanet.kademlia.KBucketsSpec._
8 | import org.scalatest.FlatSpec
9 | import org.scalatest.Matchers._
10 | import org.scalatestplus.scalacheck.ScalaCheckDrivenPropertyChecks._
11 | import scodec.bits.BitVector
12 |
13 | import scala.util.Random
14 |
class KBucketsSpec extends FlatSpec {

  behavior of "KBuckets"

  they should "retrieve the base node id" in {
    // A fresh KBuckets always contains its own base id.
    val id = aRandomBitVector()
    val kBuckets = new KBuckets(id, clock)

    kBuckets.contains(id) shouldBe true
    kBuckets.closestNodes(id, Int.MaxValue) shouldBe List(id)
  }

  they should "generate random id of the same length as base id" in {
    val baseId = aRandomBitVector()

    val randomId = KBuckets.generateRandomId(baseId.length, new SecureRandom())

    baseId.length shouldEqual randomId.length
  }

  // The shared `kb` fixture is used immutably below: add/touch/remove
  // return updated copies, so property iterations don't interfere.
  they should "retrieve any node added via put" in forAll(genBitVector()) { v =>
    kb.add(v).contains(v) shouldBe true
  }

  they should "retrieve any node added via touch" in forAll(genBitVector()) { v =>
    kb.touch(v).contains(v) shouldBe true
  }

  they should "not retrieve any node removed via remove" in forAll(genBitVector()) { v =>
    kb.add(v).remove(v).contains(v) shouldBe false
  }

  they should "reject addition of nodeIds with inconsistent length" in {
    // kb was built with the default 16-bit id, so a 24-bit id must be rejected.
    an[IllegalArgumentException] should be thrownBy kb.add(
      aRandomBitVector(bitLength = 24)
    )
  }

  they should "return the n closest nodes when N are available" in {
    // All 16 possible 4-bit ids, compared against a reference ranking
    // by XOR distance, for every requested count n.
    val ids: Seq[BitVector] = genBitVectorExhaustive(4)
    val arbitraryId: BitVector = ids(Random.nextInt(ids.length))
    val kBuckets = new KBuckets(arbitraryId, clock)

    val sortedRecords =
      ids.sortBy(nodeId => Xor.d(nodeId, arbitraryId))

    val kBuckets2 = ids.foldLeft(kBuckets)((acc, next) => acc.add(next))

    for (n <- 1 to ids.length) {
      val closestNodes = kBuckets2.closestNodes(arbitraryId, n)
      closestNodes shouldBe sortedRecords.take(n)
    }
  }

  they should "require the closest single node is the node itself" in {
    // d(x, x) == 0 is the unique minimum of the XOR metric, so the closest
    // node to any id present in the buckets is that id itself.

    val ids: Seq[BitVector] = genBitVectorExhaustive(4)
    val arbitraryId: BitVector = ids(Random.nextInt(ids.length))
    val kBuckets = new KBuckets(arbitraryId, clock)

    val kBuckets2 = ids.foldLeft(kBuckets)((acc, next) => acc.add(next))

    ids.foreach { nodeId =>
      kBuckets2.closestNodes(nodeId, 1) shouldBe List(nodeId)
    }
  }
}
82 |
// Shared fixtures: a real system clock and a KBuckets instance with a random
// (default 16-bit) base id, reused immutably across the property checks.
object KBucketsSpec {
  private val clock = Clock.systemUTC()

  private val kb = new KBuckets(aRandomBitVector(), clock)
}
88 |
--------------------------------------------------------------------------------
/scalanet/discovery/ut/src/io/iohk/scalanet/kademlia/KNetworkRequestProcessing.scala:
--------------------------------------------------------------------------------
1 | package io.iohk.scalanet.kademlia
2 |
3 | import io.iohk.scalanet.kademlia.KMessage.KRequest.{FindNodes, Ping}
4 | import io.iohk.scalanet.kademlia.KMessage.KResponse.{Nodes, Pong}
5 | import io.iohk.scalanet.kademlia.KMessage.{KRequest, KResponse}
6 | import monix.eval.Task
7 | import monix.execution.Scheduler
8 | import monix.reactive.Observable
9 |
10 | /**
11 | * If a user of KNetwork wanted to consume only one kind of request,
12 | * it is not sufficient to collect or filter the request stream, since it is
  * still necessary to invoke response handlers to close channels for excluded request types.
14 | * The code to do this is demonstrated here.
15 | * Note that findNodesRequests and pingRequests are mutually exclusive.
16 | */
object KNetworkRequestProcessing {

  implicit class KNetworkExtension[A](kNetwork: KNetwork[A])(implicit scheduler: Scheduler) {

    type KRequestT = (KRequest[A], Option[KResponse[A]] => Task[Unit])
    type FindNodesT = (FindNodes[A], Option[Nodes[A]] => Task[Unit])
    type PingT = (Ping[A], Option[Pong[A]] => Task[Unit])

    // Stream of FIND_NODES requests only; PING requests are answered with
    // None so their channels still get released.
    def findNodesRequests(): Observable[FindNodesT] =
      kNetwork.kRequests
        .map {
          case (findNodes @ FindNodes(_, _, _), respond) =>
            Some((findNodes, respond))
          case (_, respond) =>
            ignore(respond)
        }
        .collect { case Some(request) => request }

    // Stream of PING requests only; FIND_NODES requests are answered with
    // None so their channels still get released.
    def pingRequests(): Observable[PingT] =
      kNetwork.kRequests
        .map {
          case (ping @ Ping(_, _), respond) =>
            Some((ping, respond))
          case (_, respond) =>
            ignore(respond)
        }
        .collect { case Some(request) => request }

    // Answer an excluded request with None (releasing its channel) and drop
    // it from the stream.
    private def ignore(
        handler: Option[KResponse[A]] => Task[Unit]
    ): None.type = {
      handler(None).runSyncUnsafe()
      None
    }
  }
}
53 |
--------------------------------------------------------------------------------
/scalanet/discovery/ut/src/io/iohk/scalanet/kademlia/TimeSetSpec.scala:
--------------------------------------------------------------------------------
1 | package io.iohk.scalanet.kademlia
2 |
3 | import java.time.Clock
4 |
5 | import org.mockito.Mockito.when
6 | import org.scalatest.FlatSpec
7 | import org.scalatest.Matchers._
8 | import org.scalatestplus.mockito.MockitoSugar._
9 | import org.scalatestplus.scalacheck.ScalaCheckDrivenPropertyChecks._
10 |
11 | import scala.util.Random
12 |
class TimeSetSpec extends FlatSpec {

  private val random = new Random()
  // Mocked clock so the test controls time explicitly.
  private val clock = mock[Clock]

  "touch" should "resort elements by access time" in forAll { s: Set[String] =>
    {
      // Reset the mocked clock to t=0 for every generated sample.
      when(clock.millis()).thenReturn(0)
      val ss: Seq[String] = s.toSeq
      val ts = TimeSet(clock, ss: _*)
      val ssShuffled = random.shuffle(ss)

      // Touch elements in shuffled order, advancing the clock by 1ms before
      // each touch so every access gets a strictly later timestamp.
      val ts2 = ssShuffled.foldLeft(ts) { (acc, next) =>
        val millis = clock.millis()
        when(clock.millis()).thenReturn(millis + 1)
        acc.touch(next)
      }

      // Iteration order must now match the shuffled access order...
      ts2.zip(ssShuffled).foreach {
        case (l, r) =>
          l shouldBe r
      }
      // ...and touching must neither add nor drop elements.
      ts2.size shouldBe ss.size
    }
  }
}
39 |
--------------------------------------------------------------------------------
/scalanet/discovery/ut/src/io/iohk/scalanet/kademlia/XorOrderingSpec.scala:
--------------------------------------------------------------------------------
1 | package io.iohk.scalanet.kademlia
2 |
3 | import org.scalatest.FlatSpec
4 | import org.scalatest.Matchers._
5 | import scodec.bits.BitVector
6 | import io.iohk.scalanet.kademlia.KRouter.NodeRecord
7 | import scala.collection.SortedSet
8 |
class XorOrderingSpec extends FlatSpec {

  val id0 = BitVector.fromValidBin("0000")

  // All 16 possible 4-bit ids, so comparisons can be checked exhaustively.
  val ids: List[BitVector] = Generators.genBitVectorExhaustive(4)

  "NodeIdOrdering" should "return correct comparable values" in {
    // compare(a, b) must agree with the XOR distances of a and b to the base.
    ids.foreach { base =>
      val ordering = new XorOrdering(base)
      ids.foreach { a =>
        ids.foreach { b =>
          val result = ordering.compare(a, b)
          if (Xor.d(a, base) < Xor.d(b, base))
            result shouldBe -1
          else if (Xor.d(a, base) > Xor.d(b, base))
            result shouldBe 1
          else
            result shouldBe 0
        }
      }
    }
  }

  it should "throw if the lhs argument does not match the base bit length" in {
    val ordering = new XorOrdering(id0)
    val lhs = BitVector.fromValidBin("0000000000000000")
    val rhs = BitVector.fromValidBin("0000")

    an[IllegalArgumentException] should be thrownBy ordering.compare(lhs, rhs)
  }

  it should "throw if the rhs argument does not match the base bit length" in {
    val ordering = new XorOrdering(id0)
    val lhs = BitVector.fromValidBin("0000")
    val rhs = BitVector.fromValidBin("0000000000000000")

    an[IllegalArgumentException] should be thrownBy ordering.compare(lhs, rhs)
  }

  "XorNodeOrdering" should "work with SortedSet" in {
    // Both records have the same id, hence the same XOR distance; keeping
    // both in the set shows the ordering does not collapse distance ties.
    implicit val ordering = XorNodeOrdering[Int](id0)
    val node0 = NodeRecord[Int](BitVector.fromValidBin("0000"), 1, 2)
    val node1 = NodeRecord[Int](BitVector.fromValidBin("0000"), 3, 4)
    val nodes = SortedSet(node0, node1)
    nodes should have size 2
  }
}
56 |
--------------------------------------------------------------------------------
/scalanet/discovery/ut/src/io/iohk/scalanet/kademlia/XorSpec.scala:
--------------------------------------------------------------------------------
1 | package io.iohk.scalanet.kademlia
2 |
3 | import io.iohk.scalanet.kademlia.Generators._
4 | import io.iohk.scalanet.kademlia.Xor._
5 | import org.scalacheck.Gen.posNum
6 | import org.scalatest.FlatSpec
7 | import org.scalatest.Matchers._
8 | import org.scalatestplus.scalacheck.ScalaCheckDrivenPropertyChecks._
9 | import scodec.bits.BitVector
10 |
class XorSpec extends FlatSpec {

  // Metric-space properties of the XOR distance d (Kademlia paper, sec 2.1).

  it should "satisfy d(x,x) = 0" in forAll(genBitVector(8)) { x =>
    d(x, x) shouldBe 0
  }

  it should "satisfy d(x,y) > 0 when x != y" in forAll(genBitVectorPairs(8)) {
    case (x, y) =>
      if (x != y)
        d(x, y) > 0 shouldBe true
  }

  it should "satisfy the symmetry condition" in forAll(genBitVectorPairs(8)) {
    case (x, y) =>
      d(x, y) shouldBe d(y, x)
  }

  it should "satisfy the triangle equality" in forAll(genBitVectorTrips(8)) {
    case (x, y, z) =>
      d(x, z) <= d(x, y) + d(y, z) shouldBe true
  }

  it should "provide the correct maximal distance" in forAll(posNum[Int]) { bitCount =>
    // The distance between all-zeroes and all-ones is 2^bitCount - 1.
    val allZeroes = BitVector.low(bitCount)
    val allOnes = BitVector.high(bitCount)
    d(allZeroes, allOnes) shouldBe BigInt(2).pow(bitCount) - 1
  }

  it should "satisfy the unidirectional property (from the last para of section 2.1)" in
    genBitVectorTripsExhaustive(4).foreach {
      case (x, y, z) =>
        if (y != z)
          d(x, y) should not be d(x, z)
    }
}
54 |
--------------------------------------------------------------------------------
/scalanet/examples/readme.md:
--------------------------------------------------------------------------------
1 | ### Scalanet examples
2 | Scalanet is now capable of building against Scala 2.12.10 and 2.13.4
3 |
This guide uses the 2.13.4 build: `mill csm[2.13.4]...`
The same commands work with any other available Scala version.
6 |
To build against every supported version at once, replace the version with a placeholder: `mill csm[_]...`
8 |
9 | In order to run examples in this folder you should run
10 | ```bash
11 | mill csm[2.13.4].scalanet.examples.assembly
12 | java -cp out/csm/2.13.4/scalanet/examples/assembly/dest/out.jar [args]
13 | ```
14 |
15 | ### Kademlia Console
16 | This is a test app for kademlia, creating a simple console which allows the user to manually query for, add and
17 | remove entries from the kademlia node.
18 |
19 | After assembling the examples out.jar, run the following command to get a list of available command line options:
20 | ```bash
21 | java -cp out/csm/2.13.4/scalanet/examples/assembly/dest/out.jar io.iohk.scalanet.kconsole.App -h
22 | ```
23 |
24 | Here is an example of starting a simple, two-node network
25 |
26 | ```bash
27 | # First console
28 | $ java -cp out/csm/2.13.4/scalanet/examples/assembly/dest/out.jar io.iohk.scalanet.kconsole.App
29 |
30 | # ... log output ...
31 | Initialized with node record {"id":"a98e6fa629b7b4ae679748ad65915f1bf1178ac0","messaging-address":"localhost:52570","routing-address":"localhost:52569"}
32 |
33 | Command summary:
34 | get perform a lookup for the given nodeId and prints the record returned (if any).
35 | add adds the given node record to this node. The record format should be the same as that returned by get.
36 | remove remove the given nodeId from this nodes kbuckets.
37 | dump dump the contents of this nodes kbuckets to the console.
38 | help print this message.
39 | exit shutdown the node and quit the application.
40 |
41 | >
42 |
# In a second console start another node, bootstrapping it from the first...
44 | $ java -cp out/csm/2.13.4/scalanet/examples/assembly/dest/out.jar io.iohk.scalanet.kconsole.App -b '{"id":"a98e6fa629b7b4ae679748ad65915f1bf1178ac0","messaging-address":"localhost:52570","routing-address":"localhost:52569"}'
45 |
46 | Initialized with node record {"id":"442c01efed34c16002e5943932f2c765d45c3baa","messaging-address":"localhost:52561","routing-address":"localhost:52560"}
47 |
48 | Command summary:
49 | get perform a lookup for the given nodeId and prints the record returned (if any).
50 | add adds the given node record to this node. The record format should be the same as that returned by get.
51 | remove remove the given nodeId from this nodes kbuckets.
52 | dump dump the contents of this nodes kbuckets to the console.
53 | help print this message.
54 | exit shutdown the node and quit the application.
55 |
56 | # execute a dump command to display the current contents of the node's kbuckets
57 | > dump
58 |
59 | {"id":"442c01efed34c16002e5943932f2c765d45c3baa","messaging-address":"localhost:52561","routing-address":"localhost:52560"}
60 | {"id":"a98e6fa629b7b4ae679748ad65915f1bf1178ac0","messaging-address":"localhost:52570","routing-address":"localhost:52569"}
61 | ```
62 |
--------------------------------------------------------------------------------
/scalanet/examples/resources/kconsole-sample.conf:
--------------------------------------------------------------------------------
1 | {
2 | "alpha" : 3,
3 | "k" : 20,
4 | "known-peers" : [],
5 | "node-record" : {
6 | "id" : "a0fd10a54e202b7d9a4948b4890d14447bf93a08",
7 | "routing-address" : "127.0.0.1:1034"
8 | "messaging-address" : "127.0.0.1:1035",
9 | }
10 | }
--------------------------------------------------------------------------------
/scalanet/examples/resources/logback.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 | %t %0logger %-5level %msg%n
7 |
8 |
9 |
10 |
11 |
12 |
13 |
14 |
--------------------------------------------------------------------------------
/scalanet/examples/src/io/iohk/scalanet/kconsole/App.scala:
--------------------------------------------------------------------------------
1 | package io.iohk.scalanet.kconsole
2 | import java.io.File
3 | import java.nio.file.Path
4 | import cats.effect.ExitCode
5 | import io.iohk.scalanet.kconsole.Utils.{configToStr, generateRandomConfig}
6 | import io.iohk.scalanet.peergroup.InetMultiAddress
7 | import io.iohk.scalanet.kademlia.KRouter
8 | import monix.execution.Scheduler.Implicits.global
9 | import monix.eval.{Task, TaskApp}
10 | import scopt.OptionParser
11 | import scala.util.control.NonFatal
12 |
object App extends TaskApp with CommandParser {

  // Options parsed from the command line. k and alpha default to the
  // values also used when no config is supplied.
  case class CommandLineOptions(
      configFile: Option[Path] = None,
      generateConfig: Boolean = false,
      bootstrapRecord: Option[String] = None,
      k: Int = 20,
      alpha: Int = 3
  )

  // scopt definition of the command line interface.
  val optionsParser = new OptionParser[CommandLineOptions]("kconsole") {

    head("kconsole", "0.1")

    opt[File]('c', "config")
      .action((p, c) => c.copy(configFile = Some(p.toPath)))
      .text("""
        | Read config for the node from the given file.
        | The format is the same as that created using the -g option.
        |""".stripMargin)

    // NOTE(review): the example record below looks like placeholder values
    // (e.g. a hex id between the quotes) were lost in formatting — confirm
    // against the record format shown in the README.
    opt[String]('b', "bootstrap")
      .action((b, c) => c.copy(bootstrapRecord = Some(b)))
      .text("""
        | Specify a bootstrap node record in the format
        | {"id":"","messaging-address":":","routing-address":":"}
        |""".stripMargin)

    opt[Unit]('g', "generate")
      .action((_, c) => c.copy(generateConfig = true))
      .text("""
        | Generate a config, print it to stdout and exit.
        | You may then copy-and-paste this config into a file for later use as an argument to -c.
        |""".stripMargin)

    opt[Int]('k', "K")
      .action((k, c) => c.copy(k = k))
      .text("""
        | Override the node's k value.
        |""".stripMargin)

    opt[Int]('a', "alpha")
      .action((a, c) => c.copy(alpha = a))
      .text("""
        | Override the node's alpha value.
        |""".stripMargin)

    help('h', "help").text("print usage and exit")

    note("""
      | It is simplest to run the app with no arguments.
      | In this scenario, it will generate a node configuration and use that.
      | The -b option can be helpful if you wish to tell the node how
      | to join an existing network. Alternatively, you can use the 'add'
      | console command, to add a known node, after this node has started.
      | The default value of k (kademlia's connectivity parameter) is 20,
      | which is a very large value for a small, manually created network.
      | A better value is 2. You can set this with the -k parameter.
      | The default value of alpha (the node's concurrency parameter) is 3.
      | It is easier to understand what the node is doing if you set this
      | to 1 using the -a parameter.
      |""".stripMargin)
    // No cross-option validation needed; any parsed combination is accepted.
    checkConfig((c: CommandLineOptions) => success)
  }

  // Entry point: parse options, resolve a node config with precedence
  // bootstrap record > config file > randomly generated, then run the
  // interactive console inside the AppContext resource.
  override def run(args: List[String]): Task[ExitCode] = {
    Task(optionsParser.parse(args, CommandLineOptions())) flatMap {
      case None =>
        Task.pure(ExitCode.Error)

      case Some(options) =>
        if (options.generateConfig) {
          generateAndWriteConfigAndExit
        } else {
          val nodeConfig = configFromBootstrapOption(options)
            .orElse(configFromConfigFile(options))
            .getOrElse(randomConfig(options))

          AppContext(nodeConfig)
            .use { kRouter =>
              Task(ConsoleLoop.run(kRouter)).as(ExitCode.Success)
            }
            .onErrorRecover {
              case NonFatal(ex) =>
                System.err.println(s"Error running Kademlia: $ex")
                ExitCode.Error
            }
        }
    }
  }

  // Build a config from the -b option, if given, applying -k/-a overrides.
  private def configFromBootstrapOption(
      options: CommandLineOptions
  ): Option[KRouter.Config[InetMultiAddress]] = {
    options.bootstrapRecord
      .map(
        bootstrapRecordStr => configFromBootstrapOption(bootstrapRecordStr).copy(k = options.k, alpha = options.alpha)
      )
  }

  // Parse the bootstrap record and seed a freshly generated config with it.
  private def configFromBootstrapOption(
      bootstrapRecordStr: String
  ): KRouter.Config[InetMultiAddress] = {

    val bootstrapRecord = Utils.parseRecord(bootstrapRecordStr)
    generateRandomConfig.copy(knownPeers = Set(bootstrapRecord))
  }

  // Build a config from the -c file, if given, applying -k/-a overrides.
  private def configFromConfigFile(options: CommandLineOptions): Option[KRouter.Config[InetMultiAddress]] = {
    options.configFile.map(configFile => configFromConfigFile(configFile).copy(k = options.k, alpha = options.alpha))
  }

  // Load a node config via pureconfig; throws if the file is missing/invalid
  // (surfaced to the user by run's onErrorRecover).
  private def configFromConfigFile(
      configFile: Path
  ): KRouter.Config[InetMultiAddress] = {
    import pureconfig.generic.auto._
    import PureConfigReadersAndWriters._

    pureconfig.loadConfigOrThrow[KRouter.Config[InetMultiAddress]](configFile)
  }

  // Freshly generated identity with the -k/-a overrides applied.
  private def randomConfig(options: CommandLineOptions): KRouter.Config[InetMultiAddress] = {
    generateRandomConfig.copy(k = options.k, alpha = options.alpha)
  }

  // Despite the name, this only prints the generated config to stdout; the
  // user is expected to redirect or copy it into a file for -c.
  private def generateAndWriteConfigAndExit: Task[ExitCode] = {
    Task(println(configToStr(generateRandomConfig))).as(ExitCode.Success)
  }
}
142 |
--------------------------------------------------------------------------------
/scalanet/examples/src/io/iohk/scalanet/kconsole/AppContext.scala:
--------------------------------------------------------------------------------
1 | package io.iohk.scalanet.kconsole
2 |
3 | import cats.effect.Resource
4 | import io.iohk.scalanet.codec.DefaultCodecs._
5 | import io.iohk.scalanet.kademlia.codec.DefaultCodecs._
6 | import io.iohk.scalanet.kademlia.KNetwork.KNetworkScalanetImpl
7 | import io.iohk.scalanet.kademlia.{KMessage, KRouter}
8 | import io.iohk.scalanet.peergroup.{InetMultiAddress}
9 | import io.iohk.scalanet.peergroup.udp.StaticUDPPeerGroup
10 | import monix.execution.Scheduler
11 | import monix.eval.Task
12 |
object AppContext {

  /** Assembles the networking stack for the console app: a static UDP peer
    * group bound to the configured routing address, a Kademlia network on
    * top of it, and a started KRouter — all scoped as a single Resource so
    * shutdown releases the socket.
    */
  def apply(
      nodeConfig: KRouter.Config[InetMultiAddress]
  )(implicit scheduler: Scheduler): Resource[Task, KRouter[InetMultiAddress]] = {
    val udpConfig = StaticUDPPeerGroup.Config(
      nodeConfig.nodeRecord.routingAddress.inetSocketAddress,
      channelCapacity = 100
    )
    StaticUDPPeerGroup[KMessage[InetMultiAddress]](udpConfig).flatMap { peerGroup =>
      val network = new KNetworkScalanetImpl[InetMultiAddress](peerGroup)
      Resource.liftF(KRouter.startRouterWithServerSeq(nodeConfig, network))
    }
  }
}
26 |
--------------------------------------------------------------------------------
/scalanet/examples/src/io/iohk/scalanet/kconsole/CommandParser.scala:
--------------------------------------------------------------------------------
1 | package io.iohk.scalanet.kconsole
2 |
3 | import io.iohk.scalanet.kconsole.Utils.parseRecord
4 | import io.iohk.scalanet.peergroup.InetMultiAddress
5 | import io.iohk.scalanet.kademlia.KRouter
6 | import io.iohk.scalanet.kademlia.KRouter.NodeRecord
7 | import scodec.bits.BitVector
8 |
9 | import scala.concurrent.{Await, Promise}
10 | import scala.util.parsing.combinator._
11 | import scala.concurrent.duration._
12 | import scala.language.postfixOps
13 |
trait CommandParser extends RegexParsers {

  /** A console command which, applied to a running router, returns the text
    * to print back to the user.
    */
  sealed trait Command {
    def applyTo(kRouter: KRouter[InetMultiAddress]): String
  }

  object Command {
    import monix.execution.Scheduler.Implicits.global

    // Look up a node id on the network; prints the record found or the
    // failure message. Bounded by a 1 second wait.
    case class GetCommand(nodeId: BitVector) extends Command {
      override def applyTo(kRouter: KRouter[InetMultiAddress]): String = {
        val p = Promise[String]()
        kRouter.get(nodeId).runToFuture.onComplete {
          case util.Failure(exception) =>
            p.success(exception.getMessage)
          case util.Success(nodeRecord) =>
            p.success(Utils.recordToStr(nodeRecord))
        }
        Await.result(p.future, 1 second)
      }
    }

    // Add a record to the local node, then dump the kbuckets.
    // Fix: the add used to be fire-and-forget (`runToFuture` discarded), so
    // the dump below raced the insertion and errors were silently dropped.
    // Awaiting makes the dump reflect the add and surfaces failures, and is
    // consistent with DumpCommand's use of runSyncUnsafe.
    case class AddCommand(nodeRecord: NodeRecord[InetMultiAddress]) extends Command {
      val dumpCommand = DumpCommand()
      override def applyTo(kRouter: KRouter[InetMultiAddress]): String = {
        kRouter.add(nodeRecord).runSyncUnsafe(1 second)
        dumpCommand.applyTo(kRouter)
      }
    }

    // Remove a node id from the local kbuckets.
    // NOTE(review): the result of kRouter.remove is discarded; if remove
    // returns a lazy Task (like add does) it is never executed and the
    // success message below is misleading — confirm KRouter.remove's type.
    case class RemoveCommand(nodeId: BitVector) extends Command {
      override def applyTo(kRouter: KRouter[InetMultiAddress]): String = {
        kRouter.remove(nodeId)
        s"Node id ${nodeId.toHex} removed from local kBuckets"
      }
    }

    // Print every record currently known to the router, one per line.
    case class DumpCommand() extends Command {
      override def applyTo(kRouter: KRouter[InetMultiAddress]): String = {
        kRouter.nodeRecords.runSyncUnsafe().map { case (_, record) => Utils.recordToStr(record) }.mkString("\n")
      }
    }

    // Terminates the whole JVM; the empty string is never actually printed.
    case class ExitCommand() extends Command {
      override def applyTo(kRouter: KRouter[InetMultiAddress]): String = {
        System.exit(0)
        ""
      }
    }

    case class HelpCommand() extends Command {
      override def applyTo(kRouter: KRouter[InetMultiAddress]): String = help
    }

    val help: String =
      """
        | Command summary:
        | get        perform a lookup for the given nodeId and prints the record returned (if any).
        | add        adds the given node record to this node. The record format should be the same as that returned by get.
        | remove     remove the given nodeId from this nodes kbuckets.
        | dump       dump the contents of this nodes kbuckets to the console.
        | help       print this message.
        | exit       shutdown the node and quit the application.
        |""".stripMargin
  }

  import Command._

  // Top-level grammar: the first matching alternative wins.
  def command: Parser[Command] = getCommand | addCommand | removeCommand | dumpCommand | helpCommand | exitCommand

  def getCommand: Parser[GetCommand] = "get" ~> nodeId ^^ GetCommand

  def addCommand: Parser[AddCommand] = "add" ~> nodeRecord ^^ AddCommand

  def removeCommand: Parser[RemoveCommand] = "remove" ~> nodeId ^^ RemoveCommand

  def dumpCommand: Parser[DumpCommand] = "dump" ^^ (_ => DumpCommand())

  def helpCommand: Parser[HelpCommand] = "help" ^^ (_ => HelpCommand())

  def exitCommand: Parser[ExitCommand] = "exit" ^^ (_ => ExitCommand())

  // A hex-encoded node id.
  def nodeId: Parser[BitVector] = "^[a-fA-F0-9]+$".r ^^ (BitVector.fromValidHex(_))

  // The rest of the line, parsed as a JSON node record.
  def nodeRecord: Parser[NodeRecord[InetMultiAddress]] = ".+".r ^^ parseRecord
}
99 |
100 | object CommandParser extends CommandParser
101 |
--------------------------------------------------------------------------------
/scalanet/examples/src/io/iohk/scalanet/kconsole/ConsoleLoop.scala:
--------------------------------------------------------------------------------
1 | package io.iohk.scalanet.kconsole
2 |
3 | import io.iohk.scalanet.peergroup.InetMultiAddress
4 | import io.iohk.scalanet.kademlia.KRouter
5 |
6 | import scala.io.StdIn
7 |
/** Interactive read-eval-print loop for issuing Kademlia commands against a running node. */
object ConsoleLoop extends CommandParser {

  /** Runs the console loop until the `exit` command terminates the JVM.
    *
    * Each stdin line is parsed with the CommandParser; the resulting command is
    * applied to `kRouter` and the outcome printed in ANSI colours: green for
    * success, yellow for parse failures, red for parse errors or exceptions.
    */
  def run(kRouter: KRouter[InetMultiAddress]): Unit = {
    import Console.{GREEN, RED, RESET, YELLOW}

    Console.println(
      s"${RESET}${GREEN}Initialized with node record ${Utils.recordToStr(kRouter.config.nodeRecord)}"
    )
    Console.println(s"${CommandParser.Command.help}${RESET}")

    while (true) {
      val commandStr: String = StdIn.readLine("> ")
      // readLine returns null on EOF; also skip blank / whitespace-only lines.
      if (commandStr != null && commandStr.replaceAll("\\s+", "").nonEmpty) {
        parse(command, commandStr) match {
          case Success(result, _) =>
            try {
              val output = result.applyTo(kRouter)
              Console.println(s"${RESET}${GREEN}$output${RESET}")
            } catch {
              // applyTo may throw (e.g. malformed record, network failure); keep the loop alive.
              case e: Exception =>
                Console.println(s"${RESET}${RED}$e${RESET}")
            }
          case Failure(msg, _) =>
            Console.println(s"${RESET}${YELLOW}$msg${RESET}")
          case Error(msg, _) =>
            Console.println(s"${RESET}${RED}$msg${RESET}")
        }
      }
    }
  }
}
38 |
--------------------------------------------------------------------------------
/scalanet/examples/src/io/iohk/scalanet/kconsole/PureConfigReadersAndWriters.scala:
--------------------------------------------------------------------------------
1 | package io.iohk.scalanet.kconsole
2 |
3 | import java.net.{InetSocketAddress, URI}
4 |
5 | import io.iohk.scalanet.peergroup.InetMultiAddress
6 | import io.iohk.scalanet.kademlia.KRouter.NodeRecord
7 | import pureconfig.{ConfigReader, ConfigWriter}
8 | import pureconfig.ConvertHelpers.catchReadError
9 | import pureconfig.configurable.{genericMapReader, genericMapWriter}
10 | import pureconfig.generic.auto._
11 | import scodec.bits.BitVector
12 |
/** pureconfig readers/writers for the types that appear in KRouter configuration. */
object PureConfigReadersAndWriters {
  // BitVector <-> hex string
  implicit val bitvectorReader: ConfigReader[BitVector] =
    ConfigReader[String].map(BitVector.fromValidHex(_))
  // implicit val inetAddressReader: ConfigReader[InetAddress] =
  //   ConfigReader[String].map(InetAddress.getByName)
  // implicit val inetSocketAddressReader: ConfigReader[InetSocketAddress] =
  //   ConfigReader[String].map(parseAddressString)
  // InetMultiAddress <-> "host:port"
  implicit val inetMultiAddressReader: ConfigReader[InetMultiAddress] =
    ConfigReader[String].map(parseAddressString)
  // Known-peer maps are keyed by the hex-encoded node id.
  implicit def knownPeerReader: ConfigReader[Map[BitVector, NodeRecord[InetMultiAddress]]] =
    genericMapReader[BitVector, NodeRecord[InetMultiAddress]](
      catchReadError(BitVector.fromValidHex(_))
    )

  implicit val bitvectorWriter: ConfigWriter[BitVector] =
    ConfigWriter[String].contramap(_.toHex)
  // implicit val inetAddressWriter: ConfigWriter[InetAddress] =
  //   ConfigWriter[String].contramap(_.getHostAddress)
  // implicit val inetSocketAddressWriter: ConfigWriter[InetSocketAddress] =
  //   ConfigWriter[String].contramap(address => s"${address.getHostString}:${address.getPort}")
  implicit val inetMultiAddressWriter: ConfigWriter[InetMultiAddress] =
    ConfigWriter[String].contramap(
      address => s"${address.inetSocketAddress.getHostString}:${address.inetSocketAddress.getPort}"
    )
  implicit def knownPeerWriter: ConfigWriter[Map[BitVector, NodeRecord[InetMultiAddress]]] =
    genericMapWriter[BitVector, NodeRecord[InetMultiAddress]](_.toHex)

  // Parses "host:port" by piggybacking on java.net.URI parsing ("my://host:port").
  // NOTE(review): URI.getPort returns -1 when the port is absent — presumably
  // configured addresses always carry a port; confirm with callers.
  private def parseAddressString(s: String): InetMultiAddress = {
    val uri: URI = new URI("my://" + s)
    InetMultiAddress(new InetSocketAddress(uri.getHost, uri.getPort))
  }
}
45 |
--------------------------------------------------------------------------------
/scalanet/examples/src/io/iohk/scalanet/kconsole/Utils.scala:
--------------------------------------------------------------------------------
1 | package io.iohk.scalanet.kconsole
2 |
3 | import com.typesafe.config.{ConfigFactory, ConfigRenderOptions, ConfigValue}
4 | import io.iohk.scalanet.peergroup.InetMultiAddress
5 | import io.iohk.scalanet.kademlia.KRouter
6 | import io.iohk.scalanet.kademlia.KRouter.NodeRecord
7 | import pureconfig.ConfigWriter
8 | import scodec.bits.BitVector
9 |
10 | import java.net.{InetSocketAddress, ServerSocket}
11 | import scala.util.Random
12 |
/** Helpers for generating node configuration and (de)serialising records via pureconfig/HOCON. */
object Utils {

  /** Finds a free local port by binding an ephemeral server socket and releasing it.
    *
    * NOTE(review): the port may be grabbed by another process between close() and use.
    */
  private def aRandomAddress(): InetSocketAddress = {
    val s = new ServerSocket(0)
    try {
      new InetSocketAddress("localhost", s.getLocalPort)
    } finally {
      s.close()
    }
  }

  /** Builds a KRouter config with a random 160-bit node id, fresh local addresses and no known peers. */
  def generateRandomConfig: KRouter.Config[InetMultiAddress] = {

    def randomNodeId: BitVector =
      BitVector.bits(Range(0, 160).map(_ => Random.nextBoolean()))

    def aRandomNodeRecord: NodeRecord[InetMultiAddress] = {
      NodeRecord(
        id = randomNodeId,
        routingAddress = InetMultiAddress(aRandomAddress()),
        messagingAddress = InetMultiAddress(aRandomAddress())
      )
    }
    KRouter.Config(aRandomNodeRecord, Set.empty)
  }

  /** Renders the given config as HOCON with comments stripped.
    *
    * Bug fix: this previously serialised a brand-new `generateRandomConfig`
    * instead of the `config` argument that was passed in.
    */
  def configToStr(config: KRouter.Config[InetMultiAddress]): String = {
    import pureconfig.generic.auto._
    import PureConfigReadersAndWriters._
    val configValue: ConfigValue =
      ConfigWriter[KRouter.Config[InetMultiAddress]].to(config)

    configValue.render(ConfigRenderOptions.defaults().setComments(false))
  }

  /** Renders a single node record in concise (single-line) HOCON form. */
  def recordToStr(nodeRecord: NodeRecord[InetMultiAddress]): String = {
    import pureconfig.generic.auto._
    import PureConfigReadersAndWriters._
    val configValue: ConfigValue =
      ConfigWriter[NodeRecord[InetMultiAddress]].to(nodeRecord)

    configValue.render(ConfigRenderOptions.concise())
  }

  /** Parses a node record from its HOCON representation; throws on malformed input. */
  def parseRecord(nodeRecordStr: String): NodeRecord[InetMultiAddress] = {
    import pureconfig.generic.auto._
    import PureConfigReadersAndWriters._

    pureconfig.loadConfigOrThrow[NodeRecord[InetMultiAddress]](
      ConfigFactory.parseString(nodeRecordStr)
    )
  }
}
65 |
--------------------------------------------------------------------------------
/scalanet/examples/ut/src/io/iohk/scalanet/kconsole/CommandParserSpec.scala:
--------------------------------------------------------------------------------
1 | package io.iohk.scalanet.kconsole
2 |
3 | import java.net.InetSocketAddress
4 |
5 | import io.iohk.scalanet.kconsole.CommandParser.Command.{
6 | AddCommand,
7 | DumpCommand,
8 | ExitCommand,
9 | GetCommand,
10 | HelpCommand,
11 | RemoveCommand
12 | }
13 | import io.iohk.scalanet.peergroup.InetMultiAddress
14 | import io.iohk.scalanet.kademlia.KRouter.NodeRecord
15 | import org.scalatest.FlatSpec
16 | import org.scalatest.Matchers._
17 | import org.scalatest.prop.TableDrivenPropertyChecks._
18 | import scodec.bits._
19 |
/** Table-driven checks that each console command string parses to the expected Command. */
class CommandParserSpec extends FlatSpec {

  // (input string, expected parsed command)
  val t = Table(
    ("command", "result"),
    (
      "add {id:\"a0fd10a54e202b7d9a4948b4890d14447bf93a08\", routing-address:\"127.0.0.1:1034\", messaging-address:\"127.0.0.1:1035\"}",
      AddCommand(
        NodeRecord(
          hex"a0fd10a54e202b7d9a4948b4890d14447bf93a08".bits,
          InetMultiAddress(new InetSocketAddress("127.0.0.1", 1034)),
          InetMultiAddress(new InetSocketAddress("127.0.0.1", 1035))
        )
      )
    ),
    (
      "get a0fd10a54e202b7d9a4948b4890d14447bf93a08",
      GetCommand(hex"a0fd10a54e202b7d9a4948b4890d14447bf93a08".bits)
    ),
    (
      "remove a0fd10a54e202b7d9a4948b4890d14447bf93a08",
      RemoveCommand(hex"a0fd10a54e202b7d9a4948b4890d14447bf93a08".bits)
    ),
    ("dump", DumpCommand()),
    ("exit", ExitCommand()),
    ("help", HelpCommand())
  )

  "CommandParser" should "parse commands" in {
    forAll(t) { (command, expectedResult) =>
      // .get throws if parsing failed, which fails the test with the parse error.
      CommandParser.parse(CommandParser.command, command).get shouldBe expectedResult
    }
  }
}
53 |
--------------------------------------------------------------------------------
/scalanet/src/io/iohk/scalanet/codec/DefaultCodecs.scala:
--------------------------------------------------------------------------------
1 | package io.iohk.scalanet.codec
2 |
3 | import java.net.{InetAddress, InetSocketAddress}
4 |
5 | import io.iohk.scalanet.peergroup.InetMultiAddress
6 | import scodec.bits.{BitVector, ByteVector}
7 | import scodec.{Codec, DecodeResult}
8 | import scodec.codecs._
9 | import shapeless.Lazy
10 | import scodec.bits._
11 |
/**
  *
  * Default encodings for different objects provided by scalanet,
  * using scodec specific codecs.
  *
  */
object DefaultCodecs {

  // 12-byte prefix that maps an IPv4 address into the IPv4-mapped IPv6 space (::FFFF:a.b.c.d).
  val ipv4Pad = hex"00 00 00 00 00 00 00 00 00 00 FF FF"

  /** Fixed 16-byte codec for InetAddress: IPv4 addresses are padded to the
    * IPv4-mapped IPv6 form on encode and unpadded again on decode.
    *
    * Fix: implicit vals should carry an explicit type annotation so implicit
    * resolution does not silently depend on type inference.
    */
  implicit val inetAddress: Codec[InetAddress] = Codec[InetAddress](
    (ia: InetAddress) => {
      val bts = ByteVector(ia.getAddress)
      if (bts.length == 4) {
        bytes(16).encode(ipv4Pad ++ bts)
      } else {
        bytes(16).encode(bts)
      }
    },
    (buf: BitVector) =>
      bytes(16).decode(buf).map { b =>
        val bts = if (b.value.take(12) == ipv4Pad) {
          b.value.drop(12)
        } else {
          b.value
        }
        DecodeResult(InetAddress.getByAddress(bts.toArray), b.remainder)
      }
  )

  /** (address, port) pair: 16-byte address followed by a 16-bit unsigned port. */
  implicit val inetSocketAddress: Codec[InetSocketAddress] = {
    ("host" | Codec[InetAddress]) ::
      ("port" | uint16)
  }.as[(InetAddress, Int)]
    .xmap({ case (host, port) => new InetSocketAddress(host, port) }, isa => (isa.getAddress, isa.getPort))

  implicit val inetMultiAddressCodec: Codec[InetMultiAddress] = {
    ("inetSocketAddress" | Codec[InetSocketAddress])
  }.as[InetMultiAddress]

  /** Encodes a Seq through the corresponding List codec. */
  implicit def seqCoded[A](implicit listCodec: Lazy[Codec[List[A]]]): Codec[Seq[A]] = {
    listCodec.value.xmap(l => l.toSeq, seq => seq.toList)
  }
}
56 |
--------------------------------------------------------------------------------
/scalanet/src/io/iohk/scalanet/crypto/CryptoUtils.scala:
--------------------------------------------------------------------------------
1 | package io.iohk.scalanet.crypto
2 |
3 | import java.math.BigInteger
4 | import java.security._
5 | import java.security.cert.X509Certificate
6 | import java.security.spec.{ECGenParameterSpec, PKCS8EncodedKeySpec, X509EncodedKeySpec}
7 | import java.util.Date
8 | import io.iohk.scalanet.peergroup.dynamictls.DynamicTLSExtension.Extension
9 | import org.bouncycastle.asn1.sec.SECNamedCurves
10 | import org.bouncycastle.asn1.x500.X500Name
11 | import org.bouncycastle.asn1.x509.SubjectPublicKeyInfo
12 | import org.bouncycastle.asn1.x9.X9ECParameters
13 | import org.bouncycastle.cert.X509v3CertificateBuilder
14 | import org.bouncycastle.cert.jcajce.JcaX509CertificateConverter
15 | import org.bouncycastle.crypto.AsymmetricCipherKeyPair
16 | import org.bouncycastle.crypto.generators.ECKeyPairGenerator
17 | import org.bouncycastle.crypto.params.{ECDomainParameters, ECKeyGenerationParameters}
18 | import org.bouncycastle.crypto.util.{PrivateKeyInfoFactory, SubjectPublicKeyInfoFactory}
19 | import org.bouncycastle.jce.interfaces.ECPublicKey
20 | import org.bouncycastle.jce.provider.BouncyCastleProvider
21 | import org.bouncycastle.jce.spec.{ECParameterSpec, ECPublicKeySpec}
22 | import org.bouncycastle.operator.jcajce.JcaContentSignerBuilder
23 | import scodec.bits.BitVector
24 |
25 | import scala.util.Try
26 |
private[scalanet] object CryptoUtils {

  /**
    * Elliptic Curve Groups(ECDHE) recommended by TLS 1.3
    */
  sealed abstract class SupportedCurves(val name: String)
  case object Secp256r1 extends SupportedCurves("secp256r1")
  case object Secp384r1 extends SupportedCurves("secp384r1")
  case object Secp521r1 extends SupportedCurves("secp521r1")

  // Curve used for the lightweight (BC) key pairs below.
  private val curveName = "secp256k1"

  // Signature bytes as produced by java.security.Signature.sign().
  type SignatureBytes = Array[Byte]

  abstract class SignatureScheme(val name: String)
  case object SHA256withECDSA extends SignatureScheme("SHA256withECDSA")

  private val usedKeyScheme = "EC"

  // Single BouncyCastle provider instance shared by all factory/signature lookups.
  private val PROVIDER = new BouncyCastleProvider()

  private val curveParams: X9ECParameters = SECNamedCurves.getByName(curveName)

  private val curve: ECDomainParameters =
    new ECDomainParameters(curveParams.getCurve, curveParams.getG, curveParams.getN, curveParams.getH)

  // Signature instances are stateful, so a fresh one is created per operation.
  private def getEcdsaSignature: Signature = {
    Signature.getInstance(SHA256withECDSA.name, PROVIDER)
  }

  private def getEcKeyFactory: KeyFactory = {
    KeyFactory.getInstance(usedKeyScheme, PROVIDER)
  }

  /** Generates a secp256k1 key pair using BouncyCastle's lightweight API. */
  def generateKeyPair(secureRandom: SecureRandom): AsymmetricCipherKeyPair = {
    val generator = new ECKeyPairGenerator
    generator.init(new ECKeyGenerationParameters(curve, secureRandom))
    generator.generateKeyPair()
  }

  /** Generates a JCA key pair on the named curve via the BC provider. */
  def genEcKeyPair(secureRandom: SecureRandom, curveName: String): KeyPair = {
    val ecSpec = new ECGenParameterSpec(curveName)
    val g = KeyPairGenerator.getInstance("EC", PROVIDER)
    g.initialize(ecSpec, secureRandom)
    g.generateKeyPair();
  }

  /** Generates a key pair on one of the curves recommended for TLS 1.3. */
  def genTlsSupportedKeyPair(secureRandom: SecureRandom, curveName: SupportedCurves): KeyPair = {
    genEcKeyPair(secureRandom, curveName.name)
  }

  /** Signs `data` with SHA256withECDSA using the given private key. */
  def signEcdsa(data: Array[Byte], privateKey: PrivateKey, secureRandom: SecureRandom): SignatureBytes = {
    val ecdsaSign = getEcdsaSignature
    ecdsaSign.initSign(privateKey, secureRandom)
    ecdsaSign.update(data);
    ecdsaSign.sign();
  }

  /** Verifies an SHA256withECDSA signature; any thrown exception counts as verification failure. */
  def verifyEcdsa(data: Array[Byte], signature: SignatureBytes, publicKey: java.security.PublicKey): Boolean =
    Try {
      val ecdsaVerify = getEcdsaSignature
      ecdsaVerify.initVerify(publicKey)
      ecdsaVerify.update(data)
      ecdsaVerify.verify(signature)
    }.fold(_ => false, result => result)

  /** Converts a BC lightweight key pair to a JCA KeyPair via PKCS#8 / SPKI encodings. */
  def convertBcToJceKeyPair(bcKeyPair: AsymmetricCipherKeyPair): KeyPair = {
    val pkcs8Encoded = PrivateKeyInfoFactory.createPrivateKeyInfo(bcKeyPair.getPrivate).getEncoded()
    val pkcs8KeySpec = new PKCS8EncodedKeySpec(pkcs8Encoded)
    val spkiEncoded = SubjectPublicKeyInfoFactory.createSubjectPublicKeyInfo(bcKeyPair.getPublic).getEncoded()
    val spkiKeySpec = new X509EncodedKeySpec(spkiEncoded)
    val keyFac = getEcKeyFactory
    new KeyPair(keyFac.generatePublic(spkiKeySpec), keyFac.generatePrivate(pkcs8KeySpec))
  }

  /** Decodes an encoded EC point into a secp256k1 public key; Failure on malformed input. */
  def getSecp256k1KeyFromBytes(bytes: Array[Byte]): Try[PublicKey] = Try {
    val ecPoint = curve.getCurve.decodePoint(bytes)
    val spec = new ECParameterSpec(curveParams.getCurve, curveParams.getG, curveParams.getN)
    val pubKeySpec = new ECPublicKeySpec(ecPoint, spec)
    val keyFac = getEcKeyFactory
    keyFac.generatePublic(pubKeySpec)
  }

  /** Decodes an X.509/SPKI-encoded public key for the given algorithm name. */
  def getBouncyCastlePubKey(bytes: Array[Byte], algorithm: String): Try[PublicKey] = Try {
    val spec = new X509EncodedKeySpec(bytes)
    val keyFac = KeyFactory.getInstance(algorithm, PROVIDER)
    keyFac.generatePublic(spec)
  }

  /** Returns the key's uncompressed EC point as bits; fails if the key is not a BC ECPublicKey. */
  def getEcPublicKey(publicKey: PublicKey): Try[BitVector] = Try {
    BitVector(publicKey.asInstanceOf[ECPublicKey].getQ.getEncoded(false))
  }

  /** Builds a self-signed X.509 certificate carrying the given extensions.
    *
    * Subject and issuer are both "CN=scalanet-tls"; the serial number is a
    * random 64-bit integer drawn from `random`.
    */
  def buildCertificateWithExtensions(
      connectionKeyPair: KeyPair,
      random: SecureRandom,
      extensions: List[Extension],
      beforeDate: Date,
      afterDate: Date,
      signatureScheme: SignatureScheme
  ): X509Certificate = {
    val name = "scalanet-tls"
    val sn = new BigInteger(64, random)
    val owner = new X500Name("CN=" + name);
    val sub = SubjectPublicKeyInfo.getInstance(connectionKeyPair.getPublic.getEncoded)
    val certificateBuilder = new X509v3CertificateBuilder(owner, sn, beforeDate, afterDate, owner, sub)

    extensions.foreach { extension =>
      certificateBuilder.addExtension(extension.oid, extension.isCritical, extension.value)
    }

    val signer = new JcaContentSignerBuilder(signatureScheme.name).build(connectionKeyPair.getPrivate);

    val ca = certificateBuilder.build(signer)

    val cert = new JcaX509CertificateConverter().setProvider(PROVIDER).getCertificate(ca)

    cert
  }
}
147 |
--------------------------------------------------------------------------------
/scalanet/src/io/iohk/scalanet/peergroup/BufferConversionOps.scala:
--------------------------------------------------------------------------------
1 | package io.iohk.scalanet.peergroup
2 |
3 | import java.nio.ByteBuffer
4 |
trait BufferConversionOps {

  implicit class ByteBufferConversionOps(val byteBuffer: ByteBuffer) {

    /** Copies the buffer contents from position 0 up to the limit into a fresh array.
      *
      * Fixes two defects in the previous version's `hasArray` branch:
      *  - it returned the *backing* array itself, aliasing the buffer's memory
      *    and exposing capacity beyond the limit;
      *  - it ignored `arrayOffset`, returning wrong bytes for sliced/offset-wrapped buffers.
      * Both branches now consistently produce a defensive copy of exactly [0, limit).
      */
    def toArray: Array[Byte] = {
      if (byteBuffer.hasArray)
        java.util.Arrays.copyOfRange(
          byteBuffer.array,
          byteBuffer.arrayOffset,
          byteBuffer.arrayOffset + byteBuffer.limit()
        )
      else {
        // Direct buffers have no accessible array: rewind and bulk-read the contents.
        (byteBuffer: java.nio.Buffer).position(0)
        val arr = new Array[Byte](byteBuffer.remaining())
        byteBuffer.get(arr)
        arr
      }
    }
  }
}

object BufferConversionOps extends BufferConversionOps
22 |
--------------------------------------------------------------------------------
/scalanet/src/io/iohk/scalanet/peergroup/CloseableQueue.scala:
--------------------------------------------------------------------------------
1 | package io.iohk.scalanet.peergroup
2 |
3 | import cats.implicits._
4 | import cats.effect.concurrent.{TryableDeferred, Deferred}
5 | import monix.catnap.ConcurrentQueue
6 | import monix.eval.Task
7 | import monix.execution.{BufferCapacity, ChannelType}
8 | import scala.util.{Left, Right}
9 |
/** Wraps an underlying concurrent queue so that polling can return None when
 * the producer side is finished, or vice versa the producer can tell when
 * the consumer is no longer interested in receiving more values.
 *
 *
 * @param closed indicates whether the producer side has finished and whether
 * the messages already in the queue should be discarded (true) or consumed (false).
 * @param queue is the underlying message queue
 */
class CloseableQueue[A](
    closed: TryableDeferred[Task, Boolean],
    queue: ConcurrentQueue[Task, A]
) {
  import CloseableQueue.Closed

  /** Fetch the next item from the queue, or None if the production has finished
    * and the queue has been emptied.
    */
  def next: Task[Option[A]] =
    closed.tryGet.flatMap {
      case Some(true) =>
        // Closed with discard=true: drop whatever is left.
        // `clear` must be called by the consumer side.
        queue.clear.as(None)

      case Some(false) =>
        // Closed with discard=false: drain remaining items; None once empty.
        queue.tryPoll

      case None =>
        // Still open: wait for either an item or the closed signal, whichever
        // comes first; on the closed signal, re-examine the flag via recursion.
        Task.race(closed.get, queue.poll).flatMap {
          case Left(_) =>
            next
          case Right(item) =>
            Task.pure(Some(item))
        }
    }

  /** Stop accepting items in the queue. Clear items if `discard` is true, otherwise let them be drained.
    * If the queue is already closed it does nothing; this is because either the producer or the consumer
    * could have closed the queue before.
    */
  def close(discard: Boolean): Task[Unit] =
    closed.complete(discard).attempt.void

  /** Close the queue and discard any remaining items in it. */
  def closeAndDiscard: Task[Unit] = close(discard = true)

  /** Close the queue but allow the consumer to pull the remaining items from it. */
  def closeAndKeep: Task[Unit] = close(discard = false)

  /** Try to put a new item in the queue, unless the capacity has been reached or the queue has been closed. */
  def tryOffer(item: A): Task[Either[Closed, Boolean]] =
    // We could drop the oldest item if the queue is full, rather than drop the latest,
    // but the capacity should be set so it only prevents DoS attacks, so it shouldn't
    // be that crucial to serve clients who overproduce.
    unlessClosed(queue.tryOffer(item))

  /** Try to put a new item in the queue unless the queue has already been closed. Waits if the capacity has been reached. */
  def offer(item: A): Task[Either[Closed, Unit]] =
    unlessClosed {
      Task.race(closed.get, queue.offer(item)).map(_.leftMap(_ => Closed))
    }.map(_.joinRight)

  // Runs `task` only while the queue is open; Left(Closed) once closed either way.
  private def unlessClosed[T](task: Task[T]): Task[Either[Closed, T]] =
    closed.tryGet
      .map(_.isDefined)
      .ifM(
        Task.pure(Left(Closed)),
        task.map(Right(_))
      )
}
80 |
object CloseableQueue {

  /** Indicate that the queue was closed. */
  object Closed
  type Closed = Closed.type

  /** Create a queue with a given capacity; 0 or negative means unbounded. */
  def apply[A](capacity: Int, channelType: ChannelType) = {
    val buffer = capacity match {
      case i if i <= 0 => BufferCapacity.Unbounded()
      // Capacity is approximate and a power of 2, min value 2.
      case i => BufferCapacity.Bounded(math.max(2, i))
    }
    for {
      closed <- Deferred.tryable[Task, Boolean]
      queue <- ConcurrentQueue.withConfig[Task, A](buffer, channelType)
    } yield new CloseableQueue[A](closed, queue)
  }

  /** Create an unbounded queue; defaults to multi-producer multi-consumer access. */
  def unbounded[A](channelType: ChannelType = ChannelType.MPMC) =
    apply[A](capacity = 0, channelType)
}
103 |
--------------------------------------------------------------------------------
/scalanet/src/io/iohk/scalanet/peergroup/ControlEvent.scala:
--------------------------------------------------------------------------------
1 | package io.iohk.scalanet.peergroup
2 |
3 | import scala.util.control.NoStackTrace
4 |
/** Events emitted by peer groups to signal lifecycle transitions. */
sealed trait ControlEvent

object ControlEvent {

  /** Signals that initialization completed successfully.
    *
    * Fix: now actually extends [[ControlEvent]]; previously the sealed trait
    * had no implementations at all, so neither value could be used where a
    * ControlEvent was expected.
    */
  case object Initialized extends ControlEvent

  /** Signals that initialization failed with the given cause. */
  case class InitializationError(message: String, cause: Throwable)
      extends RuntimeException(message, cause)
      with ControlEvent
      with NoStackTrace
}
15 |
--------------------------------------------------------------------------------
/scalanet/src/io/iohk/scalanet/peergroup/ExternalAddressResolver.scala:
--------------------------------------------------------------------------------
1 | package io.iohk.scalanet.peergroup
2 |
3 | import java.io.{BufferedReader, InputStreamReader}
4 | import java.net.{InetAddress, URL}
5 |
6 | import monix.eval.Task
7 | import scala.util.control.NonFatal
8 |
/** Resolve the external address based on a list of URLs that each return the IP of the caller. */
class ExternalAddressResolver(urls: List[String]) {
  // Tries each URL in order; None when every lookup fails or the list is empty.
  def resolve: Task[Option[InetAddress]] =
    ExternalAddressResolver.checkUrls(urls)
}
14 |
object ExternalAddressResolver {
  // Public "what is my IP" services used when no explicit list is supplied.
  val default = new ExternalAddressResolver(List("http://checkip.amazonaws.com", "http://bot.whatismyipaddress.com"))

  /** Retrieve the external address from a URL that returns a single line containing the IP.
    *
    * Fix: the reader (and the underlying URL stream) is now closed; previously
    * every call leaked an open connection.
    */
  def checkUrl(url: String): Task[InetAddress] = Task.async { cb =>
    try {
      val ipCheckUrl = new URL(url)
      val in: BufferedReader = new BufferedReader(new InputStreamReader(ipCheckUrl.openStream()))
      try {
        cb.onSuccess(InetAddress.getByName(in.readLine()))
      } finally {
        in.close()
      }
    } catch {
      case NonFatal(ex) => cb.onError(ex)
    }
  }

  /** Try multiple URLs until an IP address is found; None if all fail or the list is empty. */
  def checkUrls(urls: List[String]): Task[Option[InetAddress]] = {
    if (urls.isEmpty) {
      Task.now(None)
    } else {
      checkUrl(urls.head).attempt.flatMap {
        case Left(_) =>
          checkUrls(urls.tail)
        case Right(value) =>
          Task.now(Some(value))
      }
    }
  }
}
43 |
--------------------------------------------------------------------------------
/scalanet/src/io/iohk/scalanet/peergroup/InetAddressOps.scala:
--------------------------------------------------------------------------------
1 | package io.iohk.scalanet.peergroup
2 |
3 | import com.github.jgonian.ipmath.{Ipv6Range, Ipv4Range, Ipv4, Ipv6}
4 | import java.net.{InetAddress, Inet4Address, Inet6Address}
5 | import scala.language.implicitConversions
6 | import com.github.jgonian.ipmath.AbstractIp
7 |
/** Enrichment methods for classifying and truncating InetAddress values. */
class InetAddressOps(val address: InetAddress) extends AnyVal {
  import InetAddressOps._

  def isIPv4: Boolean =
    address.isInstanceOf[Inet4Address]

  def isIPv6: Boolean =
    address.isInstanceOf[Inet6Address]

  /** True for multicast addresses and the IANA special-purpose ranges (see companion tables). */
  def isSpecial: Boolean =
    address.isMulticastAddress || isIPv4 && isInRange4(special4) || isIPv6 && isInRange6(special6)

  /** True for loopback and private / link-local ranges. */
  def isLAN: Boolean =
    address.isLoopbackAddress || isIPv4 && isInRange4(lan4) || isIPv6 && isInRange6(lan6)

  /** True for the all-zero wildcard address in either protocol family. */
  def isUnspecified: Boolean =
    address == unspecified4 || address == unspecified6

  private def isInRange4(infos: List[Ipv4Range]): Boolean = {
    val ip = toIpv4
    infos.exists(_.contains(ip))
  }

  private def isInRange6(infos: List[Ipv6Range]): Boolean = {
    val ip = toIpv6
    infos.exists(_.contains(ip))
  }

  private def toIpv4 =
    Ipv4.of(address.getHostAddress)

  private def toIpv6 =
    Ipv6.of(address.getHostAddress)

  private def toAbstractIp: AbstractIp[_, _] =
    if (isIPv4) toIpv4 else toIpv6

  private def toInetAddress(ip: AbstractIp[_, _]) =
    InetAddress.getByName(ip.toString)

  /** Truncate the IP address to the first `prefixLength` bits. */
  def truncate(prefixLength: Int): InetAddress =
    toInetAddress(toAbstractIp.lowerBoundForPrefix(prefixLength))
}
52 |
object InetAddressOps {
  // Implicit enrichment entry point: brings the ops onto any InetAddress.
  implicit def toInetAddressOps(address: InetAddress): InetAddressOps =
    new InetAddressOps(address)

  // https://tools.ietf.org/html/rfc5735.html
  private val unspecified4 = InetAddress.getByName("0.0.0.0")

  // https://tools.ietf.org/html/rfc2373.html
  private val unspecified6 = InetAddress.getByName("0:0:0:0:0:0:0:0")

  // https://www.iana.org/assignments/iana-ipv4-special-registry/iana-ipv4-special-registry.xhtml
  private val special4 =
    List(
      "100.64.0.0/10", // Shared Address Space
      "169.254.0.0/16", // Link Local
      "192.0.0.0/24", // [2] IETF Protocol Assignments
      "192.0.0.0/29", // IPv4 Service Continuity Prefix
      "192.0.0.8/32", // IPv4 dummy address
      "192.0.0.9/32", // Port Control Protocol Anycast
      "192.0.0.10/32", // Traversal Using Relays around NAT Anycast
      "192.0.0.170/32", // NAT64/DNS64 Discovery
      "192.0.0.171/32", // NAT64/DNS64 Discovery
      "192.0.2.0/24", // Documentation (TEST-NET-1)
      "192.31.196.0/24", // AS112-v4
      "192.52.193.0/24", // AMT
      "192.88.99.0/24", // Deprecated (6to4 Relay Anycast)
      "192.175.48.0/24", // Direct Delegation AS112 Service
      "198.18.0.0/15", // Benchmarking
      "198.51.100.0/24", // Documentation (TEST-NET-2)
      "203.0.113.0/24", // Documentation (TEST-NET-3)
      "240.0.0.0/4", // Reserved
      "255.255.255.255/32" // Limited Broadcast
    ).map(Ipv4Range.parse(_))

  // https://www.iana.org/assignments/iana-ipv6-special-registry/iana-ipv6-special-registry.xhtml
  private val special6 =
    List(
      "100::/64",
      "2001::/32",
      "2001:1::1/128",
      "2001:2::/48",
      "2001:3::/32",
      "2001:4:112::/48",
      "2001:5::/32",
      "2001:10::/28",
      "2001:20::/28",
      "2001:db8::/32",
      "2002::/16"
    ).map(Ipv6Range.parse(_))

  private val lan4 =
    List(
      "0.0.0.0/8", // "This host on this network"
      "10.0.0.0/8", // Private-Use
      "172.16.0.0/12", // Private-Use
      "192.168.0.0/16" // Private-Use
    ).map(Ipv4Range.parse(_))

  private val lan6 =
    List(
      "fe80::/10", // Link-Local
      "fc00::/7" // Unique-Local
    ).map(Ipv6Range.parse(_))
}
117 |
--------------------------------------------------------------------------------
/scalanet/src/io/iohk/scalanet/peergroup/InetMultiAddress.scala:
--------------------------------------------------------------------------------
1 | package io.iohk.scalanet.peergroup
2 |
3 | import java.net.{InetAddress, InetSocketAddress}
4 |
/** Typeclass for extracting a socket address from an address-like value. */
trait Addressable[A] {
  def getAddress(a: A): InetSocketAddress
}

object Addressable {
  // Summoner: Addressable[A] resolves the implicit instance.
  def apply[A](implicit sh: Addressable[A]): Addressable[A] = sh

  // Identity instance: an InetSocketAddress is its own address.
  implicit val `Addressable[InetSocketAddress]` : Addressable[InetSocketAddress] = new Addressable[InetSocketAddress] {
    override def getAddress(a: InetSocketAddress): InetSocketAddress = a
  }
}
16 |
/**
  * TCP, UDP and other socket-based protocols couple addressing with multiplexing:
  * port numbers leak into the address space, so a single node is reachable under
  * many host:port combinations. A client must specify a port to connect, yet a
  * server receiving two inbound connections from the same client sees two distinct
  * remote addresses (e.g. client:60441 and client:60442).
  *
  * This wrapper resolves that tension: clients still carry the full host:port
  * combo, while equality and hashing deliberately ignore the port, so a server
  * comparing connection addresses (e.g. as map keys) correctly identifies two
  * connections from the same node as equal.
  *
  * @param inetSocketAddress a host:port combo address.
  */
case class InetMultiAddress(inetSocketAddress: InetSocketAddress) {
  // Only the host part participates in equals/hashCode.
  private val inetAddress: InetAddress = inetSocketAddress.getAddress

  def canEqual(other: Any): Boolean = other.isInstanceOf[InetMultiAddress]

  override def equals(other: Any): Boolean = other match {
    case that: InetMultiAddress => (that canEqual this) && inetAddress == that.inetAddress
    case _                      => false
  }

  // Must agree with equals: derived solely from the host address, never the port.
  override def hashCode(): Int = inetAddress.hashCode()

  override def toString = inetSocketAddress.toString
}
54 |
object InetMultiAddress {
  /** Addressable instance exposing the wrapped full host:port socket address.
    *
    * Fix: implicit vals should carry an explicit type annotation so implicit
    * resolution does not silently depend on type inference.
    */
  implicit val addressableInetMultiAddressInst: Addressable[InetMultiAddress] = new Addressable[InetMultiAddress] {
    override def getAddress(a: InetMultiAddress): InetSocketAddress = a.inetSocketAddress
  }
}
60 |
--------------------------------------------------------------------------------
/scalanet/src/io/iohk/scalanet/peergroup/NettyFutureUtils.scala:
--------------------------------------------------------------------------------
1 | package io.iohk.scalanet.peergroup
2 |
3 | import io.netty
4 | import io.netty.util.concurrent.{Future, GenericFutureListener}
5 | import monix.eval.Task
6 |
7 | import java.util.concurrent.CancellationException
8 |
/** Bridges Netty's callback-based futures into Monix Tasks. */
private[scalanet] object NettyFutureUtils {

  /** Wraps a by-name Netty future into a Task that discards its result. */
  def toTask(f: => netty.util.concurrent.Future[_]): Task[Unit] = {
    fromNettyFuture(Task.delay(f)).void
  }

  /** Converts a Task producing a Netty future into a Task of its result.
    *
    * Cancelling the resulting Task cancels the underlying Netty future
    * (with interruption allowed).
    */
  def fromNettyFuture[A](ff: Task[netty.util.concurrent.Future[A]]): Task[A] = {
    ff.flatMap { nettyFuture =>
      Task.cancelable { cb =>
        subscribeToFuture(nettyFuture, cb)
        Task.delay({ nettyFuture.cancel(true); () })
      }
    }
  }

  // Registers a listener that forwards completion into the callback. A
  // CancellationException is deliberately swallowed: Task cancellation has
  // already been handled by the cancel token above, so neither side is signalled.
  private def subscribeToFuture[A](cf: netty.util.concurrent.Future[A], cb: Either[Throwable, A] => Unit): Unit = {
    cf.addListener(new GenericFutureListener[Future[A]] {
      override def operationComplete(future: Future[A]): Unit = {
        if (future.isSuccess) {
          cb(Right(future.getNow))
        } else {
          future.cause() match {
            case _: CancellationException =>
              ()
            case ex => cb(Left(ex))
          }
        }
      }
    })
    ()
  }
}
40 |
--------------------------------------------------------------------------------
/scalanet/src/io/iohk/scalanet/peergroup/dynamictls/ChannelAwareQueue.scala:
--------------------------------------------------------------------------------
1 | package io.iohk.scalanet.peergroup.dynamictls
2 |
3 | import io.iohk.scalanet.peergroup.CloseableQueue
4 | import io.iohk.scalanet.peergroup.CloseableQueue.Closed
5 | import io.netty.channel.ChannelConfig
6 | import monix.eval.Task
7 | import monix.execution.ChannelType
8 |
9 | import java.util.concurrent.atomic.AtomicLong
10 |
/**
  * Wraps an underlying unbounded CloseableQueue queue and bounds it based on netty auto-read feature.
  * While auto-read is disabled received messages start to accumulate in underlying os RCV tcp buffer. When RCV buffer is full
  * sender SND buffer will start to buffer un-sent bytes. When sender SND buffer is full, the default behaviour is that
  * write(xxx) will block. In our case, sendMessage Task will not finish until there will be place in SND buffer
  *
  * WARNING: Actual limit may sometimes go higher, as each netty read can return more than one message.
  *
  * @param limit how many items can accumulate in the queue
  * @param queue is the underlying closeable message queue
  */
private[scalanet] final class ChannelAwareQueue[M] private (
    limit: Int,
    queue: CloseableQueue[M],
    channelConfig: ChannelConfig
) {
  // Number of buffered items; updated atomically from both producer and consumer sides.
  private val queueSize = new AtomicLong(0)

  // Auto-read is re-enabled only once the backlog drains to this threshold,
  // giving hysteresis so reads are not toggled on every single item.
  private val lowerBound: Int = Math.max(1, limit / 2)

  def size: Long = queueSize.get()

  // Counts the item and possibly pauses netty reads BEFORE enqueueing.
  // NOTE(review): the counter is incremented even when the underlying queue is
  // already closed and the offer returns Left(Closed) — confirm this is acceptable.
  def offer(a: M): Task[Either[Closed, Unit]] = {
    Task(enableBackPressureIfNecessary()) >> queue.offer(a)
  }

  // Dequeues the next item, resuming netty reads once the backlog has drained.
  // Returns None when the queue is closed and exhausted.
  def next: Task[Option[M]] = {
    queue.next.map {
      case Some(value) =>
        disableBackPressureIfNecessary()
        Some(value)
      case None =>
        None
    }
  }

  def close(discard: Boolean): Task[Unit] = queue.close(discard)

  // Backpressure on: stop netty from reading once the queue reaches its limit.
  private def enableBackPressureIfNecessary(): Unit =
    if (queueSize.incrementAndGet() >= limit && channelConfig.isAutoRead) {
      channelConfig.setAutoRead(false)
      ()
    }

  // Backpressure off: resume netty reads once enough items have been consumed.
  private def disableBackPressureIfNecessary(): Unit =
    if (queueSize.decrementAndGet() <= lowerBound && !channelConfig.isAutoRead) {
      channelConfig.setAutoRead(true)
      ()
    }
}
62 |
object ChannelAwareQueue {
  /** Builds a ChannelAwareQueue on top of a fresh unbounded CloseableQueue. */
  def apply[M](limit: Int, channelType: ChannelType, channelConfig: ChannelConfig): Task[ChannelAwareQueue[M]] =
    CloseableQueue.unbounded[M](channelType).map { underlying =>
      new ChannelAwareQueue[M](limit, underlying, channelConfig)
    }
}
69 |
--------------------------------------------------------------------------------
/scalanet/src/io/iohk/scalanet/peergroup/dynamictls/CustomHandlers.scala:
--------------------------------------------------------------------------------
1 | package io.iohk.scalanet.peergroup.dynamictls
2 |
3 | import com.github.benmanes.caffeine.cache.Caffeine
4 | import io.netty.channel.ChannelHandler.Sharable
5 | import io.netty.channel.ChannelHandlerContext
6 | import io.netty.handler.ipfilter.AbstractRemoteAddressFilter
7 |
8 | import java.net.{InetAddress, InetSocketAddress}
9 |
private[scalanet] object CustomHandlers {

  /**
    * Connection-rate limiter which keeps a recent history of incoming connections.
    * A new connection from an IP address that is still in that history is rejected,
    * as it means the remote caller tries too often.
    *
    * The handler is shared between several netty pipelines, so it needs to be thread safe.
    *
    * To share handlers between pipelines they need to be marked as @Sharable; otherwise
    * netty refuses to share them.
    */
  @Sharable
  class ThrottlingIpFilter(config: DynamicTLSPeerGroup.IncomingConnectionThrottlingConfig)
      extends AbstractRemoteAddressFilter[InetSocketAddress] {

    // Time-bounded view of recently seen addresses; entries expire after the throttling duration.
    private val recentConnections = Caffeine
      .newBuilder()
      .expireAfterWrite(config.throttlingDuration.length, config.throttlingDuration.unit)
      .build[InetAddress, java.lang.Boolean]()
      .asMap()

    // putIfAbsent returns null only for the first connection within the expiry window.
    private def isQuotaAvailable(address: InetAddress): Boolean =
      recentConnections.putIfAbsent(address, java.lang.Boolean.TRUE) == null

    override def accept(ctx: ChannelHandlerContext, remoteAddress: InetSocketAddress): Boolean = {
      val address = remoteAddress.getAddress
      // Loopback traffic may be exempt from throttling, depending on configuration.
      val exemptLocal = address.isLoopbackAddress && !config.throttleLocalhost
      exemptLocal || isQuotaAvailable(address)
    }
  }

}
44 |
--------------------------------------------------------------------------------
/scalanet/src/io/iohk/scalanet/peergroup/dynamictls/DynamicTLSPeerGroupUtils.scala:
--------------------------------------------------------------------------------
1 | package io.iohk.scalanet.peergroup.dynamictls
2 |
3 | import java.net.Socket
4 | import java.security.KeyStore
5 | import java.security.cert.X509Certificate
6 | import io.iohk.scalanet.peergroup.dynamictls.DynamicTLSPeerGroup.PeerInfo
7 | import io.netty.handler.ssl.util.SimpleTrustManagerFactory
8 | import io.netty.handler.ssl.{ClientAuth, SslContext, SslContextBuilder, SslProvider}
9 |
10 | import javax.net.ssl._
11 | import scodec.bits.BitVector
12 |
private[scalanet] object DynamicTLSPeerGroupUtils {
  // Key under which the validated peer id is stored in the TLS handshake session,
  // so other parts of the system can retrieve it from the SSLEngine.
  val peerIdKey = "peerId"

  /**
    * Custom trust manager which is used by netty ssl to accept or reject peer certificates.
    *
    * The extended version is needed to have access to the SslEngine, to pass the client id
    * to other parts of the system via getSSLParameters.
    *
    * Methods without an SslEngine argument are left with `???` to make sure that if there would arise
    * a case where they would be called, an exception will be thrown instead of just trusting an
    * external peer without validation.
    *
    * @param info expected remote peer id (client side); None on the server side.
    */
  class DynamicTlsTrustManager(info: Option[BitVector]) extends X509ExtendedTrustManager {
    override def checkClientTrusted(x509Certificates: Array[X509Certificate], s: String): Unit = ???

    override def checkServerTrusted(x509Certificates: Array[X509Certificate], s: String): Unit = ???

    // No pre-trusted issuers: validation is done entirely by CustomTlsValidator.
    override def getAcceptedIssuers: Array[X509Certificate] = {
      new Array[X509Certificate](0)
    }

    override def checkClientTrusted(x509Certificates: Array[X509Certificate], s: String, socket: Socket): Unit = ???

    // Server side: validate the incoming client certificate chain and stash the derived
    // node id in the handshake session under peerIdKey.
    override def checkClientTrusted(x509Certificates: Array[X509Certificate], s: String, sslEngine: SSLEngine): Unit = {
      CustomTlsValidator.validateCertificates(x509Certificates, info) match {
        case Left(er) => throw er
        case Right(value) =>
          val id = value.publicKey.getNodeId
          sslEngine.getHandshakeSession.putValue(peerIdKey, id)
      }
    }

    override def checkServerTrusted(x509Certificates: Array[X509Certificate], s: String, socket: Socket): Unit = ???

    // Client side: validate the server certificate chain against the expected peer id in `info`.
    override def checkServerTrusted(x509Certificates: Array[X509Certificate], s: String, sslEngine: SSLEngine): Unit = {
      CustomTlsValidator.validateCertificates(x509Certificates, info) match {
        case Left(er) => throw er
        case Right(_) => ()
      }
    }
  }

  // Factory wrapping a single DynamicTlsTrustManager; the engineInit overloads are
  // intentionally no-ops since no KeyStore-based configuration is used.
  class CustomTrustManagerFactory(info: Option[BitVector]) extends SimpleTrustManagerFactory {

    private val tm = new DynamicTlsTrustManager(info)

    override def engineGetTrustManagers(): Array[TrustManager] = {
      Array[TrustManager] { tm }
    }

    override def engineInit(keyStore: KeyStore): Unit = {}

    override def engineInit(managerFactoryParameters: ManagerFactoryParameters): Unit = {}
  }

  // Which side of the connection an SslContext is being built for.
  sealed trait SSLContextFor
  case object SSLContextForServer extends SSLContextFor
  case class SSLContextForClient(to: PeerInfo) extends SSLContextFor

  // Builds a TLSv1.3-only SslContext for either side, using the configured key pair and
  // certificate together with the custom trust manager above. The server side requires
  // client authentication; the client side validates against the target peer's id.
  def buildCustomSSlContext(f: SSLContextFor, config: DynamicTLSPeerGroup.Config): SslContext = {
    val sslProvider = if (config.useNativeTlsImplementation) SslProvider.OPENSSL else SslProvider.JDK

    f match {
      case SSLContextForServer =>
        SslContextBuilder
          .forServer(config.connectionKeyPair.getPrivate, List(config.connectionCertificate): _*)
          .trustManager(new CustomTrustManagerFactory(None))
          .sslProvider(sslProvider)
          .clientAuth(ClientAuth.REQUIRE)
          .protocols("TLSv1.3")
          .build()

      case SSLContextForClient(info) =>
        SslContextBuilder
          .forClient()
          .keyManager(config.connectionKeyPair.getPrivate, List(config.connectionCertificate): _*)
          .trustManager(new CustomTrustManagerFactory(Some(info.id)))
          .sslProvider(sslProvider)
          .protocols("TLSv1.3")
          .build()
    }
  }

}
100 |
--------------------------------------------------------------------------------
/scalanet/src/io/iohk/scalanet/peergroup/implicits.scala:
--------------------------------------------------------------------------------
1 | package io.iohk.scalanet.peergroup
2 |
3 | import cats.effect.concurrent.Deferred
4 | import monix.eval.Task
5 | import monix.tail.Iterant
6 | import monix.reactive.Observable
7 | import io.iohk.scalanet.peergroup.PeerGroup.ServerEvent
8 | import io.iohk.scalanet.peergroup.Channel.ChannelEvent
9 |
package object implicits {
  // Functions to be applied on the `.nextChannelEvent()` or `.nextServerEvent()` results.
  implicit class NextOps[A](val next: Task[Option[A]]) extends AnyVal {
    // Re-evaluates `next` until it yields None, i.e. the stream of events ends.
    // `_.get` is safe here because takeWhile only lets defined values through.
    def toIterant: Iterant[Task, A] =
      Iterant.repeatEvalF(next).takeWhile(_.isDefined).map(_.get)

    // Same pull-until-None semantics as toIterant, but as an Observable.
    def toObservable: Observable[A] =
      Observable.repeatEvalF(next).takeWhile(_.isDefined).map(_.get)

    // Races the cancel token against the next item; if the token completes first,
    // the result is None (stream treated as finished).
    def withCancelToken(token: Deferred[Task, Unit]): Task[Option[A]] =
      Task.race(token.get, next).map {
        case Left(()) => None
        case Right(x) => x
      }
  }

  implicit class PeerGroupOps[A, M](val group: PeerGroup[A, M]) extends AnyVal {
    // Streams server events until the group stops producing them.
    def serverEventObservable: Observable[ServerEvent[A, M]] =
      group.nextServerEvent.toObservable
  }

  implicit class ChannelOps[A, M](val channel: Channel[A, M]) extends AnyVal {
    // NB: Not making an equivalent version for Iterant because it doesn't support timeout
    // directly; instead, use `next().timeout(5.second).toIterant`
    def channelEventObservable: Observable[ChannelEvent[M]] =
      channel.nextChannelEvent.toObservable
  }
}
38 |
--------------------------------------------------------------------------------
/scalanet/src/io/iohk/scalanet/peergroup/package.scala:
--------------------------------------------------------------------------------
1 | package io.iohk.scalanet
2 |
3 | import monix.eval.Task
4 |
package object peergroup {
  // Task that closes a PeerGroup or Channel, releasing its underlying resources.
  type Release = Task[Unit]
}
9 |
--------------------------------------------------------------------------------
/scalanet/ut/resources/alice.pem:
--------------------------------------------------------------------------------
1 | -----BEGIN CERTIFICATE-----
2 | MIIB4zCCAYagAwIBAgIEXU5pKzAMBggqhkjOPQQDAgUAME0xDjAMBgNVBAMTBWNm
3 | LWNhMQ4wDAYDVQQLEwVBdGFsYTENMAsGA1UEChMESU9ISzEPMA0GA1UEBxMGTG9u
4 | ZG9uMQswCQYDVQQGEwJVSzAeFw0xOTA1MjgyMjIzNTFaFw0yMDA1MjcyMjIzNTFa
5 | MFAxETAPBgNVBAMTCGNmLWFsaWNlMQ4wDAYDVQQLEwVBdGFsYTENMAsGA1UEChME
6 | SU9ISzEPMA0GA1UEBxMGTG9uZG9uMQswCQYDVQQGEwJVSzBZMBMGByqGSM49AgEG
7 | CCqGSM49AwEHA0IABNHEw9OxLSU6cY5yPeotascgqTDhKLBldmh3wlxxbYscf8P7
8 | JKNHwu+4EQh9cn+1bbZVjGSqhVyI4LB0IJk/WdyjTzBNMB8GA1UdIwQYMBaAFGqb
9 | 3r8iTSElyv9H81uQJ5yj/+EkMAsGA1UdDwQEAwIHgDAdBgNVHQ4EFgQUf2NSWigq
10 | K3UvZrXoGANOt3Nn5wAwDAYIKoZIzj0EAwIFAANJADBGAiEA5sJMM/VTHe3rzZn2
11 | IqD0fYAzywhnXJIjrYlu4FQZX5gCIQCn4u+wG8jdG2Yay1HntvMkUBvIBdZeknGo
12 | dq/MN3YdxQ==
13 | -----END CERTIFICATE-----
14 | -----BEGIN CERTIFICATE-----
15 | MIIB5TCCAYmgAwIBAgIEZO2nbjAMBggqhkjOPQQDAgUAME8xEDAOBgNVBAMTB2Nm
16 | LXJvb3QxDjAMBgNVBAsTBUF0YWxhMQ0wCwYDVQQKEwRJT0hLMQ8wDQYDVQQHEwZM
17 | b25kb24xCzAJBgNVBAYTAlVLMB4XDTE5MDUyODIyMjM1MFoXDTIwMDUyNzIyMjM1
18 | MFowTTEOMAwGA1UEAxMFY2YtY2ExDjAMBgNVBAsTBUF0YWxhMQ0wCwYDVQQKEwRJ
19 | T0hLMQ8wDQYDVQQHEwZMb25kb24xCzAJBgNVBAYTAlVLMFkwEwYHKoZIzj0CAQYI
20 | KoZIzj0DAQcDQgAEZJmzU7njOmu+dW6jQsbMevlBEkxg2B+Jz7i2zS4Kcg5OxwMc
21 | nEe3JJ6Z1G4SVnK1+GvZACsipI2nlvE3tBUR/6NTMFEwHwYDVR0jBBgwFoAUJzaj
22 | Yji//4A+7ZIibyHEcBFb348wDwYDVR0TBAgwBgEB/wIBADAdBgNVHQ4EFgQUapve
23 | vyJNISXK/0fzW5AnnKP/4SQwDAYIKoZIzj0EAwIFAANIADBFAiEAvIDbMPGiVnO6
24 | pIzO90dElY1iVwMKqYAKtRvO6elH4uICICIlVEEmxf6eu3CsxcV0e4aeiAbeJryp
25 | FsnrGw3xgFcX
26 | -----END CERTIFICATE-----
27 |
--------------------------------------------------------------------------------
/scalanet/ut/resources/bob.pem:
--------------------------------------------------------------------------------
1 | -----BEGIN CERTIFICATE-----
2 | MIIB4DCCAYSgAwIBAgIEEosjCjAMBggqhkjOPQQDAgUAME0xDjAMBgNVBAMTBWNm
3 | LWNhMQ4wDAYDVQQLEwVBdGFsYTENMAsGA1UEChMESU9ISzEPMA0GA1UEBxMGTG9u
4 | ZG9uMQswCQYDVQQGEwJVSzAeFw0xOTA1MjgyMjIzNTZaFw0yMDA1MjcyMjIzNTZa
5 | ME4xDzANBgNVBAMTBmNmLWJvYjEOMAwGA1UECxMFQXRhbGExDTALBgNVBAoTBElP
6 | SEsxDzANBgNVBAcTBkxvbmRvbjELMAkGA1UEBhMCVUswWTATBgcqhkjOPQIBBggq
7 | hkjOPQMBBwNCAASISfvZevGooKUYwQdydJB2I6DvAJ7uLhodSdGWIN5XDa1Ltjkn
8 | xKRu/K6yq+hxBXGsH9llOh7gZuQUyY6uji4Wo08wTTAfBgNVHSMEGDAWgBRqm96/
9 | Ik0hJcr/R/NbkCeco//hJDALBgNVHQ8EBAMCB4AwHQYDVR0OBBYEFLv30J3pRLYO
10 | 0bMMZvU28AYqBpjqMAwGCCqGSM49BAMCBQADSAAwRQIhAKDzIxqZUfu6+K9/z21b
11 | +ox+VgFRywNXNh3tiJxh288eAiBJlw/uuwopGPB5CXpXm8sMRgp59nncVksPvDb9
12 | zM6hqg==
13 | -----END CERTIFICATE-----
14 | -----BEGIN CERTIFICATE-----
15 | MIIB5TCCAYmgAwIBAgIEZO2nbjAMBggqhkjOPQQDAgUAME8xEDAOBgNVBAMTB2Nm
16 | LXJvb3QxDjAMBgNVBAsTBUF0YWxhMQ0wCwYDVQQKEwRJT0hLMQ8wDQYDVQQHEwZM
17 | b25kb24xCzAJBgNVBAYTAlVLMB4XDTE5MDUyODIyMjM1MFoXDTIwMDUyNzIyMjM1
18 | MFowTTEOMAwGA1UEAxMFY2YtY2ExDjAMBgNVBAsTBUF0YWxhMQ0wCwYDVQQKEwRJ
19 | T0hLMQ8wDQYDVQQHEwZMb25kb24xCzAJBgNVBAYTAlVLMFkwEwYHKoZIzj0CAQYI
20 | KoZIzj0DAQcDQgAEZJmzU7njOmu+dW6jQsbMevlBEkxg2B+Jz7i2zS4Kcg5OxwMc
21 | nEe3JJ6Z1G4SVnK1+GvZACsipI2nlvE3tBUR/6NTMFEwHwYDVR0jBBgwFoAUJzaj
22 | Yji//4A+7ZIibyHEcBFb348wDwYDVR0TBAgwBgEB/wIBADAdBgNVHQ4EFgQUapve
23 | vyJNISXK/0fzW5AnnKP/4SQwDAYIKoZIzj0EAwIFAANIADBFAiEAvIDbMPGiVnO6
24 | pIzO90dElY1iVwMKqYAKtRvO6elH4uICICIlVEEmxf6eu3CsxcV0e4aeiAbeJryp
25 | FsnrGw3xgFcX
26 | -----END CERTIFICATE-----
27 |
--------------------------------------------------------------------------------
/scalanet/ut/resources/create-keystore.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash +x
2 |
3 | KEY_STORE=keystore
4 | KEY_STORE_PWD=password
5 | VALIDITY=365
6 |
7 | TRUST_STORE=truststore
8 | TRUST_STORE_PWD=password
9 |
10 | rm -f ${KEY_STORE}.p12 ${KEY_STORE}.jks ${TRUST_STORE}.jks ${TRUST_STORE}.p12
11 |
12 |
13 | echo "creating root key and certificate..."
14 | keytool -genkeypair -alias root -keyalg EC -dname 'C=UK,L=London,O=IOHK,OU=Atala,CN=cf-root' \
15 | -ext BC=ca:true -validity ${VALIDITY} -keypass ${TRUST_STORE_PWD} -keystore ${TRUST_STORE}.jks -storepass ${TRUST_STORE_PWD}
16 |
17 | echo "creating CA key and certificate..."
18 | keytool -genkeypair -alias ca -keyalg EC -dname 'C=UK,L=London,O=IOHK,OU=Atala,CN=cf-ca' \
19 | -ext BC=ca:true -validity ${VALIDITY} -keypass ${TRUST_STORE_PWD} -keystore ${TRUST_STORE}.jks -storepass ${TRUST_STORE_PWD}
20 | keytool -keystore ${TRUST_STORE}.jks -storepass ${TRUST_STORE_PWD} -certreq -alias ca | \
21 | keytool -keystore ${TRUST_STORE}.jks -storepass ${TRUST_STORE_PWD} -alias root -gencert -validity ${VALIDITY} -ext BC=0 -rfc | \
22 | keytool -alias ca -importcert -keystore ${TRUST_STORE}.jks -storepass ${TRUST_STORE_PWD}
23 |
24 | echo "creating alice's key and certificate..."
25 | keytool -genkeypair -alias alice -keyalg EC -dname 'C=UK,L=London,O=IOHK,OU=Atala,CN=cf-alice' \
26 | -validity ${VALIDITY} -keypass ${KEY_STORE_PWD} -keystore ${KEY_STORE}.jks -storepass ${KEY_STORE_PWD}
27 | keytool -keystore ${KEY_STORE}.jks -storepass ${KEY_STORE_PWD} -certreq -alias alice | \
28 | keytool -keystore ${TRUST_STORE}.jks -storepass ${TRUST_STORE_PWD} -alias ca -gencert -ext KU=dig -validity ${VALIDITY} -rfc > alice.pem
29 | keytool -alias alice -importcert -keystore ${KEY_STORE}.jks -storepass ${KEY_STORE_PWD} -trustcacerts -file alice.pem
30 |
31 | echo "creating bob's key and certificate..."
32 | keytool -genkeypair -alias bob -keyalg EC -dname 'C=UK,L=London,O=IOHK,OU=Atala,CN=cf-bob' \
33 | -validity ${VALIDITY} -keypass ${KEY_STORE_PWD} -keystore ${KEY_STORE}.jks -storepass ${KEY_STORE_PWD}
34 | keytool -keystore ${KEY_STORE}.jks -storepass ${KEY_STORE_PWD} -certreq -alias bob | \
35 | keytool -keystore ${TRUST_STORE}.jks -storepass ${TRUST_STORE_PWD} -alias ca -gencert -ext KU=dig -validity ${VALIDITY} -rfc > bob.pem
36 | keytool -alias bob -importcert -keystore ${KEY_STORE}.jks -storepass ${KEY_STORE_PWD} -trustcacerts -file bob.pem
37 |
38 | echo "exporting keys into PKCS#12 format"
39 | keytool -v -importkeystore -srckeystore ${KEY_STORE}.jks -srcstorepass ${KEY_STORE_PWD} \
40 | -destkeystore ${KEY_STORE}.p12 -deststorepass ${KEY_STORE_PWD} -deststoretype PKCS12
41 | keytool -v -importkeystore -srckeystore ${TRUST_STORE}.jks -srcstorepass ${TRUST_STORE_PWD} \
42 | -destkeystore ${TRUST_STORE}.p12 -deststorepass ${TRUST_STORE_PWD} -deststoretype PKCS12
43 |
44 |
--------------------------------------------------------------------------------
/scalanet/ut/resources/keystore.jks:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/input-output-hk/scalanet/fce50a156a5be213093cc8dec65cc6d6e9af5a6e/scalanet/ut/resources/keystore.jks
--------------------------------------------------------------------------------
/scalanet/ut/resources/keystore.p12:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/input-output-hk/scalanet/fce50a156a5be213093cc8dec65cc6d6e9af5a6e/scalanet/ut/resources/keystore.p12
--------------------------------------------------------------------------------
/scalanet/ut/resources/logback-test.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 | %t %0logger %-5level %msg%n
7 |
8 |
9 |
10 |
11 |
12 |
13 |
14 |
15 |
16 |
17 |
--------------------------------------------------------------------------------
/scalanet/ut/resources/truststore.jks:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/input-output-hk/scalanet/fce50a156a5be213093cc8dec65cc6d6e9af5a6e/scalanet/ut/resources/truststore.jks
--------------------------------------------------------------------------------
/scalanet/ut/resources/truststore.p12:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/input-output-hk/scalanet/fce50a156a5be213093cc8dec65cc6d6e9af5a6e/scalanet/ut/resources/truststore.p12
--------------------------------------------------------------------------------
/scalanet/ut/src/io/iohk/scalanet/NetUtils.scala:
--------------------------------------------------------------------------------
1 | package io.iohk.scalanet
2 |
3 | import java.net._
4 | import java.nio.ByteBuffer
5 | import java.security.KeyStore
6 | import java.security.cert.Certificate
7 | import scala.util.Random
8 | import scala.annotation.nowarn
9 |
object NetUtils {

  // Test key material loaded once from classpath resources.
  val keyStore: KeyStore = loadKeyStore("keystore.p12", "password")
  val trustStore: KeyStore = loadKeyStore("truststore.p12", "password")
  @nowarn
  val trustedCerts: Array[Certificate] = {
    import scala.collection.JavaConverters._
    trustStore.aliases().asScala.toArray.map(trustStore.getCertificate(_))
  }

  /** Loads a PKCS#12 keystore from the classpath. */
  def loadKeyStore(keystoreLocation: String, keystorePassword: String): KeyStore = {
    val store = KeyStore.getInstance("PKCS12")
    val resource = NetUtils.getClass.getClassLoader.getResourceAsStream(keystoreLocation)
    store.load(resource, keystorePassword.toCharArray)
    store
  }

  /** Picks a currently free TCP port by briefly binding a server socket to port 0. */
  def aRandomAddress(): InetSocketAddress = {
    val probe = new ServerSocket(0)
    try new InetSocketAddress("localhost", probe.getLocalPort)
    finally probe.close()
  }

  /** True if a TCP connection to the address can be established. */
  def isListening(address: InetSocketAddress): Boolean =
    try {
      new Socket(address.getHostName, address.getPort).close()
      true
    } catch {
      case _: Exception => false
    }

  /** True if the UDP port is already bound: binding to it fails when it is in use. */
  def isListeningUDP(address: InetSocketAddress): Boolean =
    try {
      new DatagramSocket(address).close()
      false
    } catch {
      case _: Exception => true
    }

  /** Copies the buffer's remaining bytes into a fresh array, consuming them. */
  def toArray(b: ByteBuffer): Array[Byte] = {
    val out = new Array[Byte](b.remaining())
    b.get(out)
    out
  }

  /** Runs the test code while a TCP server socket keeps the given address busy. */
  def withAddressInUse(testCode: InetSocketAddress => Any): Unit = {
    val address = aRandomAddress()
    val occupier = new ServerSocket(address.getPort, 0, InetAddress.getLoopbackAddress)
    try {
      testCode(address)
      ()
    } finally {
      occupier.close()
    }
  }

  /** Runs the test code while a UDP socket keeps its (system-assigned) address busy. */
  def withUDPAddressInUse(testCode: InetSocketAddress => Any): Unit = {
    val occupier = new DatagramSocket()
    val address = occupier.getLocalSocketAddress.asInstanceOf[InetSocketAddress]
    try {
      testCode(address)
      ()
    } finally {
      occupier.close()
    }
  }

  /** Returns `n` random bytes. */
  def randomBytes(n: Int): Array[Byte] = {
    val bytes = new Array[Byte](n)
    Random.nextBytes(bytes)
    bytes
  }
}
89 |
--------------------------------------------------------------------------------
/scalanet/ut/src/io/iohk/scalanet/PortForward.scala:
--------------------------------------------------------------------------------
1 | package io.iohk.scalanet
2 |
3 | /**
4 | * This program is an example from the book "Internet
5 | * programming with Java" by Svetlin Nakov. It is freeware.
6 | * For more information: http://www.nakov.com/books/inetjava/
7 | */
8 | import java.io._
9 | import java.net._
10 |
11 | import PortForward._
12 |
/**
  * TCPForwardServer is a simple TCP bridging software that
  * allows a TCP port on some host to be transparently forwarded
  * to some other TCP port on some other host. TCPForwardServer
  * continuously accepts client connections on the listening TCP
  * port (source port) and starts a thread (ClientThread) that
  * connects to the destination host and starts forwarding the
  * data between the client socket and destination socket.
  */
class PortForward(sourcePort: Int, dest: InetSocketAddress) {

  private val serverSocket: ServerSocket = new ServerSocket(sourcePort)

  // Accepts clients forever; each accepted connection is served by its own ClientThread.
  // NOTE(review): this loop never returns normally — stop() closes the server socket,
  // which presumably makes accept() throw and terminate the loop; confirm callers expect that.
  def start(): Unit = {
    while (true) {
      val clientSocket: Socket = serverSocket.accept()
      val clientThread: ClientThread = new ClientThread(clientSocket, dest.getHostName, dest.getPort)
      clientThread.start()
    }
  }

  // Stops accepting new connections by closing the listening socket.
  def stop(): Unit = {
    println("stopping the forward")
    serverSocket.close()
  }
}
39 |
object PortForward {

  /**
    * ClientThread is responsible for starting forwarding between
    * the client and the server. It keeps track of the client and
    * server sockets that are both closed on input/output error
    * during the forwarding. The forwarding is bidirectional and
    * is performed by two ForwardThread instances.
    */
  class ClientThread(clientSocket: Socket, destHost: String, destPort: Int) extends Thread {

    // Set to true once both ForwardThreads are running; reset if startup fails.
    private var forwardingActive: Boolean = false

    /**
      * Establishes connection to the destination server and
      * starts bidirectional forwarding of data between the
      * client and the server.
      */
    override def run(): Unit = {

      // Connect to the destination server
      val serverSocket = new Socket(destHost, destPort)

      try {

        // Turn on keep-alive for both the sockets
        serverSocket.setKeepAlive(true);
        clientSocket.setKeepAlive(true);

        // Obtain client & server input & output streams
        val clientIn = clientSocket.getInputStream
        val clientOut = clientSocket.getOutputStream
        val serverIn = serverSocket.getInputStream
        val serverOut = serverSocket.getOutputStream

        // Start forwarding data between server and client
        forwardingActive = true
        val clientForward: ForwardThread = new ForwardThread(this, clientIn, serverOut)
        clientForward.start()

        val serverForward: ForwardThread = new ForwardThread(this, serverIn, clientOut)
        serverForward.start()

        println(
          "TCP Forwarding " +
            clientSocket.getInetAddress.getHostAddress +
            ":" + clientSocket.getPort + " <--> " +
            serverSocket.getInetAddress.getHostAddress +
            ":" + serverSocket.getPort + " started."
        );

      } catch {
        // Startup failed: close both sockets so neither side is left dangling.
        case _: IOException =>
          println(
            "TCP Forwarding " +
              clientSocket.getInetAddress.getHostAddress
              + ":" + clientSocket.getPort + " <--> " +
              serverSocket.getInetAddress.getHostAddress
              + ":" + serverSocket.getPort + " stopped."
          )
          forwardingActive = false
          serverSocket.close()
          clientSocket.close()
      }
    }
  }

  /**
    * ForwardThread handles the TCP forwarding between a socket
    * input stream (source) and a socket output stream (dest).
    * It reads the input stream and forwards everything to the
    * output stream. If one of the streams fails, the forwarding
    * stops and both streams are closed.
    */
  class ForwardThread(parent: ClientThread, inputStream: InputStream, outputStream: OutputStream) extends Thread {

    // Size of the copy buffer used for each read/write cycle.
    val BUFFER_SIZE: Int = 8192

    /**
      * Runs the thread. Continuously reads the input stream and
      * writes the read data to the output stream until EOF (-1).
      * On an I/O error, forwarding stops and both streams are
      * closed so the opposite ForwardThread unblocks as well.
      */
    override def run(): Unit = {
      val buffer: Array[Byte] = new Array(BUFFER_SIZE)
      try {
        var bytesRead: Int = inputStream.read(buffer)
        while (bytesRead != -1) {
          outputStream.write(buffer, 0, bytesRead)
          outputStream.flush()
          bytesRead = inputStream.read(buffer)
        }
      } catch {
        case _: IOException =>
          // Was `???` (threw NotImplementedError on any I/O failure, killing the
          // thread with an uncaught error). Instead, close both ends so the peer
          // sees EOF and the companion ForwardThread terminates too.
          try inputStream.close()
          catch { case _: IOException => () }
          try outputStream.close()
          catch { case _: IOException => () }
      }
    }
  }
}
139 |
--------------------------------------------------------------------------------
/scalanet/ut/src/io/iohk/scalanet/TaskValues.scala:
--------------------------------------------------------------------------------
1 | package io.iohk.scalanet
2 |
3 | import monix.eval.Task
4 | import monix.execution.Scheduler
5 | import org.scalatest.concurrent.ScalaFutures._
6 |
object TaskValues {

  /** Convenience syntax for evaluating monix Tasks synchronously in tests. */
  implicit class TaskOps[T](task: Task[T]) {

    /** Runs the task and waits (within the patience config) for its successful result. */
    def evaluated(implicit scheduler: Scheduler, patienceConfig: PatienceConfig): T =
      task.runToFuture.futureValue

    /** Runs the task and waits for it to fail, returning the failure. */
    def evaluatedFailure(implicit scheduler: Scheduler, patienceConfig: PatienceConfig): Throwable =
      task.runToFuture.failed.futureValue
  }
}
18 |
--------------------------------------------------------------------------------
/scalanet/ut/src/io/iohk/scalanet/crypto/SignatureVerificationSpec.scala:
--------------------------------------------------------------------------------
1 | package io.iohk.scalanet.crypto
2 |
3 | import java.security.SecureRandom
4 |
5 | import io.iohk.scalanet.peergroup.dynamictls.Secp256k1
6 | import org.scalatest.{FlatSpec, Matchers}
7 | import org.scalatestplus.scalacheck.ScalaCheckDrivenPropertyChecks._
8 | import io.iohk.scalanet.testutils.GeneratorUtils
9 | import org.scalacheck.Gen
10 |
class SignatureVerificationSpec extends FlatSpec with Matchers {
  val rnd = new SecureRandom()
  val minDataSize = 0
  val maxDataSize = 1024

  "Signature verification" should "success for all properly signed data" in {
    forAll(GeneratorUtils.randomSizeByteArrayGen(minDataSize, maxDataSize)) { data =>
      // Sign and verify with the same key pair: must always succeed.
      val keyPair = CryptoUtils.genEcKeyPair(rnd, Secp256k1.curveName)
      val signature = CryptoUtils.signEcdsa(data, keyPair.getPrivate, rnd)
      assert(CryptoUtils.verifyEcdsa(data, signature, keyPair.getPublic))
    }
  }

  it should "success for bouncy castle converted keys" in {
    forAll(GeneratorUtils.randomSizeByteArrayGen(minDataSize, maxDataSize)) { data =>
      // A BC key pair converted to JCE must remain usable for sign/verify.
      val bcPair = CryptoUtils.generateKeyPair(rnd)
      val jcePair = CryptoUtils.convertBcToJceKeyPair(bcPair)
      val signature = CryptoUtils.signEcdsa(data, jcePair.getPrivate, rnd)
      assert(CryptoUtils.verifyEcdsa(data, signature, jcePair.getPublic))
    }
  }

  it should "fail when verifying with wrong public key" in {
    forAll(GeneratorUtils.randomSizeByteArrayGen(minDataSize, maxDataSize)) { data =>
      // Verification against an unrelated public key must fail.
      val signerPair = CryptoUtils.genEcKeyPair(rnd, Secp256k1.curveName)
      val otherPair = CryptoUtils.genEcKeyPair(rnd, Secp256k1.curveName)
      val signature = CryptoUtils.signEcdsa(data, signerPair.getPrivate, rnd)
      assert(!CryptoUtils.verifyEcdsa(data, signature, otherPair.getPublic))
    }
  }

  it should "fail when verifying with changed signature " in {
    forAll(GeneratorUtils.randomSizeByteArrayGen(minDataSize, maxDataSize), Gen.choose(10, 40)) { (data, idx) =>
      // Flipping a byte of the signature must invalidate it.
      val keyPair = CryptoUtils.genEcKeyPair(rnd, Secp256k1.curveName)
      val signature = CryptoUtils.signEcdsa(data, keyPair.getPrivate, rnd)
      signature(idx) = (signature(idx) + 1).toByte
      assert(!CryptoUtils.verifyEcdsa(data, signature, keyPair.getPublic))
    }
  }

  it should "fail when verifying with changed data " in {
    forAll(GeneratorUtils.randomSizeByteArrayGen(50, maxDataSize), Gen.choose(10, 40)) { (data, idx) =>
      // Mutating the signed data must invalidate the signature.
      val keyPair = CryptoUtils.genEcKeyPair(rnd, Secp256k1.curveName)
      val signature = CryptoUtils.signEcdsa(data, keyPair.getPrivate, rnd)
      data(idx) = (data(idx) + 1).toByte
      assert(!CryptoUtils.verifyEcdsa(data, signature, keyPair.getPublic))
    }
  }

}
68 |
--------------------------------------------------------------------------------
/scalanet/ut/src/io/iohk/scalanet/dynamictls/SignedKeyExtensionSpec.scala:
--------------------------------------------------------------------------------
1 | package io.iohk.scalanet.dynamictls
2 |
3 | import java.security.SecureRandom
4 | import io.iohk.scalanet.crypto.CryptoUtils
5 | import io.iohk.scalanet.crypto.CryptoUtils.{SHA256withECDSA, Secp256r1}
6 | import io.iohk.scalanet.peergroup.dynamictls.DynamicTLSExtension.{
7 | ExtensionPublicKey,
8 | SignedKey,
9 | SignedKeyExtensionNodeData
10 | }
11 | import io.iohk.scalanet.peergroup.dynamictls.Secp256k1
12 | import io.iohk.scalanet.testutils.GeneratorUtils
13 | import org.scalatest.{FlatSpec, Matchers}
14 | import org.scalatestplus.scalacheck.ScalaCheckDrivenPropertyChecks._
15 |
// Tests for the custom TLS certificate extension carrying a signed host public key.
class SignedKeyExtensionSpec extends FlatSpec with Matchers {
  val rnd = new SecureRandom()

  "SignedKeyExtension" should "create extension from correct data" in {
    forAll(GeneratorUtils.genKey(Secp256k1.curveName, rnd)) { keyPair =>
      val extension = ExtensionPublicKey(Secp256k1, keyPair.getPublic)
      assert(extension.isSuccess)
    }
  }

  it should "create extension from converted key" in {
    // Bouncy-castle key pairs converted to JCE must also be accepted.
    val keyPair = CryptoUtils.generateKeyPair(rnd)
    val converted = CryptoUtils.convertBcToJceKeyPair(keyPair)
    val extension = ExtensionPublicKey(Secp256k1, converted.getPublic)
    assert(extension.isSuccess)
  }

  it should "not create extension from wrong key" in {
    // A key on a different curve (secp256r1) must be rejected.
    val keyPair = CryptoUtils.genEcKeyPair(rnd, Secp256r1.name)
    val extension = ExtensionPublicKey(Secp256k1, keyPair.getPublic)
    assert(extension.isFailure)
  }

  it should "encode and decode correct extension" in {
    // Codec round-trip must preserve the extension.
    forAll(GeneratorUtils.genKey(Secp256k1.curveName, rnd)) { keyPair =>
      val extension = ExtensionPublicKey(Secp256k1, keyPair.getPublic).get
      val enc = ExtensionPublicKey.extensionPublicKeyCodec.encode(extension).require
      val dec = ExtensionPublicKey.extensionPublicKeyCodec.decodeValue(enc).require
      assert(extension == dec)
    }
  }

  it should "successfully build extension node data" in {
    forAll(GeneratorUtils.genKey(Secp256k1.curveName, rnd)) { hostKey =>
      val nodeData = SignedKeyExtensionNodeData(Secp256k1, hostKey, Secp256r1, rnd, SHA256withECDSA)
      assert(nodeData.isSuccess)
    }
  }

  it should "successfully parse node data from extension" in {
    // The signed key recovered from the certificate extension must yield the same node id.
    forAll(GeneratorUtils.genKey(Secp256k1.curveName, rnd)) { hostKey =>
      val nodeData = SignedKeyExtensionNodeData(Secp256k1, hostKey, Secp256r1, rnd, SHA256withECDSA).get
      val extensionBytes = nodeData.certWithExtension.getExtensionValue(SignedKey.extensionIdentifier)
      val recoveredSignedKey = SignedKey.parseAsn1EncodedValue(extensionBytes)
      assert(recoveredSignedKey.isSuccessful)
      assert(recoveredSignedKey.require.publicKey.getNodeId == nodeData.calculatedNodeId)
    }
  }

  it should "fail to parse random bytes without throwing exceptions" in {
    // Malformed input must surface as a Failure, never as a thrown exception.
    forAll(GeneratorUtils.randomSizeByteArrayGen(0, 1024)) { randomBytes =>
      val recoveredSignedKey = SignedKey.parseAsn1EncodedValue(randomBytes)
      assert(recoveredSignedKey.isFailure)
    }
  }

}
73 |
--------------------------------------------------------------------------------
/scalanet/ut/src/io/iohk/scalanet/peergroup/CloseableQueueSpec.scala:
--------------------------------------------------------------------------------
1 | package io.iohk.scalanet.peergroup
2 |
3 | import cats.implicits._
4 | import monix.execution.{ChannelType, Scheduler}
5 | import monix.eval.Task
6 | import org.scalatest.{FlatSpec, Matchers}
7 |
8 | import scala.concurrent.duration._
9 | import org.scalatest.compatible.Assertion
10 |
class CloseableQueueSpec extends FlatSpec with Matchers {
  import Scheduler.Implicits.global
  import CloseableQueue.Closed

  /** Creates a fresh queue (capacity 0 means unbounded), runs the test body
    * against it and executes the resulting task with a 5 second timeout. */
  def testQueue(
      capacity: Int = 0
  )(f: CloseableQueue[String] => Task[Assertion]): Unit = {
    CloseableQueue[String](capacity, ChannelType.MPMC).flatMap(f).void.runSyncUnsafe(5.seconds)
  }

  // Fixed label typo: was "ClosableQueue".
  behavior of "CloseableQueue"

  it should "publish a message" in testQueue() { queue =>
    val msg = "Hello!"
    for {
      _ <- queue.tryOffer(msg)
      maybeMessage <- queue.next
    } yield {
      maybeMessage shouldBe Some(msg)
    }
  }

  it should "return None if it's closed" in testQueue() { queue =>
    for {
      _ <- queue.close(discard = true)
      maybeOffered <- queue.tryOffer("Hung up?")
      maybeMessage <- queue.next
    } yield {
      maybeOffered shouldBe Left(Closed)
      maybeMessage shouldBe None
    }
  }

  it should "return all remaining messages if they aren't discarded during close" in testQueue() { queue =>
    for {
      _ <- queue.tryOffer("Foo")
      _ <- queue.tryOffer("Bar")
      _ <- queue.close(discard = false)
      maybeFoo <- queue.next
      maybeBar <- queue.next
      maybeNext <- queue.next
    } yield {
      maybeFoo should not be empty
      maybeBar should not be empty
      maybeNext shouldBe empty
    }
  }

  it should "return none of remaining messages if they are discarded during close" in testQueue() { queue =>
    for {
      _ <- queue.tryOffer("Foo")
      _ <- queue.tryOffer("Bar")
      _ <- queue.close(discard = true)
      maybeNext <- queue.next
    } yield {
      maybeNext shouldBe empty
    }
  }

  it should "not throw if close is called multiple times and use the first discard flag" in testQueue() { queue =>
    for {
      _ <- queue.offer("Spam")
      _ <- queue.close(discard = false)
      _ <- queue.close(discard = true) // Does nothing.
      m <- queue.next
    } yield {
      m shouldBe Some("Spam")
    }
  }

  behavior of "tryOffer"

  it should "discard the latest value if the capacity is reached" in testQueue(capacity = 2) { queue =>
    for {
      offered <- List.range(0, 5).traverse(i => queue.tryOffer(i.toString))
      maybe0 <- queue.next
      maybe1 <- queue.next
      // No more messages at this point, so `next` would block; run it on a
      // fiber and only join it after closing the queue. (This fiber used to
      // shadow its own joined result under the name `maybe3`.)
      nextFiber <- queue.next.start
      _ <- queue.close(discard = false)
      maybeNext <- nextFiber.join
    } yield {
      offered.head shouldBe Right(true)
      offered.last shouldBe Right(false)
      maybe0 should not be empty
      maybe1 should not be empty
      maybeNext shouldBe empty
    }
  }

  behavior of "offer"

  it should "wait until messages are drained if the capacity is reached" in testQueue(capacity = 2) { queue =>
    for {
      _ <- queue.offer("a")
      _ <- queue.offer("b")
      attempt1 <- queue.tryOffer("c")
      offering <- queue.offer("c").start
      _ <- queue.next
      attempt2 <- offering.join
    } yield {
      attempt1 shouldBe Right(false)
      attempt2 shouldBe Right(())
    }
  }

  it should "not wait if the queue is closed" in testQueue() { queue =>
    for {
      _ <- queue.close(discard = true)
      attempt <- queue.offer("Too late.")
    } yield {
      attempt shouldBe Left(Closed)
    }
  }

  it should "be interrupted if the queue is closed while offering" in testQueue(capacity = 2) { queue =>
    for {
      _ <- queue.offer("a")
      _ <- queue.offer("b")
      offering <- queue.offer("c").start
      _ <- queue.close(discard = true)
      offered <- offering.join
    } yield {
      offered shouldBe Left(Closed)
    }
  }

  behavior of "toIterant"

  it should "not do internal buffering" in testQueue() { queue =>
    import implicits.NextOps
    implicit val scheduler = Scheduler.fixedPool("test", 16)
    for {
      _ <- queue.offer("a")
      _ <- queue.offer("b")
      _ <- queue.offer("c")
      // This test is only here to demonstrate that this version with `.share` doesn't work:
      //o = queue.toObservable.share
      // But these ones do:
      //o = queue.toObservable
      o = queue.next.toIterant
      _ <- o.headOptionL
      b <- o.take(1).toListL.timeoutTo(10.millis, Task.now(Nil))
      _ <- queue.close(discard = false)
      c <- queue.next
    } yield {
      b shouldBe List("b")
      c shouldBe Some("c")
    }
  }
}
161 |
--------------------------------------------------------------------------------
/scalanet/ut/src/io/iohk/scalanet/peergroup/ExternalAddressResolverSpec.scala:
--------------------------------------------------------------------------------
1 | package io.iohk.scalanet.peergroup
2 |
3 | import org.scalatest._
4 | import scala.concurrent.duration._
5 | import monix.execution.Scheduler.Implicits.global
6 |
class ExternalAddressResolverSpec extends FlatSpec with Matchers {

  behavior of "ExternalAddressResolver"

  it should "resolve the external IP" in {
    // NOTE: relies on network access; the default resolver queries external services.
    val resolved = ExternalAddressResolver.default.resolve.runSyncUnsafe(5.seconds)
    resolved should not be empty
    resolved.get.isLoopbackAddress shouldBe false
  }

  it should "return None if all resolutions fail" in {
    // Both URLs are invalid, so every lookup fails and the overall result is empty.
    val result = ExternalAddressResolver.checkUrls(List("", "404.html")).runSyncUnsafe(1.seconds)
    result shouldBe empty
  }
}
22 |
--------------------------------------------------------------------------------
/scalanet/ut/src/io/iohk/scalanet/peergroup/InetAddressOpsSpec.scala:
--------------------------------------------------------------------------------
1 | package io.iohk.scalanet.peergroup
2 |
3 | import java.net.InetAddress
4 |
5 | import org.scalatest._
6 |
class InetAddressOpsSpec extends FlatSpec with Matchers with Inspectors {
  import InetAddressOps._

  // One row per address, with the expected value of each classification flag.
  case class TestCase(ip: String, isUnspecified: Boolean = false, isSpecial: Boolean = false, isLAN: Boolean = false)

  val cases = List(
    TestCase("0.0.0.0", isUnspecified = true, isLAN = true),
    TestCase("0.0.0.1", isLAN = true),
    TestCase("0:0:0:0:0:0:0:0", isUnspecified = true),
    TestCase("127.0.0.1", isLAN = true),
    TestCase("::1", isLAN = true),
    TestCase("192.168.1.2", isLAN = true),
    TestCase("192.175.47"),
    TestCase("192.175.48.0", isSpecial = true),
    TestCase("192.175.48.127", isSpecial = true),
    TestCase("192.175.48.255", isSpecial = true),
    TestCase("192.175.49"),
    TestCase("255.255.255.255", isSpecial = true),
    TestCase("2001:4:112::", isSpecial = true),
    TestCase("140.82.121.4")
  )

  behavior of "InetAddressOps"

  it should "correctly calculate each flag" in {
    forAll(cases) { testCase =>
      withClue(testCase.ip) {
        val addr = InetAddress.getByName(testCase.ip)
        withClue("isUnspecified") {
          addr.isUnspecified shouldBe testCase.isUnspecified
        }
        withClue("isSpecial") {
          addr.isSpecial shouldBe testCase.isSpecial
        }
        withClue("isLAN") {
          addr.isLAN shouldBe testCase.isLAN
        }
      }
    }
  }

  behavior of "truncate"

  it should "truncate the first N bits" in {
    // Keeping only the first 24 bits zeroes the host part of the address.
    val address = InetAddress.getByName("192.175.48.127")
    address.truncate(24) shouldBe InetAddress.getByName("192.175.48.0")
  }
}
56 |
--------------------------------------------------------------------------------
/scalanet/ut/src/io/iohk/scalanet/peergroup/PeerUtils.scala:
--------------------------------------------------------------------------------
1 | package io.iohk.scalanet.peergroup
2 |
3 | import monix.eval.Task
4 | import monix.execution.Scheduler
5 |
6 | import scala.language.higherKinds
7 |
object PeerUtils {

  /** Summons the implicitly available `PeerUtils` instance for the given types. */
  def apply[A, M, PG[_, _]](implicit tu: PeerUtils[A, M, PG]): PeerUtils[A, M, PG] = tu

  /** Builds a `PeerUtils` instance from the four operations it abstracts over.
    *
    * @param isLis checks whether the peer group is listening for connections
    * @param generateRandomPG creates a fresh peer group on a random address
    * @param shutd releases the peer group's resources
    * @param init starts the peer group
    */
  def instance[A, M, PG[_, _]](
      isLis: PG[A, M] => Boolean,
      generateRandomPG: () => PG[A, M],
      shutd: PG[A, M] => Task[Unit],
      init: PG[A, M] => Task[Unit]
  ): PeerUtils[A, M, PG] = new PeerUtils[A, M, PG] {
    override def isListening(peer: PG[A, M]): Boolean = isLis(peer)
    override def generateRandomPeerGroup(): PG[A, M] = generateRandomPG()
    override def shutdown(peer: PG[A, M]): Task[Unit] = shutd(peer)
    override def initialize(peer: PG[A, M]): Task[Unit] = init(peer)
  }
}
24 |
/** Abstracts the peer-group lifecycle operations the tests need: creating a
  * random peer group, initializing it, checking it is listening, and shutting
  * it down. Instantiated per transport via `PeerUtils.instance`.
  */
trait PeerUtils[A, M, PG[_, _]] {
  def isListening(peer: PG[A, M]): Boolean
  def generateRandomPeerGroup(): PG[A, M]
  def shutdown(peer: PG[A, M]): Task[Unit]
  def initialize(peer: PG[A, M]): Task[Unit]

  /** Creates and initializes two random peer groups, runs the test function
    * with them, and schedules both for shutdown afterwards.
    */
  def withTwoRandomPeerGroups(
      testFunction: (PG[A, M], PG[A, M]) => Any
  )(implicit sc: Scheduler): Unit = {
    val alice = generateRandomPeerGroup()
    val bob = generateRandomPeerGroup()
    // NOTE(review): initialization and shutdown are fire-and-forget — the
    // `runToFuture` results are never awaited — so `testFunction` may run
    // before either peer finished initializing. Confirm callers tolerate this.
    initialize(alice).runToFuture
    initialize(bob).runToFuture
    try {
      testFunction(alice, bob)
      ()
    } finally {
      shutdown(alice).runToFuture
      shutdown(bob).runToFuture
      ()
    }
  }
}
48 |
--------------------------------------------------------------------------------
/scalanet/ut/src/io/iohk/scalanet/peergroup/StandardTestPack.scala:
--------------------------------------------------------------------------------
1 | package io.iohk.scalanet.peergroup
2 |
3 | import io.iohk.scalanet.peergroup.implicits._
4 | import io.iohk.scalanet.peergroup.Channel.MessageReceived
5 | import io.iohk.scalanet.peergroup.PeerGroup.ChannelSetupException
6 | import io.iohk.scalanet.peergroup.PeerGroup.ServerEvent._
7 | import monix.execution.Scheduler
8 | import monix.eval.Task
9 | import org.scalatest.Matchers._
10 | import org.scalatest.RecoverMethods.{recoverToExceptionIf}
11 | import org.scalatest.concurrent.ScalaFutures._
12 | import scala.util.Random
13 |
object StandardTestPack {

  // Test that Alice can send a message to Bob and receive an answer.
  def messagingTest[A](alice: PeerGroup[A, String], bob: PeerGroup[A, String])(
      implicit scheduler: Scheduler
  ): Task[Unit] = {
    val alicesMessage = "Hi Bob!"
    val bobsMessage = "Hi Alice!"

    (for {
      // Bob answers the first message on the first incoming channel, then releases it.
      bobReceiver <- bob.serverEventObservable.collectChannelCreated
        .mergeMap {
          case (channel, release) =>
            channel.channelEventObservable
              .collect {
                case MessageReceived(m) => m
              }
              .take(1)
              .mapEval { msg =>
                channel.sendMessage(bobsMessage).as(msg)
              }
              .guarantee(release)
        }
        .headL
        .start

      aliceClient <- alice.client(bob.processAddress).allocated

      aliceReceiver <- aliceClient._1.channelEventObservable
        .collect {
          case MessageReceived(m) => m
        }
        .headL
        .start

      _ <- aliceClient._1.sendMessage(alicesMessage)

      bobReceived <- bobReceiver.join
      aliceReceived <- aliceReceiver.join
      // Run the allocated client's release action.
      _ <- aliceClient._2
    } yield {
      bobReceived shouldBe alicesMessage
      aliceReceived shouldBe bobsMessage
    }).void
  }

  // Same as messagingTest but without using the ConnectableObservables.
  def messagingTestNext[A](alice: PeerGroup[A, String], bob: PeerGroup[A, String])(
      implicit scheduler: Scheduler
  ): Task[Unit] = {
    val alicesMessage = "Hi Bob!"
    val bobsMessage = "Hi Alice!"

    def sendAndReceive(msgOut: String, channel: Channel[A, String]): Task[String] = {
      channel.sendMessage(msgOut) >>
        // In this test we know there shouldn't be any other message arriving; in practice we'd use an Iterant.
        channel.nextChannelEvent
          .flatMap {
            case Some(MessageReceived(msgIn)) => Task.pure(msgIn)
            case other => Task.raiseError(new RuntimeException(s"Unexpected channel event: $other"))
          }
    }

    (for {
      // In this test we know there won't be any other incoming connection; in practice we'd spawn a Fiber.
      bobReceiver <- bob.nextServerEvent.flatMap {
        case Some(ChannelCreated(channel, release)) =>
          sendAndReceive(bobsMessage, channel).guarantee(release)
        case other =>
          Task.raiseError(new RuntimeException(s"Unexpected server event: $other"))
      }.start

      aliceReceived <- alice.client(bob.processAddress).use { channel =>
        sendAndReceive(alicesMessage, channel)
      }

      bobReceived <- bobReceiver.join
    } yield {
      bobReceived shouldBe alicesMessage
      aliceReceived shouldBe bobsMessage
    }).void
  }

  // Test that Alice can send messages to Bob concurrently and receive answers on both channels.
  def serverMultiplexingTest[A](alice: PeerGroup[A, String], bob: PeerGroup[A, String])(
      implicit scheduler: Scheduler
  ): Task[Unit] = {
    val alicesMessage = Random.alphanumeric.take(1024).mkString
    val bobsMessage = Random.alphanumeric.take(1024).mkString

    (for {
      // Bob greets every incoming channel with his message and releases it.
      _ <- bob.serverEventObservable.collectChannelCreated.foreachL {
        case (channel, release) => channel.sendMessage(bobsMessage).guarantee(release).runAsyncAndForget
      }.startAndForget

      aliceClient1 <- alice.client(bob.processAddress).allocated
      aliceClient2 <- alice.client(bob.processAddress).allocated

      aliceReceiver1 <- aliceClient1._1.channelEventObservable.collect { case MessageReceived(m) => m }.headL.start
      aliceReceiver2 <- aliceClient2._1.channelEventObservable.collect { case MessageReceived(m) => m }.headL.start
      _ <- aliceClient1._1.sendMessage(alicesMessage)
      _ <- aliceClient2._1.sendMessage(alicesMessage)

      aliceReceived1 <- aliceReceiver1.join
      aliceReceived2 <- aliceReceiver2.join
      _ <- aliceClient1._2
      _ <- aliceClient2._2
    } yield {
      aliceReceived1 shouldBe bobsMessage
      aliceReceived2 shouldBe bobsMessage
    }).void
  }

  // Test that opening a channel to an invalid address fails with a ChannelSetupException.
  def shouldErrorForMessagingAnInvalidAddress[A](alice: PeerGroup[A, String], invalidAddress: A)(
      implicit scheduler: Scheduler
  ): Unit = {

    // Use the generic address type A: the previous hard-coded
    // ChannelSetupException[InetMultiAddress] only matched other address
    // types by virtue of type erasure, and read as if it were UDP-specific.
    val aliceError = recoverToExceptionIf[ChannelSetupException[A]] {
      alice.client(invalidAddress).use(_ => Task.unit).runToFuture
    }

    aliceError.futureValue.to shouldBe invalidAddress
    ()
  }
}
139 |
--------------------------------------------------------------------------------
/scalanet/ut/src/io/iohk/scalanet/peergroup/TestMessage.scala:
--------------------------------------------------------------------------------
1 | package io.iohk.scalanet.peergroup
2 |
3 | import scodec.codecs.{Discriminated, Discriminator, uint4}
4 |
5 | // A message we can use in tests with a format that's definitely not ambiguous with the encoding of a String.
sealed trait TestMessage[A]
object TestMessage {
  // The two concrete cases carry the same payload and differ only by their discriminator tag.
  case class Foo[A](value: A) extends TestMessage[A]
  case class Bar[A](value: A) extends TestMessage[A]

  // scodec derivation support: messages are tagged with a 4-bit integer discriminator.
  implicit def testMessageDiscriminator[A]: Discriminated[TestMessage[A], Int] =
    Discriminated[TestMessage[A], Int](uint4)

  // Foo is encoded with tag 0.
  implicit def fooDiscriminator[A]: Discriminator[TestMessage[A], Foo[A], Int] =
    Discriminator[TestMessage[A], Foo[A], Int](0)

  // Bar is encoded with tag 1.
  implicit def barDiscriminator[A]: Discriminator[TestMessage[A], Bar[A], Int] =
    Discriminator[TestMessage[A], Bar[A], Int](1)
}
20 |
--------------------------------------------------------------------------------
/scalanet/ut/src/io/iohk/scalanet/peergroup/ThrottlingIpFilterSpec.scala:
--------------------------------------------------------------------------------
1 | package io.iohk.scalanet.peergroup
2 |
3 | import io.iohk.scalanet.peergroup.dynamictls.CustomHandlers.ThrottlingIpFilter
4 | import io.iohk.scalanet.peergroup.dynamictls.DynamicTLSPeerGroup.IncomingConnectionThrottlingConfig
5 | import io.netty.channel.ChannelHandlerContext
6 | import org.scalatest.FlatSpec
7 | import org.scalatest.concurrent.Eventually
8 | import org.scalatest.time.{Millis, Span}
9 | import org.scalatestplus.mockito.MockitoSugar.mock
10 |
11 | import java.net.InetSocketAddress
12 | import scala.concurrent.duration._
13 |
class ThrottlingIpFilterSpec extends FlatSpec with Eventually {
  // The throttling window in TestSetup is 500ms, so let `eventually` wait slightly longer.
  override implicit val patienceConfig = PatienceConfig(timeout = Span(600, Millis))

  "ThrottlingIpFilter" should "do not accept connection from same ip one after another" in new TestSetup {
    assert(filter.accept(ctx, firstIp))
    assert(!filter.accept(ctx, firstIp))
  }

  it should "allow connection from different ips" in new TestSetup {
    assert(filter.accept(ctx, firstIp))
    assert(filter.accept(ctx, secondIp))
  }

  it should "eventually allow connections from same ip" in new TestSetup {
    assert(filter.accept(ctx, firstIp))
    assert(!filter.accept(ctx, firstIp))
    // Once the throttling window elapses, the same ip is accepted again.
    eventually {
      assert(filter.accept(ctx, firstIp))
    }
  }

  it should "allow repeated connections from localhost when configured" in new TestSetup {
    // The default config does not throttle localhost.
    assert(filter.accept(ctx, loopback))
    assert(filter.accept(ctx, loopback))
  }

  it should "disallow repeated connections from localhost when configured" in new TestSetup {
    val localConfig = defaultConfig.copy(throttleLocalhost = true)
    val localFilter = new ThrottlingIpFilter(localConfig)
    assert(localFilter.accept(ctx, loopback))
    assert(!localFilter.accept(ctx, loopback))
  }

  it should "eventually allow repeated connections from localhost when throttling is configured" in new TestSetup {
    val localConfig = defaultConfig.copy(throttleLocalhost = true)
    val localFilter = new ThrottlingIpFilter(localConfig)
    assert(localFilter.accept(ctx, loopback))
    assert(!localFilter.accept(ctx, loopback))
    eventually {
      assert(localFilter.accept(ctx, loopback))
    }
  }

  trait TestSetup {
    // Default: throttle repeated connections per ip for 500ms, but exempt localhost.
    val defaultConfig = IncomingConnectionThrottlingConfig(throttleLocalhost = false, throttlingDuration = 500.millis)

    val ctx = mock[ChannelHandlerContext]

    val filter = new ThrottlingIpFilter(defaultConfig)

    val firstIp = new InetSocketAddress("90.34.1.20", 90)
    val secondIp = new InetSocketAddress("90.34.1.21", 90)
    val loopback = new InetSocketAddress("127.0.0.1", 90)
  }

}
70 |
--------------------------------------------------------------------------------
/scalanet/ut/src/io/iohk/scalanet/peergroup/TransportPeerGroupAsyncSpec.scala:
--------------------------------------------------------------------------------
1 | package io.iohk.scalanet.peergroup
2 |
3 | import cats.effect.Resource
4 | import cats.implicits._
5 | import io.iohk.scalanet.NetUtils._
6 | import io.iohk.scalanet.peergroup.ReqResponseProtocol._
7 | import io.iohk.scalanet.peergroup.TransportPeerGroupAsyncSpec.{DynamicTLS, DynamicUDP}
8 | import io.iohk.scalanet.peergroup.dynamictls.DynamicTLSPeerGroup.{FramingConfig, PeerInfo}
9 |
10 | import java.util.concurrent.{Executors, TimeUnit}
11 | import monix.eval.Task
12 | import monix.execution.Scheduler
13 | import org.scalatest.{Assertion, AsyncFlatSpec, BeforeAndAfterAll}
14 | import org.scalatest.Matchers._
15 | import org.scalatest.prop.TableDrivenPropertyChecks._
16 | import scodec.Codec
17 |
18 | import scala.concurrent.{ExecutionContext, Future}
19 | import scodec.codecs.implicits._
20 |
21 | import java.net.InetSocketAddress
22 |
23 | /**
24 | *
25 | * Spec to test different concurrency scenario on top of our low level peer groups.
26 | * FIXME Add more scenarios.
27 | *
28 | */
class TransportPeerGroupAsyncSpec extends AsyncFlatSpec with BeforeAndAfterAll {
  // Dedicated thread pool so the concurrency scenarios actually run in parallel.
  val threadPool = Executors.newFixedThreadPool(16)
  val testContext = ExecutionContext.fromExecutor(threadPool)
  implicit val scheduler = Scheduler(testContext)

  override def afterAll(): Unit = {
    threadPool.shutdown()
    threadPool.awaitTermination(60, TimeUnit.SECONDS)
    ()
  }

  // Every scenario below runs once per transport listed here.
  private val rpcs = Table(
    ("Label", "Transport type"),
    ("UDP", DynamicUDP),
    ("DTLS", DynamicTLS)
  )
  forAll(rpcs) { (label, transportType) =>
    import TransportPeerGroupAsyncSpec._

    s"Request response on top of ${label}" should "exchange messages between clients sequentially" in taskTestCase {
      List.fill(2)(transportType.getProtocol[String](aRandomAddress())).sequence.use {
        case List(client1, client2) =>
          for {
            _ <- Task.parZip2(
              client1.startHandling(echoDoubleHandler).startAndForget,
              client2.startHandling(echoDoubleHandler).startAndForget
            )
            resp <- client1.send(msg1, client2.processAddress)
            resp2 <- client2.send(msg3, client1.processAddress)
            resp1 <- client1.send(msg2, client2.processAddress)
            resp3 <- client2.send(msg3 ++ msg1, client1.processAddress)
          } yield {
            resp shouldEqual msg1 ++ msg1
            resp1 shouldEqual msg2 ++ msg2
            resp2 shouldEqual msg3 ++ msg3
            resp3 shouldEqual (msg3 ++ msg1) ++ (msg3 ++ msg1)
          }
        case _ => fail()
      }
    }

    s"Request response on top of ${label}" should "exchange messages between clients concurrently" in taskTestCase {
      List.fill(3)(transportType.getProtocol[Int](aRandomAddress())).sequence.use {
        case List(client1, client2, client3) =>
          for {
            _ <- Task.parZip3(
              client1.startHandling(doublingHandler).startAndForget,
              client2.startHandling(doublingHandler).startAndForget,
              client3.startHandling(doublingHandler).startAndForget
            )
            responses <- Task.parZip3(
              client1.send(i, client2.processAddress),
              client2.send(j, client3.processAddress),
              client3.send(k, client1.processAddress)
            )
            (r1, r2, r3) = responses
            responses <- Task.parSequence((1 to 4).map { req =>
              if (req % 2 == 0) {
                client1.send(req, client3.processAddress)
              } else {
                client2.send(req, client3.processAddress)
              }
            })
          } yield {
            r1 shouldEqual 2 * i
            r2 shouldEqual 2 * j
            r3 shouldEqual 2 * k
            responses shouldEqual (1 to 4).map(2 * _)
          }
        case _ => fail()
      }
    }

    s"Request response on top of ${label}" should "exchange messages between clients concurrently for multiple messages" in taskTestCase {
      val client1Numbers = (1 to 20).toList
      val client2Numbers = (10 to 30).toList
      val client3Numbers = (20 to 40).toList
      List.fill(3)(transportType.getProtocol[Int](aRandomAddress())).sequence.use {
        case List(client1, client2, client3) =>
          for {
            // Fixed: this discard binder was previously spelled `-` instead of `_`.
            _ <- Task.parZip3(
              client1.startHandling(doublingHandler).startAndForget,
              client2.startHandling(doublingHandler).startAndForget,
              client3.startHandling(doublingHandler).startAndForget
            )
            responses <- Task.parZip3(
              Task.sequence(client1Numbers.map(num => client1.send(num, client3.processAddress))),
              Task.sequence(client2Numbers.map(num => client2.send(num, client3.processAddress))),
              Task.sequence(client3Numbers.map(num => client3.send(num, client1.processAddress)))
            )
            (resp1, resp2, resp3) = responses

          } yield {
            resp1 shouldEqual client1Numbers.map(2 * _)
            resp2 shouldEqual client2Numbers.map(2 * _)
            resp3 shouldEqual client3Numbers.map(2 * _)
          }
        case _ => fail()
      }
    }
  }
}
131 |
object TransportPeerGroupAsyncSpec {
  /** Runs a monix Task-based test body as the Future the async spec expects. */
  def taskTestCase(t: => Task[Assertion])(implicit s: Scheduler): Future[Assertion] =
    t.runToFuture

  // Echo handler replying with the request concatenated with itself.
  val echoDoubleHandler: String => String = s => s + s
  val msg1 = "Hello"
  val msg2 = "Server"
  val msg3 = "This is the way"

  // Handler replying with twice the request value.
  val doublingHandler: Int => Int = _ * 2
  val i = 1
  val j = 2
  val k = 3

  val testFramingConfig = FramingConfig.buildStandardFrameConfig(192000, 4).getOrElse(fail())

  /** A transport flavour together with the way to build a request/response protocol over it. */
  sealed abstract class TransportProtocol extends Product with Serializable {
    type AddressingType
    def getProtocol[M](
        address: InetSocketAddress
    )(implicit s: Scheduler, c: Codec[M]): Resource[Task, ReqResponseProtocol[AddressingType, M]]
  }

  case object DynamicUDP extends TransportProtocol {
    override type AddressingType = InetMultiAddress

    override def getProtocol[M](
        address: InetSocketAddress
    )(implicit s: Scheduler, c: Codec[M]): Resource[Task, ReqResponseProtocol[InetMultiAddress, M]] =
      getDynamicUdpReqResponseProtocolClient(address)
  }

  case object DynamicTLS extends TransportProtocol {
    override type AddressingType = PeerInfo

    override def getProtocol[M](
        address: InetSocketAddress
    )(implicit s: Scheduler, c: Codec[M]): Resource[Task, ReqResponseProtocol[PeerInfo, M]] =
      getTlsReqResponseProtocolClient(testFramingConfig)(address)
  }

}
175 |
--------------------------------------------------------------------------------
/scalanet/ut/src/io/iohk/scalanet/peergroup/udp/DynamicUDPPeerGroupSpec.scala:
--------------------------------------------------------------------------------
1 | package io.iohk.scalanet.peergroup.udp
2 |
3 | import cats.effect.Resource
4 | import java.net.InetSocketAddress
5 | import monix.eval.Task
6 | import monix.execution.Scheduler
7 | import scodec.Codec
8 | import io.iohk.scalanet.peergroup.PeerGroup
9 | import io.iohk.scalanet.peergroup.InetMultiAddress
10 |
// Fixed spec label typo: was "DynamicUDPPPeerGroup" (triple P).
class DynamicUDPPeerGroupSpec extends UDPPeerGroupSpec("DynamicUDPPeerGroup") {
  /** Wraps a DynamicUDPPeerGroup so the shared spec can also inspect the active channel count. */
  override def initUdpPeerGroup[M](
      address: InetSocketAddress
  )(implicit scheduler: Scheduler, codec: Codec[M]): Resource[Task, UDPPeerGroupSpec.TestGroup[M]] = {
    DynamicUDPPeerGroup[M](DynamicUDPPeerGroup.Config(address)).map { pg =>
      new PeerGroup[InetMultiAddress, M] {
        override def processAddress = pg.processAddress
        override def nextServerEvent = pg.nextServerEvent
        override def client(to: InetMultiAddress) = pg.client(to)
        // Exposes the underlying group's live channel map size for leak checks.
        def channelCount: Int = pg.activeChannels.size
      }
    }
  }
}
25 |
--------------------------------------------------------------------------------
/scalanet/ut/src/io/iohk/scalanet/peergroup/udp/StaticUDPPeerGroupSpec.scala:
--------------------------------------------------------------------------------
1 | package io.iohk.scalanet.peergroup.udp
2 |
3 | import cats.effect.Resource
4 | import cats.implicits._
5 | import java.net.InetSocketAddress
6 | import java.util.concurrent.CountDownLatch
7 | import monix.execution.Scheduler
8 | import monix.eval.Task
9 | import io.iohk.scalanet.peergroup.PeerGroup
10 | import io.iohk.scalanet.peergroup.InetMultiAddress
11 | import io.iohk.scalanet.peergroup.Channel.MessageReceived
12 | import org.scalatest.Matchers
13 | import scodec.Codec
14 | import scodec.codecs.implicits._
15 | import scala.concurrent.duration._
16 | import java.util.concurrent.TimeUnit
17 | import io.iohk.scalanet.peergroup.Channel
18 |
19 | class StaticUDPPeerGroupSpec extends UDPPeerGroupSpec("StaticUDPPeerGroup") with Matchers {
20 | import UDPPeerGroupSpec.runEchoServer
21 | import io.iohk.scalanet.peergroup.implicits._
22 |
23 | val timeout = 15.seconds
24 |
25 | override def initUdpPeerGroup[M](
26 | address: InetSocketAddress
27 | )(implicit scheduler: Scheduler, codec: Codec[M]): Resource[Task, UDPPeerGroupSpec.TestGroup[M]] = {
28 | StaticUDPPeerGroup[M](StaticUDPPeerGroup.Config(address)).map { pg =>
29 | new PeerGroup[InetMultiAddress, M] {
30 | override def processAddress = pg.processAddress
31 | override def nextServerEvent = pg.nextServerEvent
32 | override def client(to: InetMultiAddress) = pg.client(to)
33 | def channelCount: Int = pg.channelCount.runSyncUnsafe()
34 | }
35 | }
36 | }
37 |
38 | def startCollectingMessages(timespan: FiniteDuration)(channel: Channel[InetMultiAddress, String]) =
39 | channel.channelEventObservable
40 | .collect {
41 | case MessageReceived(msg) => msg
42 | }
43 | .takeByTimespan(timespan)
44 | .toListL
45 | .start
46 |
47 | it should "use the server port when it opens client channels" in {
48 | (for {
49 | pg1 <- initUdpPeerGroup[String]()
50 | pg2 <- initUdpPeerGroup[String]()
51 | client12 <- pg1.client(pg2.processAddress)
52 | } yield (pg1, pg2, client12))
53 | .use {
54 | case (pg1, pg2, client12) =>
55 | for {
56 | _ <- client12.sendMessage("Hola!")
57 | event <- pg2.serverEventObservable.collectChannelCreated.headL
58 | } yield {
59 | event._1.to shouldBe pg1.processAddress
60 | }
61 | }
62 | .runSyncUnsafe(timeout)
63 | }
64 |
it should "re-emit a server event if a closed channel is re-activated" in {
  // pg2 is allocated manually (via .allocated, not .use) so that its release
  // action can be run explicitly mid-test: releasing pg2 closes the server
  // event topic, which lets the channel-created listener below complete.
  initUdpPeerGroup[String]().allocated
    .flatMap {
      case (pg2, pg2Release) =>
        (for {
          pg1 <- initUdpPeerGroup[String]()
          client12 <- pg1.client(pg2.processAddress)
        } yield (pg1, client12))
          .use {
            case (pg1, client12) =>
              // Counts down once per message consumed on the server side;
              // reaches zero after all 3 messages have been processed.
              val messageCounter = new CountDownLatch(3)
              for {
                // Close incoming channel after two messages and collect which ports they came from.
                // Further messages should result in another incoming channel being created.
                listener <- pg2.serverEventObservable.collectChannelCreated
                  .mapEval {
                    case (channel, release) =>
                      // Have to do it in the background otherwise it would block the processing of incoming UDP packets.
                      List
                        .fill(2)(channel.nextChannelEvent >> Task(messageCounter.countDown()))
                        .sequence
                        .guarantee(release)
                        .startAndForget
                        .as(channel.to)
                  }
                  .toListL
                  .start

                // Send 3 messages, which should result in two incoming channels emitted.
                // Space them out a bit so the third messages doesn't end up in the discarded queue.
                _ <- List.range(0, 3).traverse(i => client12.sendMessage(i.toString).delayResult(50.millis))

                // Give the server time to process all messages
                _ <- Task(messageCounter.await(timeout.toMillis, TimeUnit.MILLISECONDS))
                // Release so the server topic is closed and we get the results.
                _ <- pg2Release
                ports <- listener.join

              } yield {
                // Both incoming channels (the original and the re-activated one)
                // must come from the same remote address, pg1.
                ports shouldBe List(pg1.processAddress, pg1.processAddress)
              }
          }
    }
    .runSyncUnsafe(timeout)
}
110 |
it should "replicate incoming messages to all client channels connected to the remote address" in {
  // One target peer group and two source peer groups; the third peer opens
  // two separate client channels towards the target.
  val setup = for {
    target <- initUdpPeerGroup[String]()
    peerB <- initUdpPeerGroup[String]()
    peerC <- initUdpPeerGroup[String]()
    fromB <- peerB.client(target.processAddress)
    fromCa <- peerC.client(target.processAddress)
    fromCb <- peerC.client(target.processAddress)
  } yield (target, fromB, fromCa, fromCb)

  setup.use {
    case (target, fromB, fromCa, fromCb) =>
      for {
        // Keep the incoming channel open: releasing it right away would make it
        // racy whether it is still there when the next message arrives on
        // another channel. This test wants both client channels from the same
        // peer to observe the echoed messages.
        _ <- runEchoServer(target, doRelease = false).startAndForget

        _ <- fromB.sendMessage("Two to One")
        _ <- fromCa.sendMessage("Three to One A")
        _ <- fromCb.sendMessage("Three to One B")

        collectors <- List(fromB, fromCa, fromCb).traverse(startCollectingMessages(1.second))
        collected <- collectors.traverse(_.join)
      } yield {
        collected(0) shouldBe List("Two to One")
        collected(1) should contain theSameElementsAs List("Three to One A", "Three to One B")
        collected(2) shouldBe collected(1)
      }
  }.runSyncUnsafe(timeout)
}
142 |
it should "replicate incoming messages to clients as well as the server channel" in {
  val setup = for {
    server <- initUdpPeerGroup[String]()
    peer <- initUdpPeerGroup[String]()
    clientChannel <- peer.client(server.processAddress)
  } yield (server, peer, clientChannel)

  setup.use {
    case (server, peer, clientChannel) =>
      for {
        _ <- runEchoServer(server).startAndForget

        message = "Requests and responses look the same."
        _ <- clientChannel.sendMessage(message)

        // The reply also surfaces as an incoming (server-side) channel on the
        // sending peer; grab the first one created.
        serverChannel <- peer.serverEventObservable.collectChannelCreated
          .map { case (channel, _) => channel }
          .headL

        collectors <- List(clientChannel, serverChannel).traverse(startCollectingMessages(1.second))
        collected <- collectors.traverse(_.join)
      } yield {
        // Both the client channel and the server-side channel see the echo.
        collected(0) shouldBe List(message)
        collected(1) shouldBe collected(0)
      }
  }.runSyncUnsafe(timeout)
}
170 | }
171 |
--------------------------------------------------------------------------------
/scalanet/ut/src/io/iohk/scalanet/testutils/GeneratorUtils.scala:
--------------------------------------------------------------------------------
1 | package io.iohk.scalanet.testutils
2 |
3 | import java.security.{KeyPair, SecureRandom}
4 |
5 | import io.iohk.scalanet.crypto.CryptoUtils
6 | import org.scalacheck.{Arbitrary, Gen}
7 |
object GeneratorUtils {

  /** Generator that produces a fresh EC key pair on the given curve every time
    * it is sampled.
    *
    * Uses `Gen.delay` so the key-pair creation is re-evaluated per sample. The
    * previous implementation abused `Gen.resultOf` with an ignored `String`
    * argument purely to force re-evaluation; `Gen.delay(Gen.const(...))` is
    * the idiomatic, equivalent way to express that.
    *
    * @param curveName name of the elliptic curve passed to CryptoUtils
    * @param rnd       source of randomness for key generation
    */
  def genKey(curveName: String, rnd: SecureRandom): Gen[KeyPair] =
    Gen.delay(Gen.const(CryptoUtils.genEcKeyPair(rnd, curveName)))

  /** Generator of byte arrays whose length is chosen uniformly in
    * `[minSize, maxSize]` (inclusive).
    */
  def randomSizeByteArrayGen(minSize: Int, maxSize: Int): Gen[Array[Byte]] =
    Gen.choose(minSize, maxSize).flatMap(byteArrayOfNItemsGen)

  /** Generator of byte arrays containing exactly `n` arbitrary bytes. */
  def byteArrayOfNItemsGen(n: Int): Gen[Array[Byte]] =
    Gen.listOfN(n, Arbitrary.arbitrary[Byte]).map(_.toArray)

}
21 |
--------------------------------------------------------------------------------
/versionFile/version:
--------------------------------------------------------------------------------
1 | 0.9.0-SNAPSHOT
--------------------------------------------------------------------------------