├── .github
│   └── ISSUE_TEMPLATE
│       ├── config.yml
│       └── open_an_issue.md
├── .gitignore
├── LICENSE-APACHE
├── LICENSE-MIT
├── README.md
├── RFC
│   ├── .gitignore
│   ├── README.md
│   ├── rfcBBL009
│   │   └── README.md
│   ├── rfcBBL102
│   │   ├── README.md
│   │   ├── baseline.toml
│   │   ├── images
│   │   │   └── ttl_slow.gif
│   │   ├── rfcBBL102+rfcBBL104.toml
│   │   └── rfcBBL102.toml
│   ├── rfcBBL104
│   │   ├── README.md
│   │   ├── baseline.toml
│   │   ├── images
│   │   │   ├── rfcbbL104-result-baseline.png
│   │   │   ├── rfcbbL104-results-rfc.png
│   │   │   └── rfcbbL104.png
│   │   └── rfcBBL104.toml
│   ├── rfcBBL1201
│   │   └── README.md
│   ├── rfcBBL1205
│   │   └── README.md
│   ├── rfcBBL203A
│   │   ├── README.md
│   │   ├── baseline.toml
│   │   ├── notes.md
│   │   └── rfcBBL203A.toml
│   ├── rfcBBL203B
│   │   └── README.md
│   ├── rfcBBL207
│   │   └── README.md
│   ├── rfcBBL208
│   │   └── README.md
│   ├── run_experiment.sh
│   └── template.md
└── testbed
    ├── README.md
    ├── compositions
    │   ├── bitswap-composition.toml
    │   ├── graphsync-composition.toml
    │   ├── gs-directory-tnsfr.toml
    │   ├── ipfs-composition.toml
    │   ├── libp2pHTTP-composition.toml
    │   ├── rawLibp2p-composition.toml
    │   ├── run_composition_plot.sh
    │   └── tcp-composition.toml
    ├── probe
    │   ├── README.md
    │   ├── go.mod
    │   ├── go.sum
    │   ├── graphsync.go
    │   ├── ipfs.go
    │   └── probe.go
    ├── test-datasets
    │   ├── README.md
    │   └── xkcd.png
    ├── testbed
    │   ├── .gitignore
    │   ├── README.md
    │   ├── go.mod
    │   ├── go.sum
    │   ├── main.go
    │   ├── manifest.toml
    │   ├── scripts
    │   │   ├── config.yaml
    │   │   ├── dashboard.ipynb
    │   │   ├── exec.sh
    │   │   ├── pdf.py
    │   │   ├── pdf_composition.py
    │   │   ├── process.py
    │   │   ├── random-file.sh
    │   │   ├── requirements.txt
    │   │   ├── runner.py
    │   │   ├── single_run.sh
    │   │   ├── ui.py
    │   │   └── utils.py
    │   ├── test
    │   │   ├── common.go
    │   │   ├── tcpTransfer.go
    │   │   └── transfer.go
    │   └── utils
    │       ├── bitswap.go
    │       ├── dagadder.go
    │       ├── dialer
    │       │   └── dialer.go
    │       ├── exchange.go
    │       ├── files.go
    │       ├── graphsync.go
    │       ├── http.go
    │       ├── ipfs.go
    │       ├── libp2pHTTP.go
    │       ├── net.go
    │       ├── node.go
    │       ├── params.go
    │       ├── rawLibp2p.go
    │       ├── tcp.go
    │       └── walker.go
    └── viewer
        ├── README.md
        ├── image.png
        └── server.js

/.github/ISSUE_TEMPLATE/config.yml:
--------------------------------------------------------------------------------
1 | blank_issues_enabled: false
2 | contact_links:
3 |   - name: Getting Help on IPFS
4 |     url: https://ipfs.io/help
5 |     about: All information about how and where to get help on IPFS.
6 |   - name: IPFS Official Forum
7 |     url: https://discuss.ipfs.io
8 |     about: Please post general questions, support requests, and discussions here.
9 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/open_an_issue.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: Open an issue
3 | about: Only for actionable issues relevant to this repository.
4 | title: ''
5 | labels: need/triage
6 | assignees: ''
7 |
8 | ---
9 |
20 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | experiments/
2 | my-datasets/
3 | datasets/
4 | results/
5 | .idea
6 | testbed/.DS_Store
7 | testbed/compositions/.DS_Store
8 | testbed/test-datasets/*
9 |
--------------------------------------------------------------------------------
/LICENSE-APACHE:
--------------------------------------------------------------------------------
1 | Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
2 |
3 | http://www.apache.org/licenses/LICENSE-2.0
4 |
5 | Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
6 |
--------------------------------------------------------------------------------
/LICENSE-MIT:
--------------------------------------------------------------------------------
1 | The MIT License (MIT)
2 |
3 | Permission is hereby granted, free of charge, to any person obtaining a copy
4 | of this software and associated documentation files (the "Software"), to deal
5 | in the Software without restriction, including without limitation the rights
6 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
7 | copies of the Software, and to permit persons to whom the Software is
8 | furnished to do so, subject to the following conditions:
9 |
10 | The above copyright notice and this permission notice shall be included in
11 | all copies or substantial portions of the Software.
12 |
13 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
16 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
17 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
18 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
19 | THE SOFTWARE.
20 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Project: Beyond Bitswap
2 |
3 |
4 |
5 |
6 |
7 |
: \
2 | * Status: `Draft`
3 | * Implementation here: https://github.com/adlrocha/
4 |
5 | ## Abstract
6 |
7 |
8 |
9 | ## Shortcomings
10 |
11 | ## Description
12 |
13 | ## Implementation plan
14 |
15 | # Impact
16 |
17 | ## Evaluation Plan
18 |
19 | ## Prior Work
20 |
21 | ## Results
22 |
23 | ## Future Work
24 |
--------------------------------------------------------------------------------
/testbed/README.md:
--------------------------------------------------------------------------------
1 | # Beyond Bitswap Testbed
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 | This repo implements a testbed to evaluate the performance of different IPFS exchange interfaces. It is currently used within the scope of the Beyond Bitswap project to test improvement proposals against the baseline implementation.
10 |
11 | For the full project description, please consult [BEYOND_BITSWAP](https://github.com/protocol/beyond-bitswap)
12 |
13 | The repo is composed of the following parts:
14 | * [Testbed](./testbed): Implements a Testground test environment and a set of Python scripts to run the tests and process the results.
15 | * [Probe](./probe): A simple CLI tool that comes in handy for testing new implementations and for debugging.
16 | * [Bitswap Viewer](./viewer): An ObservableHQ notebook that lets you visually follow, step by step, the messages exchanged between Bitswap nodes during a file-sharing execution.
17 | * [Datasets](./test-datasets): Place in this directory any of the datasets you want to use in your tests.
18 |
19 | You will find additional documentation in these directories.
20 |
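
As a quick orientation, the sketch below shows one possible way to run a prepared composition end to end. It assumes a local Testground daemon is running and that the plan in [`./testbed`](./testbed) has been imported under the name `testbed` (the exact `testground plan import` flags may differ between Testground versions):

```bash
# Import the plan so the compositions (which reference plan = "testbed") can find it.
testground plan import --from ./testbed --name testbed

# Run one of the prepared compositions and plot the results into a PDF.
cd compositions
./run_composition_plot.sh bitswap-composition.toml
```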
--------------------------------------------------------------------------------
/testbed/compositions/bitswap-composition.toml:
--------------------------------------------------------------------------------
1 | [metadata]
2 | name = "transfer"
3 |
4 |
5 | [global]
6 | plan = "testbed"
7 | case = "transfer"
8 | builder = "docker:go"
9 | runner = "local:docker"
10 |
11 | total_instances = 2
12 |
13 | [[groups]]
14 | id = "nodes"
15 | instances = { count = 2 }
16 |
17 | [groups.build]
18 |
19 |
20 |
21 |
22 | [groups.run]
23 | [groups.run.test_params]
24 | input_data = "files"
25 | data_dir = "../extra/test-datasets"
26 | run_timeout_secs = "3000"
27 | timeout_secs = "12000"
28 | run_count = "4"
29 | leech_count= "1"
30 | passive_count = "0"
31 | max_connection_rate = "100"
32 | # input_data = "random"
33 | # file_size = "10000000,30000000,50000000"
34 | latency_ms= "100,10"
35 | bandwidth_mb= "100,50"
36 | enable_tcp= "false"
37 | enable_dht= "false"
38 | node_type = "bitswap"
39 | long_lasting = "true"
40 |
--------------------------------------------------------------------------------
/testbed/compositions/graphsync-composition.toml:
--------------------------------------------------------------------------------
1 | [metadata]
2 | name = "transfer"
3 |
4 |
5 | [global]
6 | plan = "testbed"
7 | case = "transfer"
8 | builder = "docker:go"
9 | runner = "local:docker"
10 |
11 | total_instances = 2
12 |
13 | [[groups]]
14 | id = "nodes"
15 | instances = { count = 2 }
16 |
17 | [groups.build]
18 |
19 |
20 |
21 |
22 | [groups.run]
23 | [groups.run.test_params]
24 | input_data = "files"
25 | data_dir = "../extra/test-datasets"
26 | run_timeout_secs = "3000"
27 | timeout_secs = "12000"
28 | run_count = "4"
29 | leech_count= "1"
30 | passive_count = "0"
31 | max_connection_rate = "100"
32 | # input_data = "random"
33 | # file_size = "10000000,30000000,50000000"
34 | latency_ms= "100,10"
35 | bandwidth_mb= "100,50"
36 | enable_tcp= "false"
37 | enable_dht= "false"
38 | node_type = "graphsync"
39 | long_lasting = "true"
40 |
--------------------------------------------------------------------------------
/testbed/compositions/gs-directory-tnsfr.toml:
--------------------------------------------------------------------------------
1 | [metadata]
2 | name = "graphsync-directory-transfer"
3 |
4 | [global]
5 | plan = "testbed"
6 | case = "sparse"
7 | builder = "docker:go"
8 | runner = "local:docker"
9 |
10 | total_instances = 30
11 |
12 | [[groups]]
13 | id = "nodes"
14 | instances = { count = 30 }
15 |
16 | [groups.build]
17 | skip_runtime_image = "true"
18 | dependencies = [
19 | { module = "github.com/ipfs/go-bitswap", target="github.com/adlrocha/go-bitswap", version = "6f5c6dc5e81bb7a49c73d20aa3d9004747164928"}
20 | ]
21 |
22 | [groups.run]
23 | [groups.run.test_params]
24 | input_data = "files"
25 | data_dir = "../extra/test-datasets"
26 | run_timeout_secs = "900"
27 | timeout_secs = "2000"
28 | run_count = "3"
29 | leech_count= "15"
30 | passive_count = "10"
31 | max_connection_rate = "100"
32 | # input_data = "random"
33 | # file_size = "10000000,30000000,50000000"
34 | latency_ms= "100"
35 | bandwidth_mb= "100"
36 | enable_tcp= "false"
37 | enable_dht= "false"
38 |
--------------------------------------------------------------------------------
/testbed/compositions/ipfs-composition.toml:
--------------------------------------------------------------------------------
1 | [metadata]
2 | name = "transfer"
3 |
4 |
5 | [global]
6 | plan = "testbed"
7 | case = "transfer"
8 | builder = "docker:go"
9 | runner = "local:docker"
10 |
11 | total_instances = 2
12 |
13 | [[groups]]
14 | id = "nodes"
15 | instances = { count = 2 }
16 |
17 | [groups.build]
18 |
19 |
20 |
21 |
22 | [groups.run]
23 | [groups.run.test_params]
24 | input_data = "files"
25 | data_dir = "../extra/test-datasets"
26 | run_timeout_secs = "3000"
27 | timeout_secs = "12000"
28 | run_count = "4"
29 | leech_count= "1"
30 | passive_count = "0"
31 | max_connection_rate = "100"
32 | # input_data = "random"
33 | # file_size = "10000000,30000000,50000000"
34 | latency_ms= "100,10"
35 | bandwidth_mb= "100,50"
36 | enable_tcp= "false"
37 | enable_dht= "false"
38 | node_type = "ipfs"
39 | long_lasting = "true"
40 |
--------------------------------------------------------------------------------
/testbed/compositions/libp2pHTTP-composition.toml:
--------------------------------------------------------------------------------
1 | [metadata]
2 | name = "transfer"
3 |
4 |
5 | [global]
6 | plan = "testbed"
7 | case = "transfer"
8 | builder = "docker:go"
9 | runner = "local:docker"
10 |
11 | total_instances = 2
12 |
13 | [[groups]]
14 | id = "nodes"
15 | instances = { count = 2 }
16 |
17 | [groups.build]
18 |
19 | [groups.run]
20 | [groups.run.test_params]
21 | input_data = "random"
22 | file_size = "4000000000"
23 | run_timeout_secs = "3000"
24 | timeout_secs = "12000"
25 | run_count = "1"
26 | leech_count= "1"
27 | passive_count = "0"
28 | max_connection_rate = "100"
29 | # input_data = "random"
30 | # file_size = "10000000,30000000,50000000"
31 | latency_ms= "0"
32 | bandwidth_mb= "0"
33 | enable_tcp= "false"
34 | enable_dht= "false"
35 | node_type = "libp2pHTTP"
36 | long_lasting = "true"
--------------------------------------------------------------------------------
/testbed/compositions/rawLibp2p-composition.toml:
--------------------------------------------------------------------------------
1 | [metadata]
2 | name = "transfer"
3 |
4 |
5 | [global]
6 | plan = "testbed"
7 | case = "transfer"
8 | builder = "docker:go"
9 | runner = "local:docker"
10 |
11 | total_instances = 2
12 |
13 | [[groups]]
14 | id = "nodes"
15 | instances = { count = 2 }
16 |
17 | [groups.build]
18 |
19 | [groups.run]
20 | [groups.run.test_params]
21 | input_data = "random"
22 | file_size = "4000000000"
23 | run_timeout_secs = "3000"
24 | timeout_secs = "12000"
25 | run_count = "1"
26 | leech_count= "1"
27 | passive_count = "0"
28 | max_connection_rate = "100"
29 | # input_data = "random"
30 | # file_size = "10000000,30000000,50000000"
31 | latency_ms= "0"
32 | bandwidth_mb= "0"
33 | enable_tcp= "false"
34 | enable_dht= "false"
35 | node_type = "rawLibp2p"
36 | long_lasting = "true"
--------------------------------------------------------------------------------
/testbed/compositions/run_composition_plot.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | echo "Cleaning previous results..."
4 | RUNNER="local:docker"
5 |
6 | rm -rf ./results
7 | mkdir ./results
8 |
9 | source ../testbed/scripts/exec.sh
10 |
11 |
12 | echo "[*] Running Composition"
13 | run_composition ./$1
14 | # Plot in pdf
15 | python3 ../testbed/scripts/pdf_composition.py $1
--------------------------------------------------------------------------------
/testbed/compositions/tcp-composition.toml:
--------------------------------------------------------------------------------
1 | [metadata]
2 | name = "tcp-transfer"
3 |
4 |
5 | [global]
6 | plan = "testbed"
7 | case = "tcp-transfer"
8 | builder = "docker:go"
9 | runner = "local:docker"
10 |
11 | total_instances = 2
12 |
13 | [[groups]]
14 | id = "nodes"
15 | instances = { count = 2 }
16 |
17 | [groups.build]
18 |
19 |
20 |
21 |
22 | [groups.run]
23 | [groups.run.test_params]
24 | input_data = "files"
25 | data_dir = "../extra/test-datasets"
26 | run_timeout_secs = "3000"
27 | timeout_secs = "12000"
28 | run_count = "4"
29 | leech_count= "1"
30 | passive_count = "0"
31 | max_connection_rate = "100"
32 | # input_data = "random"
33 | # file_size = "10000000,30000000,50000000"
34 | latency_ms= "100,10"
35 | bandwidth_mb= "100,50"
36 | enable_tcp= "false"
37 | enable_dht= "false"
38 | node_type = "ipfs"
39 | long_lasting = "true"
40 |
--------------------------------------------------------------------------------
/testbed/probe/README.md:
--------------------------------------------------------------------------------
1 | ## Beyond Bitswap Probe
2 | This is a simple CLI tool to help debug and test the exchange of content between different IPFS nodes.
3 |
4 | [](https://asciinema.org/a/2xqFne0tkGXprE3tDmAhHRKZo)
5 |
6 | ### Usage
7 | * To run the tool use:
8 | ```
9 | $ go build
10 | $ ./probe
11 | ```
12 | * The command will start a new IPFS node, add a randomly generated file and, optionally, the
13 | files from a directory, so there is some content to start testing with. When this task is finished
14 | you will be prompted to start typing commands:
15 | ```
16 | -- Getting an IPFS node running --
17 | Spawning node on a temporary repo
18 | Listening at: [/ip4/192.168.1.66/tcp/33387 /ip4/127.0.0.1/tcp/33387 /ip6/::1/tcp/35697 /ip4/192.168.1.66/udp/44399/quic /ip4/127.0.0.1/udp/44399/quic /ip6/::1/udp/50214/quic]
19 | PeerInfo: {QmRDgb3Vq1nqBBGe8VugFSXSxt7pG49xComwN8QV7Z5m3Z: [/ip4/192.168.1.66/tcp/33387 /ip4/127.0.0.1/tcp/33387 /ip6/::1/tcp/35697 /ip4/192.168.1.66/udp/44399/quic /ip4/127.0.0.1/udp/44399/quic /ip6/::1/udp/50214/quic]}
20 | Adding a random file to the network: /ipfs/QmVaGrB1GESwjNTVvguZbYGf1mmDgU24Jtcs8wxTP2tT3x
21 | Adding inputData directory
22 | Adding file to the network: /ipfs/QmNdGY4t8ZPU1StBRs7fNpyr6TarwVaYNFtWFwT2tZunw5
23 | >> Enter command:
24 | ```
25 | * Optionally you can use the `--debug` flag to show verbose Bitswap DEBUG traces.
26 |
27 | These are the currently available commands (a sample session is sketched below):
28 | * `get_<path>`: Gets `<path>` from the IPFS network.
29 | * `add_<size>`: Adds a random file of size `<size>`.
30 | * `addFile_<path>`: Adds the file (or directory) at `<path>` to the network.
31 | * `connect_<multiaddr>`: Connects to an IPFS node.
32 | * `pin_<path>`: Pins content to the node.
33 | * `graphsync_<multiaddr>_<cid>`: Fetches content from a peer using graphsync.
34 | * `exit`: Exits the command line tool.
35 |
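For orientation, here is a hypothetical session using the commands above (the multiaddress and CIDs are the ones printed in the example output; the exact multiaddress format accepted by `connect_` depends on how `connectPeer` parses it):
```
>> Enter command: connect_/ip4/192.168.1.66/tcp/33387/p2p/QmRDgb3Vq1nqBBGe8VugFSXSxt7pG49xComwN8QV7Z5m3Z
>> Enter command: get_/ipfs/QmVaGrB1GESwjNTVvguZbYGf1mmDgU24Jtcs8wxTP2tT3x
>> Enter command: add_1048576
>> Enter command: exit
```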
36 | ### Using other Bitswap versions
37 | If you want to test this tool against other Bitswap/Graphsync versions (like the implementation of an RFC), just modify the `replace` directive in the [`go.mod`](./go.mod) so that it points to the version you want to spawn within the IPFS node. For instance, to test the implementation of RFCBBL102 we would add a replace directive along these lines:
38 | ```
39 | replace github.com/ipfs/go-bitswap => github.com/adlrocha/go-bitswap 6f5c6dc5e81bb7a49c73d20aa3d9004747164928
40 | ```
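
Note that Go expects a module version rather than a bare commit hash on the right-hand side of a `replace` directive, so a commit is normally referenced through a pseudo-version. The [`go.mod`](./go.mod) in this directory already carries a replace of that shape (pointing at a different commit):
```
replace github.com/ipfs/go-bitswap => github.com/adlrocha/go-bitswap v0.2.20-0.20201125102925-e76a6cbc6ee1
```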
--------------------------------------------------------------------------------
/testbed/probe/go.mod:
--------------------------------------------------------------------------------
1 | module github.com/adlrocha/beyond-bitswap/probe
2 |
3 | go 1.14
4 |
5 | require (
6 | github.com/alangpierce/go-forceexport v0.0.0-20160317203124-8f1d6941cd75 // indirect
7 | github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c // indirect
8 | github.com/dustin/go-humanize v1.0.0
9 | github.com/google/gopacket v1.1.18 // indirect
10 | github.com/ipfs/go-bitswap v0.2.20
11 | github.com/ipfs/go-blockservice v0.1.3
12 | github.com/ipfs/go-cid v0.0.7
13 | github.com/ipfs/go-datastore v0.4.5
14 | github.com/ipfs/go-filestore v1.0.0
15 | github.com/ipfs/go-graphsync v0.4.3
16 | github.com/ipfs/go-ipfs v0.7.0
17 | github.com/ipfs/go-ipfs-blockstore v1.0.1
18 | github.com/ipfs/go-ipfs-chunker v0.0.5
19 | github.com/ipfs/go-ipfs-config v0.9.0
20 | github.com/ipfs/go-ipfs-delay v0.0.1
21 | github.com/ipfs/go-ipfs-exchange-interface v0.0.1
22 | github.com/ipfs/go-ipfs-files v0.0.8
23 | github.com/ipfs/go-ipfs-routing v0.1.0
24 | github.com/ipfs/go-ipld-cbor v0.0.4 // indirect
25 | github.com/ipfs/go-ipld-format v0.2.0
26 | github.com/ipfs/go-log v1.0.4
27 | github.com/ipfs/go-merkledag v0.3.2
28 | github.com/ipfs/go-metrics-interface v0.0.1
29 | github.com/ipfs/go-unixfs v0.2.4
30 | github.com/ipfs/interface-go-ipfs-core v0.4.0
31 | github.com/ipld/go-ipld-prime v0.5.1-0.20201021195245-109253e8a018
32 | github.com/jbenet/goprocess v0.1.4
33 | github.com/libp2p/go-libp2p v0.11.0
34 | github.com/libp2p/go-libp2p-autonat v0.3.2 // indirect
35 | github.com/libp2p/go-libp2p-core v0.6.1
36 | github.com/libp2p/go-libp2p-peerstore v0.2.6
37 | github.com/libp2p/go-mplex v0.1.3 // indirect
38 | github.com/libp2p/go-reuseport-transport v0.0.4 // indirect
39 | github.com/libp2p/go-sockaddr v0.1.0 // indirect
40 | github.com/libp2p/go-yamux v1.3.8 // indirect
41 | github.com/multiformats/go-multiaddr v0.3.1
42 | github.com/multiformats/go-multihash v0.0.14
43 | github.com/pkg/errors v0.9.1
44 | github.com/testground/sdk-go v0.2.4
45 | github.com/wangjia184/sortedset v0.0.0-20160527075905-f5d03557ba30 // indirect
46 | github.com/whyrusleeping/cbor-gen v0.0.0-20200723185710-6a3894a6352b // indirect
47 | github.com/whyrusleeping/go-notifier v0.0.0-20170827234753-097c5d47330f // indirect
48 | go.uber.org/fx v1.13.1
49 | golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de // indirect
50 | golang.org/x/net v0.0.0-20200904194848-62affa334b73 // indirect
51 | golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208
52 | golang.org/x/sys v0.0.0-20200803210538-64077c9b5642 // indirect
53 | golang.org/x/text v0.3.3 // indirect
54 |
55 | )
56 |
57 | replace github.com/ipfs/go-bitswap => github.com/adlrocha/go-bitswap v0.2.20-0.20201125102925-e76a6cbc6ee1
58 |
--------------------------------------------------------------------------------
/testbed/probe/graphsync.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "context"
5 | "fmt"
6 | "time"
7 |
8 | p2p "github.com/libp2p/go-libp2p-core"
9 | "go.uber.org/fx"
10 |
11 | "github.com/ipfs/go-cid"
12 | "github.com/ipfs/go-graphsync"
13 | gsimpl "github.com/ipfs/go-graphsync/impl"
14 | "github.com/ipfs/go-graphsync/network"
15 | "github.com/ipfs/go-graphsync/storeutil"
16 | blockstore "github.com/ipfs/go-ipfs-blockstore"
17 | files "github.com/ipfs/go-ipfs-files"
18 | "github.com/ipfs/go-ipfs/core/node/helpers"
19 | unixfsFile "github.com/ipfs/go-unixfs/file"
20 |
21 | "github.com/ipld/go-ipld-prime"
22 | cidlink "github.com/ipld/go-ipld-prime/linking/cid"
23 | basicnode "github.com/ipld/go-ipld-prime/node/basic"
24 | ipldselector "github.com/ipld/go-ipld-prime/traversal/selector"
25 | "github.com/ipld/go-ipld-prime/traversal/selector/builder"
26 | "github.com/libp2p/go-libp2p-core/host"
27 | "github.com/libp2p/go-libp2p-core/peer"
28 | ma "github.com/multiformats/go-multiaddr"
29 | )
30 |
31 | // Graphsync constructs a graphsync
32 | func Graphsync(lc fx.Lifecycle, mctx helpers.MetricsCtx, host p2p.Host, bs blockstore.GCBlockstore) graphsync.GraphExchange {
33 | ctx := helpers.LifecycleCtx(mctx, lc)
34 |
35 | network := network.NewFromLibp2pHost(host)
36 | return gsimpl.New(ctx, network,
37 | storeutil.LoaderForBlockstore(bs),
38 | storeutil.StorerForBlockstore(bs),
39 | )
40 | }
41 |
42 | func newGraphsync(ctx context.Context, p2p host.Host, bs blockstore.Blockstore) (graphsync.GraphExchange, error) {
43 | network := network.NewFromLibp2pHost(p2p)
44 | return gsimpl.New(ctx,
45 | network,
46 | storeutil.LoaderForBlockstore(bs),
47 | storeutil.StorerForBlockstore(bs),
48 | ), nil
49 | }
50 |
51 | var selectAll ipld.Node = func() ipld.Node {
52 | ssb := builder.NewSelectorSpecBuilder(basicnode.Prototype.Any)
53 | return ssb.ExploreRecursive(
54 | ipldselector.RecursionLimitDepth(100), // default max
55 | ssb.ExploreAll(ssb.ExploreRecursiveEdge()),
56 | ).Node()
57 | }()
58 |
59 | func fetch(ctx context.Context, gs graphsync.GraphExchange, p peer.ID, c cid.Cid) error {
60 | ctx, cancel := context.WithCancel(ctx)
61 | defer cancel()
62 |
63 | resps, errs := gs.Request(ctx, p, cidlink.Link{Cid: c}, selectAll)
64 | for {
65 | select {
66 | case <-ctx.Done():
67 | return ctx.Err()
68 | case _, ok := <-resps:
69 | if !ok {
70 | resps = nil
71 | }
72 | case err, ok := <-errs:
73 | if !ok {
74 | // done.
75 | return nil
76 | }
77 | if err != nil {
78 | return fmt.Errorf("got an unexpected error: %s", err)
79 | }
80 | }
81 | }
82 | }
83 |
84 | // getContent gets a file from the network and computes time_to_fetch
85 | func getGraphsync(ctx context.Context, n *IPFSNode, p string, cidString string) error {
86 | ctx, cancel := context.WithTimeout(ctx, 30*time.Second)
87 | defer cancel()
88 | var (
89 | timeToFetch time.Duration
90 | // f files.Node
91 | err error
92 | )
93 |
94 | // Parse CID
95 | target, err := cid.Decode(cidString)
96 | if err != nil {
97 | return err
98 | }
99 | // Parse the target peer multiaddr
100 | maddr, err := ma.NewMultiaddr(p)
101 | if err != nil { return err }
102 | ai, err := peer.AddrInfoFromP2pAddr(maddr)
103 | if err != nil { return err }
104 |
105 | fmt.Printf("Searching for cid %v in peer %v\n", target, p)
106 |
107 | // Store in /tmp/
108 | fileName := "/tmp/" + time.Now().String()
109 |
110 | gs := n.Node.GraphExchange
111 | start := time.Now()
112 | // Fetch graph
113 | err = fetch(ctx, gs, ai.ID, target)
114 | if err != nil {
115 | return err
116 | }
117 | dag := n.Node.DAG
118 | // Get the DAG
119 | root, err := dag.Get(ctx, target)
120 | if err != nil {
121 | return err
122 | }
123 | // Traverse it and store it in file
124 | f, err := unixfsFile.NewUnixfsFile(ctx, dag, root)
125 | if err != nil {
126 | return err
127 | }
128 | files.WriteTo(f, fileName)
129 | timeToFetch = time.Since(start)
130 | // TODO: Store in a file to check also the size retrieved.
131 | s, _ := f.Size()
132 | fmt.Printf("[*] Time to fetch file of size %d: %d ns\n", s, timeToFetch)
133 |
134 | fmt.Println("Cleaning datastore")
135 | n.ClearDatastore(ctx)
136 | err = n.ClearBlockstore(ctx)
137 | if err != nil {
138 | fmt.Println("Error cleaning blockstore", err)
139 | }
140 | return nil
141 | }
142 |
--------------------------------------------------------------------------------
/testbed/probe/probe.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "bufio"
5 | "context"
6 | "flag"
7 | "fmt"
8 | "os"
9 | "os/signal"
10 | "strconv"
11 | "strings"
12 | "syscall"
13 |
14 | logging "github.com/ipfs/go-log"
15 | "github.com/ipfs/interface-go-ipfs-core/path"
16 | // This package is needed so that all the preloaded plugins are loaded automatically
17 | // bsnet "github.com/ipfs/go-bitswap/network"
18 | )
19 |
20 | func helpcmd() {
21 | fmt.Println(`[!] Commands available:
22 | * addFile_<path>
23 | * pin_<path>
24 | * get_<path>
25 | * connect_<multiaddr>
26 | * graphsync_<multiaddr>_<cid>
27 | * exit`)
28 | }
29 |
30 | // Process commands received from prompt
31 | func processInput(ctx context.Context, ipfs *IPFSNode, text string, done chan bool) error {
32 | text = strings.ReplaceAll(text, "\n", "")
33 | text = strings.ReplaceAll(text, " ", "")
34 | words := strings.Split(text, "_")
35 |
36 | // Defer notifying that processing is finished.
37 | defer func() {
38 | done <- true
39 | }()
40 |
41 | if words[0] == "exit" {
42 | os.Exit(0)
43 | }
44 | if words[0] == "help" {
45 | helpcmd()
46 | return nil
47 | }
48 | if len(words) < 2 {
49 | fmt.Println("Wrong number of arguments")
50 | return fmt.Errorf("Wrong number of arguments")
51 | }
52 | // If we use add we can add random content to the network.
53 | if words[0] == "add" {
54 | size, err := strconv.Atoi(words[1])
55 | if err != nil {
56 | fmt.Println("Not a valid size for random add")
57 | return err
58 | }
59 | addRandomContent(ctx, ipfs, size)
60 | } else if words[0] == "connect" {
61 | connectPeer(ctx, ipfs, words[1])
62 | } else if words[0] == "addFile" {
63 | addFile(ctx, ipfs, words[1])
64 | } else if words[0] == "get" {
65 | fPath := path.New(words[1])
66 | err := getContent(ctx, ipfs, fPath, false)
67 | if err != nil {
68 | fmt.Println("Couldn't find content", err)
69 | return err
70 | }
71 | } else if words[0] == "pin" {
72 | fPath := path.New(words[1])
73 | err := getContent(ctx, ipfs, fPath, true)
74 | if err != nil {
75 | fmt.Println("Couldn't find content", err)
76 | return err
77 | }
78 | } else if words[0] == "graphsync" {
79 | p := words[1]
80 | c := words[2]
81 | fmt.Println("Looking graphsync", p, c)
82 |
83 | err := getGraphsync(ctx, ipfs, p, c)
84 | if err != nil {
85 | fmt.Println("Couldn't find content with graphsync:", err)
86 | return err
87 | }
88 | } else {
89 | fmt.Println("[!] Wrong command")
90 | helpcmd()
91 | }
92 | // We could show metrics after each command in certain cases.
93 | // fmt.Println("=== METRICS ===")
94 | // bw := ipfs1.Node.Reporter.GetBandwidthTotals()
95 | // printStats(&bw)
96 | return nil
97 | }
98 |
99 | func main() {
100 | addDirectory := flag.String("addDirectory", "", "Add a directory to the probe")
101 | debug := flag.Bool("debug", false, "Set debug logging")
102 |
103 | flag.Parse()
104 | if *debug {
105 | logging.SetLogLevel("bitswap", "DEBUG")
106 | logging.SetLogLevel("bitswap_network", "DEBUG")
107 | }
108 |
109 | reader := bufio.NewReader(os.Stdin)
110 |
111 | fmt.Println("-- Getting an IPFS node running -- ")
112 |
113 | ctx, cancel := context.WithCancel(context.Background())
114 | defer cancel()
115 |
116 | if err := setupPlugins(""); err != nil {
117 | panic(fmt.Errorf("Failed setting up plugins: %s", err))
118 | }
119 |
120 | // Spawn a node using a temporary path, creating a temporary repo for the run
121 | fmt.Println("Spawning node on a temporary repo")
122 | // ipfs, err := CreateIPFSNode(ctx)
123 | // if err != nil {
124 | // panic(fmt.Errorf("failed to spawn ephemeral node: %s", err))
125 | // }
126 | nConfig, err := GenerateAddrInfo("127.0.0.1")
127 | if err != nil {
128 | panic(err)
129 | }
130 | // Create IPFS node
131 | ipfs, err := CreateIPFSNodeWithConfig(ctx, nConfig, false)
132 | if err != nil {
133 | panic(err)
134 | }
135 |
136 | // Adding random content for testing.
137 | addRandomContent(ctx, ipfs, 11111)
138 | if *addDirectory != "" {
139 | // Adding directory,
140 | fmt.Println("Adding inputData directory")
141 | err := addFile(ctx, ipfs, *addDirectory)
142 | if err != nil {
143 | panic("Wrong directory")
144 | }
145 | }
146 |
147 | ch := make(chan string)
148 | chSignal := make(chan os.Signal, 1)
149 | done := make(chan bool)
150 | signal.Notify(chSignal, os.Interrupt, syscall.SIGTERM)
151 |
152 | // Prompt routine
153 | go func(ch chan string, done chan bool) {
154 | for {
155 | fmt.Print(">> Enter command: ")
156 | text, _ := reader.ReadString('\n')
157 | ch <- text
158 | <-done
159 | }
160 | }(ch, done)
161 |
162 | // Processing loop.
163 | for {
164 | select {
165 | case text := <-ch:
166 | processInput(ctx, ipfs, text, done)
167 |
168 | case <-chSignal:
169 | fmt.Printf("\nUse exit to close the tool\n")
170 | fmt.Printf(">> Enter command: ")
171 |
172 | }
173 | }
174 | }
175 |
--------------------------------------------------------------------------------
/testbed/test-datasets/README.md:
--------------------------------------------------------------------------------
1 | # Test datasets
2 |
3 | This folder is used by the testbed as the location from which to pull the data for the benchmarks. Remove this README.md and add any dataset you want to use in your tests.
4 |
5 | You can find multiple free datasets at https://awesome.ipfs.io/datasets for your own testing.
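
If you just need throwaway content, the testbed ships a small helper script for generating random files. A minimal sketch, run from the repository root (by default the script writes into this `test-datasets` directory):

```bash
cd testbed/testbed/scripts
./random-file.sh 10M        # creates a ~10 MB file named after the current timestamp
```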
--------------------------------------------------------------------------------
/testbed/test-datasets/xkcd.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/protocol/beyond-bitswap/63a95b41d5c71d0da509edc8c5ac43de9fdea09d/testbed/test-datasets/xkcd.png
--------------------------------------------------------------------------------
/testbed/testbed/.gitignore:
--------------------------------------------------------------------------------
1 | scripts/env
2 | scripts/results
3 | scripts/saved
4 | scripts/inputData
5 | scripts/__pycache__
6 | .DS_Store
7 | ../test-datasets/*
--------------------------------------------------------------------------------
/testbed/testbed/go.mod:
--------------------------------------------------------------------------------
1 | module github.com/protocol/beyond-bitswap/testbed/testbed
2 |
3 | go 1.14
4 |
5 | require (
6 | github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c // indirect
7 | github.com/dgraph-io/badger/v2 v2.2007.2
8 | github.com/hannahhoward/all-selector v0.2.0
9 | github.com/ipfs/go-bitswap v0.2.20
10 | github.com/ipfs/go-blockservice v0.1.3
11 | github.com/ipfs/go-cid v0.0.7
12 | github.com/ipfs/go-datastore v0.4.5
13 | github.com/ipfs/go-ds-badger2 v0.1.0
14 | github.com/ipfs/go-filestore v1.0.0 // indirect
15 | github.com/ipfs/go-graphsync v0.4.3
16 | github.com/ipfs/go-ipfs v0.7.0
17 | github.com/ipfs/go-ipfs-blockstore v1.0.1
18 | github.com/ipfs/go-ipfs-chunker v0.0.5
19 | github.com/ipfs/go-ipfs-config v0.9.0
20 | github.com/ipfs/go-ipfs-delay v0.0.1
21 | github.com/ipfs/go-ipfs-exchange-interface v0.0.1
22 | github.com/ipfs/go-ipfs-exchange-offline v0.0.1
23 | github.com/ipfs/go-ipfs-files v0.0.8
24 | github.com/ipfs/go-ipfs-posinfo v0.0.1
25 | github.com/ipfs/go-ipfs-routing v0.1.0
26 | github.com/ipfs/go-ipld-format v0.2.0
27 | github.com/ipfs/go-log/v2 v2.1.1
28 | github.com/ipfs/go-merkledag v0.3.2
29 | github.com/ipfs/go-metrics-interface v0.0.1
30 | github.com/ipfs/go-mfs v0.1.2
31 | github.com/ipfs/go-unixfs v0.2.4
32 | github.com/ipfs/interface-go-ipfs-core v0.4.0
33 | github.com/ipld/go-ipld-prime v0.5.1-0.20201021195245-109253e8a018
34 | github.com/jbenet/goprocess v0.1.4
35 | github.com/libp2p/go-libp2p v0.11.0
36 | github.com/libp2p/go-libp2p-core v0.6.1
37 | github.com/libp2p/go-libp2p-gostream v0.2.1
38 | github.com/libp2p/go-libp2p-http v0.1.6-0.20210310045043-5508c68db693
39 | // github.com/libp2p/go-libp2p-gzip v0.0.0-00010101000000-000000000000
40 | github.com/libp2p/go-mplex v0.1.3 // indirect
41 | github.com/libp2p/go-sockaddr v0.1.0 // indirect
42 | github.com/libp2p/go-yamux v1.3.8 // indirect
43 | github.com/multiformats/go-multiaddr v0.3.1
44 | github.com/multiformats/go-multihash v0.0.14
45 | github.com/pkg/errors v0.9.1
46 | github.com/testground/sdk-go v0.2.6-0.20201016180515-1e40e1b0ec3a
47 | github.com/whyrusleeping/cbor-gen v0.0.0-20200723185710-6a3894a6352b // indirect
48 | go.uber.org/fx v1.13.1
49 | golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de // indirect
50 | golang.org/x/net v0.0.0-20200822124328-c89045814202 // indirect
51 | golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9
52 | golang.org/x/sys v0.0.0-20200803210538-64077c9b5642 // indirect
53 | golang.org/x/text v0.3.3 // indirect
54 | )
55 |
56 | replace github.com/ipfs/go-bitswap => github.com/adlrocha/go-bitswap v0.2.20-0.20201006081544-fad1a007cf9b
57 |
--------------------------------------------------------------------------------
/testbed/testbed/main.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | test "github.com/protocol/beyond-bitswap/testbed/testbed/test"
5 | "github.com/testground/sdk-go/run"
6 | )
7 |
8 | func main() {
9 | run.InvokeMap(map[string]interface{}{
10 | "transfer": test.Transfer,
11 | "tcp-transfer": test.TCPTransfer,
12 | })
13 | }
14 |
--------------------------------------------------------------------------------
/testbed/testbed/manifest.toml:
--------------------------------------------------------------------------------
1 | name = "testbed"
2 |
3 | [builders."docker:go"]
4 | enabled = true
5 | go_version = "1.14"
6 | module_path = "github.com/ipfs/test-plans/beyond-bitswap"
7 | exec_pkg = "."
8 | skip_runtime_image = true
9 |
10 | [extra_sources]
11 | "docker:go" = ["../test-datasets"]
12 |
13 | # extra_sources = { "exec:go" = ["./scripts/inputData"] }
14 |
15 | [builders."exec:go"]
16 | enabled = true
17 | module_path = "github.com/ipfs/test-plans/beyond-bitswap"
18 | exec_pkg = "."
19 |
20 | [runners."local:docker"]
21 | enabled = true
22 |
23 | [runners."local:exec"]
24 | enabled = true
25 |
26 | ["aws"]
27 | region = "eu-west-2"
28 |
29 | [runners."cluster:k8s"]
30 | testplan_pod_cpu = "100m"
31 | testplan_pod_memory = "100Mi"
32 | provider = "aws"
33 | autoscaler_enabled = false
34 | collect_outputs_pod_cpu = "102m"
35 | collect_outputs_pod_memory = "103Mi"
36 |
37 | [runners."cluster:swarm"]
38 | enabled = true
39 |
40 | [[testcases]]
41 | name = "transfer"
42 | instances = { min = 2, max = 64, default = 2 }
43 |
44 | [testcases.params]
45 | node_type = { type="string", desc="type of node (ipfs, bitswap, graphsync, libp2pHTTP, rawLibp2p)", default="ipfs" }
46 | input_data = { type="string", desc="input data to be used in the test (files, random, custom)", default="random"}
47 | data_dir = { type="string", desc="directory where the input data is located", default="../extra/test-datasets"}
48 | exchange_interface = { type="string", desc="exchange interface to use in IPFS node", default="bitswap"}
49 | run_count = { type = "int", desc = "number of iterations of the test", unit = "iteration", default = 1 }
50 | run_timeout_secs = { type = "int", desc = "timeout for an individual run", unit = "seconds", default = 90000 }
51 | leech_count = { type = "int", desc = "number of leech nodes", unit = "peers", default = 1 }
52 | passive_count = { type = "int", desc = "number of passive nodes (neither leech nor seed)", unit = "peers", default = 0 }
53 | timeout_secs = { type = "int", desc = "timeout", unit = "seconds", default = 400000 }#TODO: Decrease to 300 if not debugging. Bear this in mind while making long tests.
54 | bstore_delay_ms = { type = "int", desc = "blockstore get / put delay (Only applicable for in-memory stores)", unit = "milliseconds", default = 5 }
55 | request_stagger = { type = "int", desc = "time between each leech's first request", unit = "ms", default = 0}
56 | file_size = { type = "int", desc = "file size", unit = "bytes", default = 4194304 }
57 | latency_ms = { type = "int", desc = "latency", unit = "ms", default = 5 }
58 | jitter_pct = { type = "int", desc = "jitter as percentage of latency", unit = "%", default = 10 }
59 | bandwidth_mb = { type = "int", desc = "bandwidth", unit = "Mib", default = 1024 }
60 | parallel_gen_mb = { type = "int", desc = "maximum allowed size of seed data to generate in parallel", unit = "Mib", default = 100 }
61 | max_connection_rate = { type = "int", desc = "max connection allowed per peer according to total nodes", unit = "%", default = 100 }
62 | seeder_rate = { type = "int", desc = "percentage of nodes seeding the file", unit = "%", default = 100 }
63 | number_waves = { type = "int", desc = "Number of waves of leechers", unit = "waves", default = 1 }
64 | enable_tcp = { type="bool", desc="Enable TCP comparison", default=false }
65 | enable_dht = { type="bool", desc="Enable DHT in IPFS nodes", default=false }
66 | enable_providing = { type="bool", desc="Enable the providing system", default=false }
67 | long_lasting = {type="bool", desc="Enable to retrieve feedback from running nodes in long-lasting experiments", default=false}
68 | dialer = { type="string", desc="network topology between nodes", default="default"}
69 | disk_store = { type="bool", desc="Enable Badger Data Store instead of an in-memory store", default=false}
70 |
71 |
72 | [[testcases]]
73 | name = "tcp-transfer"
74 | instances = { min = 2, max = 2, default = 2 }
75 |
76 | [testcases.params]
77 | input_data = { type="string", desc="input data to be used in the test (files, random, custom)", default="random"}
78 | data_dir = { type="string", desc="directory where the input data is located", default="../extra/test-datasets"}
79 | file_size = { type = "int", desc = "file size", unit = "bytes", default = 4194304 }
80 | latency_ms = { type = "int", desc = "latency", unit = "ms", default = 5 }
81 | jitter_pct = { type = "int", desc = "jitter as percentage of latency", unit = "%", default = 10 }
82 | bandwidth_mb = { type = "int", desc = "bandwidth", unit = "Mib", default = 1024 }
83 |
--------------------------------------------------------------------------------
/testbed/testbed/scripts/config.yaml:
--------------------------------------------------------------------------------
1 | # Set up your use case parameters
2 | use_case:
3 | testcase: "transfer"
4 | input_data: "random" # files, dir, random and custom supported.
5 | file_size: "15728640,31457280,47185920,57671680" # If INPUT_DATA=random is selected choose the file sizes
6 | files_directory: "./inputData" # Set directory from which to take files if INPUT_DATA=files
7 | run_count: 3
8 |
9 | # Set up Data Ingestion parameters
10 | # TODO
11 | dataIngestion:
12 | chunker: ""
13 | dag_layout: ""
14 |
15 | # Set your network topology
16 | #TODO
17 | network:
18 | n_nodes: 4 # TOTAL NUMBER OF NODES
19 | n_leechers: 1 # NUMBER OF LEECHERS
20 | n_passive: 0 # NUMBER OF PASSIVE NODES.
21 | max_connection_rate: 100 # % OF CONNECTION FROM THE TOTAL NUMBER OF NODES ALLOWED
22 | churn_rate: 10 # % CHURN RATE OF THE NETWORK.
23 |
24 | # Set your nodes parameters
25 | nodes:
26 | nodes_bandwidth: # PERCENTAGE OF NODES WITH X BANDWIDTH. (0.2, 150; 0.3, 100) = 20% 150MB; 30% 100MB; Rest default.
27 | nodes_latency:
28 | nodes_jitter:
29 | max_cpu:
30 | max_ram:
31 |
32 | # Set your havoc parameters.
33 | havoc:
34 | attack_type: ""
35 | malicious_nodes: ""
36 |
--------------------------------------------------------------------------------
/testbed/testbed/scripts/exec.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | TESTGROUND_BIN="testground"
4 | CMD="run $TESTCASE $INSTANCES $FILE_SIZE $RUN_COUNT $LATENCY $JITTER $PARALLEL_GEN $LEECH_COUNT $BANDWIDTH $INPUT_DATA $DATA_DIR $TCP_ENABLED $MAX_CONNECTION_RATE $PASSIVE_COUNT"
5 | # RUNNER="local:exec"
6 | # BUILDER="exec:go"
7 |
8 | echo "Starting test..."
9 |
10 | run_bitswap(){
11 | $TESTGROUND_BIN run single \
12 | --build-cfg skip_runtime_image=true \
13 | --plan=testbed \
14 | --testcase=$1 \
15 | --builder=$BUILDER \
16 | --runner=$RUNNER --instances=$2 \
17 | -tp file_size=$3 \
18 | -tp run_count=$4 \
19 | -tp latency_ms=$5 \
20 | -tp jitter_pct=$6 \
21 | -tp parallel_gen_mb=$7 \
22 | -tp leech_count=$8 \
23 | -tp bandwidth_mb=$9 \
24 | -tp input_data=${10} \
25 | -tp data_dir=${11} \
26 | -tp enable_tcp=${12} \
27 | -tp max_connection_rate=${13} \
28 | -tp passive_count=${14}
29 | # | tail -n 1 | awk -F 'run with ID: ' '{ print $2 }'
30 | }
31 |
32 | run() {
33 | echo "Running test with ($1, $2, $3, $4, $5, $6, $7, $8, $9, ${10}, ${11}, ${12}, ${13}, ${14}) (TESTCASE, INSTANCES, FILE_SIZE, RUN_COUNT, LATENCY, JITTER, PARALLEL, LEECH, BANDWIDTH, INPUT_DATA, DATA_DIR, TCP_ENABLED, MAX_CONNECTION_RATE, PASSIVE_COUNT)"
34 | TESTID=`run_bitswap $1 $2 $3 $4 $5 $6 $7 $8 $9 ${10} ${11} ${12} ${13} ${14}| tail -n 1 | awk -F 'run is queued with ID:' '{ print $2 }'`
35 | checkstatus $TESTID
36 | # `run_bitswap $1 $2 $3 $4 $5 $6 $7 $8 $9 ${10} ${11} ${12} ${13} ${14}| tail -n 1 | awk -F 'run with ID: ' '{ print $2 }'`
37 | # echo $TESTID
38 | # echo "Finished test $TESTID"
39 | $TESTGROUND_BIN collect --runner=$RUNNER $TESTID
40 | tar xzvf $TESTID.tgz
41 | rm $TESTID.tgz
42 | mv $TESTID ./results/
43 | echo "Collected results"
44 | }
45 |
46 | getstatus() {
47 | STATUS=`testground status --task $1 | tail -n 2 | awk -F 'Status:' '{ print $2 }'`
48 | echo ${STATUS//[[:blank:]]/}
49 | }
50 |
51 | checkstatus(){
52 | STATUS="none"
53 | while [ "$STATUS" != "complete" ]
54 | do
55 | STATUS=`getstatus $1`
56 | echo "Getting status: $STATUS"
57 | sleep 10s
58 | done
59 | echo "Task completed"
60 | }
61 |
62 | run_composition() {
63 | echo "Running composition test for $1"
64 | TESTID=`testground run composition -f $1 | tail -n 1 | awk -F 'run is queued with ID:' '{ print $2 }'`
65 | checkstatus $TESTID
66 | $TESTGROUND_BIN collect --runner=$RUNNER $TESTID
67 | tar xzvf $TESTID.tgz
68 | rm $TESTID.tgz
69 | mv $TESTID ./results/
70 | echo "Collected results"
71 | }
72 |
73 | # checkstatus bub74h523089p79be5ng
--------------------------------------------------------------------------------
/testbed/testbed/scripts/pdf.py:
--------------------------------------------------------------------------------
1 | import process
2 | import os
3 | import sys
4 | import utils
5 | from matplotlib.backends.backend_pdf import PdfPages
6 |
7 | dir_path = os.path.dirname(os.path.realpath(__file__))
8 | rfc = sys.argv[1]
9 | filename = "/rfc.pdf"
10 | if len(sys.argv) == 3:
11 | filename = "/" + sys.argv[2] + ".pdf"
12 |
13 | print(filename)
14 |
15 | with PdfPages(dir_path + "/../../../RFC/"+rfc+filename) as export_pdf:
16 |
17 | agg, testcases = process.aggregate_results(dir_path + "/../../../RFC/results")
18 | byLatency = process.groupBy(agg, "latencyMS")
19 | byNodeType = process.groupBy(agg, "nodeType")
20 | byFileSize = process.groupBy(agg, "fileSize")
21 | byBandwidth = process.groupBy(agg, "bandwidthMB")
22 | byTopology = process.groupBy(agg, "topology")
23 |
24 | process.plot_latency(byLatency, byBandwidth, byFileSize)
25 | export_pdf.savefig()
26 | process.plot_messages(byFileSize, byTopology)
27 | export_pdf.savefig()
28 | # process.plot_bw_overhead(byFileSize, byTopology)
29 | # export_pdf.savefig()
30 | # process.plot_througput(byLatency, byBandwidth, byFileSize, byTopology, testcases)
31 | # export_pdf.savefig()
32 | process.plot_want_messages(byFileSize, byTopology)
33 | export_pdf.savefig()
34 | # process.plot_tcp_latency(byLatency, byBandwidth, byFileSize)
35 | # export_pdf.savefig()
36 |
--------------------------------------------------------------------------------
/testbed/testbed/scripts/pdf_composition.py:
--------------------------------------------------------------------------------
1 | import process
2 | import os
3 | import sys
4 | import utils
5 | from matplotlib.backends.backend_pdf import PdfPages
6 |
7 | dir_path = os.path.dirname(os.path.realpath(__file__))
8 | print(dir_path)
9 | name = sys.argv[1]
10 | filename = "/" + name + ".pdf"
11 |
12 | print(filename)
13 |
14 | with PdfPages(dir_path + "/../../compositions/results/"+filename) as export_pdf:
15 |
16 | agg, testcases = process.aggregate_results(dir_path + "/../../compositions/results")
17 | byLatency = process.groupBy(agg, "latencyMS")
18 | byNodeType = process.groupBy(agg, "nodeType")
19 | byFileSize = process.groupBy(agg, "fileSize")
20 | byBandwidth = process.groupBy(agg, "bandwidthMB")
21 | byTopology = process.groupBy(agg, "topology")
22 |
23 | process.plot_latency_no_comparision(byLatency, byBandwidth, byFileSize)
24 | export_pdf.savefig()
--------------------------------------------------------------------------------
/testbed/testbed/scripts/random-file.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | if [ $# -eq 0 ]
3 | then
4 | echo "[!!] No argument supplied. Example of use: ./random-file 10M "
5 | exit 0
6 | fi
7 | OUTPUT_DIR=$2
8 | NAME=`date +%s`
9 | if [ -z "$2" ]
10 | then
11 | OUTPUT_DIR="../../test-datasets"
12 | fi
13 |
14 | echo "[*] Generating a random file of $1B in $OUTPUT_DIR"
15 | head -c $1 /dev/urandom > $OUTPUT_DIR/$NAME
--------------------------------------------------------------------------------
/testbed/testbed/scripts/requirements.txt:
--------------------------------------------------------------------------------
1 | toml
2 | jinja2
3 | ndjson
4 | pandas
5 | numpy
6 | matplotlib
7 | jupyter
8 | ipywidgets
9 | bunch
10 | stringcase
11 | papermill
12 | jupyter-ui-poll
13 | jupyter_contrib_nbextensions
14 | durations
15 | seaborn
16 | python-rclone
17 | pandas-sets
18 | base58
19 | ipyfilechooser
--------------------------------------------------------------------------------
/testbed/testbed/scripts/runner.py:
--------------------------------------------------------------------------------
1 | import subprocess
2 |
3 | def prepareRun():
4 | res = subprocess.run(["ls"])
5 | print(res.stdout)
6 |
7 | prepareRun()
--------------------------------------------------------------------------------
/testbed/testbed/scripts/single_run.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # RUNNER="local:docker"
4 | # BUILDER="docker:go"
5 | # RUNNER="cluster:k8s"
6 | # BUILDER="docker:go"
7 | RUNNER="local:exec"
8 | BUILDER="exec:go"
9 |
10 | echo "Cleaning previous results..."
11 |
12 | rm -rf ./results
13 | mkdir ./results
14 |
15 | FILE_SIZE=15728640
16 | # FILE_SIZE=15728640,31457280,47185920,57671680
17 | RUN_COUNT=2
18 | INSTANCES=5
19 | LEECH_COUNT=3
20 | PASSIVE_COUNT=0
21 | LATENCY=10
22 | JITTER=10
23 | BANDWIDTH=150
24 | PARALLEL_GEN=100
25 | TESTCASE=transfer
26 | INPUT_DATA=files
27 | # DATA_DIR=../extra/testDataset
28 | TCP_ENABLED=false
29 | MAX_CONNECTION_RATE=100
30 |
31 | source ./exec.sh
32 |
33 | eval $CMD
34 |
35 | docker rm -f testground-redis
36 |
--------------------------------------------------------------------------------
/testbed/testbed/scripts/ui.py:
--------------------------------------------------------------------------------
1 | import ipywidgets as widgets
2 | import utils
3 |
4 | class Layout:
5 | def __init__(self):
6 | self.testcase = widgets.Text(description="Testcase")
7 | self.protocol = widgets.Text(description="Protocol")
8 | self.input_data = widgets.Text(description="Input Data Type")
9 | self.file_size = widgets.Text(description="File Size")
10 | self.data_dir = widgets.Text(description="Files Directory")
11 | self.run_count = widgets.IntSlider(description="Run Count", min=1, max=10)
12 |
13 | self.n_nodes = widgets.IntSlider(description="# nodes", min=2, max=50)
14 | self.n_leechers = widgets.IntSlider(description="# leechers", min=1, max=50)
15 | self.n_passive = widgets.IntSlider(description="# passive ", min=0, max=10)
16 | self.max_connection_rate = widgets.IntSlider(description="Max connections (%)", value=100, min=0, max=100)
17 | self.churn_rate = widgets.IntSlider(description="Churn Rate (%)", min=0, max=100)
18 | self.isDocker = widgets.Checkbox(value=False,description='Docker Env',disabled=False,indent=False)
19 | self.bandwidth_mb = widgets.IntSlider(description="Nodes Bandwidth (MB)", value=100, min=0, max=500)
20 | self.latency_ms = widgets.IntSlider(description="Nodes Latency (ms)", value=10, min=10, max=500)
21 | self.jitter_pct = widgets.IntSlider(description="Pct Jitter (%)", value=5, min=0, max=100)
22 | self.tcpEnabled = widgets.Checkbox(value=False,description='TCP enabled',disabled=False,indent=False)
23 | self.runButton = widgets.Button(
24 | description='Run Test',
25 | disabled=False,
26 | button_style='success',
27 | tooltip='Run test',
28 | icon='check' # (FontAwesome names without the `fa-` prefix)
29 | )
30 | self.grid = widgets.GridspecLayout(8, 2, height='300px')
31 | self.testid = ""
32 |
33 | def show(self):
34 | self.grid[0, 0] = self.testcase
35 | self.grid[1, 0] = self.input_data
36 | self.grid[2, 0] = self.file_size
37 | self.grid[3, 0] = self.data_dir
38 | self.grid[4, 0] = self.run_count
39 | self.grid[5, 0] = self.bandwidth_mb
40 | self.grid[6, 0] = self.protocol
41 | self.grid[7, 0] = self.runButton
42 |
43 | self.grid[0, 1] = self.n_nodes
44 | self.grid[1, 1] = self.n_leechers
45 | self.grid[2, 1] = self.n_passive
46 | self.grid[3, 1] = self.churn_rate
47 | self.grid[4, 1] = self.isDocker
48 | self.grid[5, 1] = self.latency_ms
49 | self.grid[6, 1] = self.jitter_pct
50 | self.grid[7, 1] = self.tcpEnabled
51 |
52 | return self.grid
53 |
54 |
55 | class ProcessLayout:
56 | def __init__(self):
57 | self.testid = widgets.Text(description="Test ID")
58 |
59 | self.runButton = widgets.Button(
60 | description='Run Test',
61 | disabled=False,
62 | button_style='success',
63 | tooltip='Run test',
64 | icon='check' # (FontAwesome names without the `fa-` prefix)
65 | )
66 | self.grid = widgets.GridspecLayout(1, 1)
67 |
68 | def show(self):
69 | self.grid[0, 0] = self.testid
70 | # self.grid[0, 1] = self.runButton
71 | return self.grid
72 |
73 | # def run_test(self):
74 | # self.testid = utils.runner(utils.process_layout_config(self))
75 |
76 | # def on_click(self, b):
77 | # run_test(self)
--------------------------------------------------------------------------------
/testbed/testbed/scripts/utils.py:
--------------------------------------------------------------------------------
1 | import yaml
2 | import os
3 | import time
4 |
5 | TESTGROUND_BIN="testground"
6 | BUILDER = "exec:go"
7 | RUNNER = "local:exec"
8 | BUILDCFG = " --build-cfg skip_runtime_image=true"
9 | BASE_CMD = TESTGROUND_BIN + " run single --plan=testbed --builder=" + \
10 | BUILDER + " --runner=" + RUNNER + BUILDCFG
11 |
12 | # Parses yaml configs
13 | def process_yaml_config(path):
14 | cmd = BASE_CMD
15 | with open(path) as file:
16 | docs = yaml.full_load(file)
17 |
18 | # Parsing use case parameters
19 | if docs["use_case"]:
20 | if docs["use_case"]["testcase"]:
21 | cmd = cmd + " --testcase=" + docs["use_case"]["testcase"]
22 | if docs["use_case"]["input_data"]:
23 | cmd = cmd + " -tp input_data=" + docs["use_case"]["input_data"]
24 | if docs["use_case"]["file_size"]:
25 | cmd = cmd + " --tp file_size=" + docs["use_case"]["file_size"]
26 | if docs["use_case"]["run_count"]:
27 | cmd = cmd + " --tp run_count=" +str(docs["use_case"]["run_count"])
28 |
29 | # Parsing network parameters
30 | if docs["network"]:
31 | if docs["network"]["n_nodes"]:
32 | cmd = cmd + " --instances=" + str(docs["network"]["n_nodes"])
33 | if docs["network"]["n_leechers"]:
34 | cmd = cmd + " -tp leech_count=" + str(docs["network"]["n_leechers"])
35 | if docs["network"]["n_passive"]:
36 | cmd = cmd + "-tp passive_count=" + str(docs["network"]["n_passive"])
37 | if docs["network"]["max_peer_connections"]:
38 | cmd = cmd + " -tp max_connection_rate=" + str(docs["network"]["max_peer_connections"])
39 | # if docs["network"]["churn_rate"]:
40 | # cmd = cmd + " -tp churn_rate=" + str(docs["network"]["churn_rate"])
41 |
42 | return cmd
43 |
44 | # Parses config from Jupyter layout
45 | def process_layout_config(layout):
46 | base = BASE_CMD
47 | if layout.isDocker.value:
48 | BUILDER = "docker:go"
49 | RUNNER = "local:docker"
50 | base = TESTGROUND_BIN + " run single --plan=testbed --builder=" + \
51 | BUILDER + " --runner=" + RUNNER + BUILDCFG
52 |
53 | if layout.tcpEnabled.value:
54 | tcpFlag = "true"
55 | else:
56 | tcpFlag = "false"
57 |
58 | cmd = base + " --testcase=" + layout.testcase.value + \
59 | " --instances=" + str(layout.n_nodes.value)
60 |
61 | if layout.input_data.value != "":
62 | cmd = cmd + " -tp input_data=" + layout.input_data.value
63 | if layout.file_size.value != "":
64 | cmd = cmd + " -tp file_size=" + layout.file_size.value.replace(" ", "")
65 | if layout.data_dir.value != "":
66 | cmd = cmd + " -tp data_dir=" + layout.data_dir.value.replace(" ", "")
67 |
68 | cmd = cmd + " -tp leech_count=" + str(layout.n_leechers.value) + \
69 | " -tp passive_count=" + str(layout.n_passive.value) + \
70 | " -tp max_connection_rate=" + str(layout.max_connection_rate.value) + \
71 | " -tp run_count=" + str(layout.run_count.value) + \
72 | " -tp bandwidth_mb=" + str(layout.bandwidth_mb.value) + \
73 | " -tp latency_ms=" + str(layout.latency_ms.value) + \
74 | " -tp jitter_pct=" + str(layout.jitter_pct.value) + \
75 | " -tp node_type=" + layout.protocol.value + \
76 | " -tp enable_tcp=" + tcpFlag
77 |
78 | return cmd
79 |
80 | # Testground runner
81 | def runner(cmd):
82 | print("Running as: ", cmd)
83 | cmd = cmd + "| tail -n 1 | awk -F 'run is queued with ID:' '{ print $2 }'"
84 | stream = os.popen(cmd)
85 | testID = stream.read().replace("\n", "").replace(" ", "")
86 |
87 | print("Received testID: " + testID)
88 |
89 | check_status(testID)
90 |
91 | print("Run for task completed")
92 | # if len(testID) < 13 and len(testID) > 1:
93 | # print("Run completed successfully with testID: %s" % testID)
94 | # else:
95 | # print("There was an error running the testcase. Check daemon.")
96 | return testID
97 |
98 | def check_status(testid):
99 | cmd = "testground status --task " + testid
100 | print(cmd)
101 | cmd = cmd + "| tail -n 2 | awk -F 'Status:' '{ print $2 }'"
102 | status = "none"
103 | while status != "complete":
104 | stream = os.popen(cmd)
105 | status = stream.read().replace("\n", "").replace(" ", "").strip()
106 | print("Task status:", status)
107 | if status == "":
108 | print("There was an error running the experiment. Check Testground daemon for further details.")
109 | return
110 | time.sleep(10)
111 |
112 |
113 | # Collect data from a testcase
114 | def collect_data(layout, testid, save=False):
115 | RUNNER = "local:exec"
116 | if layout.isDocker.value:
117 | RUNNER = "local:docker"
118 |
119 | print("Cleaning previous runs..")
120 | cmd = "rm -rf results/*"
121 | print(os.popen(cmd).read())
122 |
123 | print("Collecting data for testid: ", testid)
124 | cmd = TESTGROUND_BIN + " collect --runner="+RUNNER + " " + testid
125 | print(os.popen(cmd).read())
126 | cmd = "tar xzvf %s.tgz && rm %s.tgz && mv %s results/" % (testid, testid, testid)
127 | print(os.popen(cmd).read())
128 |
129 | if save:
130 | print("Saving data for testid: %s" % testid)
131 | cmd = "cp -r results/%s saved/"
132 | print(os.popen(cmd).read())
133 |
134 |
135 | # testid = runner(process_config("./config.yaml"))
136 | # collect_data("96c6ff2b6ebf")
137 | # check_status("bub8gid23084pljmerqg")
--------------------------------------------------------------------------------
/testbed/testbed/test/tcpTransfer.go:
--------------------------------------------------------------------------------
1 | package test
2 |
3 | import (
4 | "context"
5 | "fmt"
6 |
7 | "github.com/testground/sdk-go/run"
8 | "github.com/testground/sdk-go/runtime"
9 |
10 | "github.com/protocol/beyond-bitswap/testbed/testbed/utils"
11 | )
12 |
13 | // TCPTransfer transfers data over TCP from S seeds to L leeches
14 | func TCPTransfer(runenv *runtime.RunEnv, initCtx *run.InitContext) error {
15 | // Test Parameters
16 | testvars, err := getEnvVars(runenv)
17 | if err != nil {
18 | return err
19 | }
20 |
21 | /// --- Set up
22 | ctx, cancel := context.WithTimeout(context.Background(), testvars.Timeout)
23 | defer cancel()
24 | t, err := InitializeTest(ctx, runenv, testvars)
25 | if err != nil {
26 | return err
27 | }
28 |
29 | // Signal that this node is in the given state, and wait for all peers to
30 | // send the same signal
31 | signalAndWaitForAll := t.signalAndWaitForAll
32 |
33 | err = signalAndWaitForAll("file-list-ready")
34 | if err != nil {
35 | return err
36 | }
37 |
38 | var tcpFetch int64
39 |
40 | // For each test permutation found in the test
41 | for pIndex, testParams := range testvars.Permutations {
42 | // Set up network (with traffic shaping)
43 | if err := utils.SetupNetwork(ctx, runenv, t.nwClient, t.nodetp, t.tpindex, testParams.Latency,
44 | testParams.Bandwidth, testParams.JitterPct); err != nil {
45 | return fmt.Errorf("Failed to set up network: %v", err)
46 | }
47 |
48 | err = signalAndWaitForAll(fmt.Sprintf("transfer-start-%d", pIndex))
49 | if err != nil {
50 | return err
51 | }
52 |
53 | runenv.RecordMessage("Starting TCP Fetch...")
54 |
55 | for runNum := 1; runNum < testvars.RunCount+1; runNum++ {
56 |
57 | switch t.nodetp {
58 | case utils.Seed:
59 | err = t.runTCPServer(ctx, pIndex, runNum, testParams.File, runenv, testvars)
60 | if err != nil {
61 | return err
62 | }
63 | case utils.Leech:
64 | tcpFetch, err = t.runTCPFetch(ctx, pIndex, runNum, runenv, testvars)
65 | if err != nil {
66 | return err
67 | }
68 | recorder := newMetricsRecorder(runenv, runNum, t.seq, t.grpseq, "tcp", testParams.Latency,
69 | testParams.Bandwidth, int(testParams.File.Size()), t.nodetp, t.tpindex, 1)
70 | recorder.Record("time_to_fetch", float64(tcpFetch))
71 | }
72 | }
73 |
74 | err = signalAndWaitForAll(fmt.Sprintf("transfer-end-%d", pIndex))
75 | if err != nil {
76 | return err
77 | }
78 | }
79 |
80 | runenv.RecordMessage("Ending testcase")
81 | return nil
82 | }
83 |
--------------------------------------------------------------------------------
/testbed/testbed/test/transfer.go:
--------------------------------------------------------------------------------
1 | package test
2 |
3 | import (
4 | "context"
5 | "errors"
6 | "fmt"
7 | "strconv"
8 | "time"
9 |
10 | "github.com/libp2p/go-libp2p"
11 | "github.com/libp2p/go-libp2p-core/crypto"
12 | "github.com/libp2p/go-libp2p-core/host"
13 | "github.com/testground/sdk-go/run"
14 | "github.com/testground/sdk-go/runtime"
15 | "github.com/testground/sdk-go/sync"
16 |
17 | "github.com/ipfs/go-cid"
18 | files "github.com/ipfs/go-ipfs-files"
19 | "github.com/protocol/beyond-bitswap/testbed/testbed/utils"
20 | )
21 |
22 | // Transfer data from S seeds to L leeches
23 | func Transfer(runenv *runtime.RunEnv, initCtx *run.InitContext) error {
24 | // Test Parameters
25 | testvars, err := getEnvVars(runenv)
26 | if err != nil {
27 | return err
28 | }
29 | nodeType := runenv.StringParam("node_type")
30 |
31 | /// --- Set up
32 | ctx, cancel := context.WithTimeout(context.Background(), testvars.Timeout)
33 | defer cancel()
34 | baseT, err := InitializeTest(ctx, runenv, testvars)
35 | if err != nil {
36 | return err
37 | }
38 | nodeInitializer, ok := supportedNodes[nodeType]
39 | if !ok {
40 | return fmt.Errorf("unsupported node type: %s", nodeType)
41 | }
42 | t, err := nodeInitializer(ctx, runenv, testvars, baseT)
   | if err != nil {
   | return err
   | }
43 | transferNode := t.node
44 | signalAndWaitForAll := t.signalAndWaitForAll
45 |
46 | // Start still alive process if enabled
47 | t.stillAlive(runenv, testvars)
48 |
49 | var tcpFetch int64
50 |
51 | // For each test permutation found in the test
52 | for pIndex, testParams := range testvars.Permutations {
53 | // Set up network (with traffic shaping)
54 | if err := utils.SetupNetwork(ctx, runenv, t.nwClient, t.nodetp, t.tpindex, testParams.Latency,
55 | testParams.Bandwidth, testParams.JitterPct); err != nil {
56 | return fmt.Errorf("Failed to set up network: %v", err)
57 | }
58 |
59 | // Accounts for every file that couldn't be found.
60 | var leechFails int64
61 | var rootCid cid.Cid
62 |
63 | // Wait for all nodes to be ready to start the run
64 | err = signalAndWaitForAll(fmt.Sprintf("start-file-%d", pIndex))
65 | if err != nil {
66 | return err
67 | }
68 |
69 | switch t.nodetp {
70 | case utils.Seed:
71 | rootCid, err = t.addPublishFile(ctx, pIndex, testParams.File, runenv, testvars)
72 | case utils.Leech:
73 | rootCid, err = t.readFile(ctx, pIndex, runenv, testvars)
74 | }
75 | if err != nil {
76 | return err
77 | }
78 |
79 | runenv.RecordMessage("File ingest complete...")
80 | // Wait for all nodes to be ready to dial
81 | err = signalAndWaitForAll(fmt.Sprintf("injest-complete-%d", pIndex))
82 | if err != nil {
83 | return err
84 | }
85 |
86 | if testvars.TCPEnabled {
87 | runenv.RecordMessage("Running TCP test...")
88 | switch t.nodetp {
89 | case utils.Seed:
90 | err = t.runTCPServer(ctx, pIndex, 0, testParams.File, runenv, testvars)
91 | case utils.Leech:
92 | tcpFetch, err = t.runTCPFetch(ctx, pIndex, 0, runenv, testvars)
93 | }
94 | if err != nil {
95 | return err
96 | }
97 | }
98 |
99 | runenv.RecordMessage("Starting %s Fetch...", nodeType)
100 |
101 | for runNum := 1; runNum < testvars.RunCount+1; runNum++ {
102 | // Reset the timeout for each run
103 | ctx, cancel := context.WithTimeout(ctx, testvars.RunTimeout)
104 | defer cancel()
105 |
106 | runID := fmt.Sprintf("%d-%d", pIndex, runNum)
107 |
108 | // Wait for all nodes to be ready to start the run
109 | err = signalAndWaitForAll("start-run-" + runID)
110 | if err != nil {
111 | return err
112 | }
113 |
114 | runenv.RecordMessage("Starting run %d / %d (%d bytes)", runNum, testvars.RunCount, testParams.File.Size())
115 |
116 | dialed, err := t.dialFn(ctx, transferNode.Host(), t.nodetp, t.peerInfos, testvars.MaxConnectionRate)
117 | if err != nil {
118 | return err
119 | }
120 | runenv.RecordMessage("%s Dialed %d other nodes:", t.nodetp.String(), len(dialed))
121 |
122 | // Wait for all nodes to be connected
123 | err = signalAndWaitForAll("connect-complete-" + runID)
124 | if err != nil {
125 | return err
126 | }
127 |
128 | /// --- Start test
129 |
130 | var timeToFetch time.Duration
131 | if t.nodetp == utils.Leech {
132 | // For each wave
133 | for waveNum := 0; waveNum < testvars.NumWaves; waveNum++ {
134 | // Only leechers assigned to this wave are entitled to leech.
135 | if (t.tpindex % testvars.NumWaves) == waveNum {
136 | runenv.RecordMessage("Starting wave %d", waveNum)
137 | // Stagger the start of the first request from each leech
138 | // Note: seq starts from 1 (not 0)
139 | startDelay := time.Duration(t.seq-1) * testvars.RequestStagger
140 |
141 | runenv.RecordMessage("Starting to leech %d / %d (%d bytes)", runNum, testvars.RunCount, testParams.File.Size())
142 | runenv.RecordMessage("Leech fetching data after %s delay", startDelay)
143 | start := time.Now()
144 | // TODO: Here we may be able to define requesting pattern. ipfs.DAG()
145 | // Right now using a path.
146 | ctxFetch, cancel := context.WithTimeout(ctx, testvars.RunTimeout/2)
147 | // Pin Add also traverse the whole DAG
148 | // err := ipfsNode.API.Pin().Add(ctxFetch, fPath)
149 | rcvFile, err := transferNode.Fetch(ctxFetch, rootCid, t.peerInfos)
150 | if err != nil {
151 | runenv.RecordMessage("Error fetching data: %v", err)
152 | leechFails++
153 | } else {
154 | runenv.RecordMessage("Fetch complete, proceeding")
155 | err = files.WriteTo(rcvFile, "/tmp/"+strconv.Itoa(t.tpindex)+time.Now().String())
156 | if err != nil {
157 | cancel()
158 | return err
159 | }
160 | timeToFetch = time.Since(start)
161 | s, _ := rcvFile.Size()
162 | runenv.RecordMessage("Leech fetch of %d complete (%d ns) for wave %d", s, timeToFetch, waveNum)
163 | }
164 | cancel()
165 | }
166 | if waveNum < testvars.NumWaves-1 {
167 | runenv.RecordMessage("Waiting 5 seconds between waves for wave %d", waveNum)
168 | time.Sleep(5 * time.Second)
169 | }
170 | _, err = t.client.SignalAndWait(ctx, sync.State(fmt.Sprintf("leech-wave-%d", waveNum)), testvars.LeechCount)
171 | }
172 | }
173 |
174 | // Wait for all leeches to have downloaded the data from seeds
175 | err = signalAndWaitForAll("transfer-complete-" + runID)
176 | if err != nil {
177 | return err
178 | }
179 |
180 | /// --- Report stats
181 | err = t.emitMetrics(runenv, runNum, nodeType, testParams, timeToFetch, tcpFetch, leechFails, testvars.MaxConnectionRate)
182 | if err != nil {
183 | return err
184 | }
185 | runenv.RecordMessage("Finishing emitting metrics. Starting to clean...")
186 |
187 | err = t.cleanupRun(ctx, rootCid, runenv)
188 | if err != nil {
189 | return err
190 | }
191 | }
192 | err = t.cleanupFile(ctx, rootCid)
193 | if err != nil {
194 | return err
195 | }
196 | }
197 | err = t.close()
198 | if err != nil {
199 | return err
200 | }
201 |
202 | runenv.RecordMessage("Ending testcase")
203 | return nil
204 | }
205 |
206 | type nodeInitializer func(ctx context.Context, runenv *runtime.RunEnv, testvars *TestVars, baseT *TestData) (*NodeTestData, error)
207 |
208 | var supportedNodes = map[string]nodeInitializer{
209 | "ipfs": initializeIPFSTest,
210 | "bitswap": initializeBitswapTest,
211 | "graphsync": initializeGraphsyncTest,
212 | "libp2pHTTP": initializeLibp2pHTTPTest,
213 | "rawLibp2p": initializeRawLibp2pTest,
214 | //TODO FIX HTTP
215 | //"http": initializeHTTPTest,
216 | }
217 |
218 | func initializeIPFSTest(ctx context.Context, runenv *runtime.RunEnv, testvars *TestVars, baseT *TestData) (*NodeTestData, error) {
219 |
220 | // Create IPFS node
221 | runenv.RecordMessage("Preparing exchange for node: %v", testvars.ExchangeInterface)
222 | // Set exchange Interface
223 | exch, err := utils.SetExchange(ctx, testvars.ExchangeInterface)
224 | if err != nil {
225 | return nil, err
226 | }
227 | ipfsNode, err := utils.CreateIPFSNodeWithConfig(ctx, baseT.nConfig, exch, testvars.DHTEnabled, testvars.ProvidingEnabled)
228 | if err != nil {
229 | runenv.RecordFailure(err)
230 | return nil, err
231 | }
232 |
233 | err = baseT.signalAndWaitForAll("file-list-ready")
234 | if err != nil {
235 | return nil, err
236 | }
237 |
238 | return &NodeTestData{
239 | TestData: baseT,
240 | node: ipfsNode,
241 | }, nil
242 | }
243 |
244 | func initializeBitswapTest(ctx context.Context, runenv *runtime.RunEnv, testvars *TestVars, baseT *TestData) (*NodeTestData, error) {
245 | h, err := makeHost(ctx, baseT)
246 | if err != nil {
247 | return nil, err
248 | }
249 | runenv.RecordMessage("I am %s with addrs: %v", h.ID(), h.Addrs())
250 |
251 | // Use the same blockstore on all runs for the seed node
252 | bstoreDelay := time.Duration(runenv.IntParam("bstore_delay_ms")) * time.Millisecond
253 |
254 | dStore, err := utils.CreateDatastore(testvars.DiskStore, bstoreDelay)
255 | if err != nil {
256 | return nil, err
257 | }
258 | runenv.RecordMessage("created data store %T with params disk_store=%v", dStore, testvars.DiskStore)
259 | bstore, err := utils.CreateBlockstore(ctx, dStore)
260 | if err != nil {
261 | return nil, err
262 | }
263 | // Create a new bitswap node from the blockstore
264 | bsnode, err := utils.CreateBitswapNode(ctx, h, bstore)
265 | if err != nil {
266 | return nil, err
267 | }
268 |
269 | return &NodeTestData{baseT, bsnode, &h}, nil
270 | }
271 |
272 | func initializeGraphsyncTest(ctx context.Context, runenv *runtime.RunEnv, testvars *TestVars, baseT *TestData) (*NodeTestData, error) {
273 |
274 | h, err := makeHost(ctx, baseT)
275 | if err != nil {
276 | return nil, err
277 | }
278 | runenv.RecordMessage("I am %s with addrs: %v", h.ID(), h.Addrs())
279 |
280 | // Use the same blockstore on all runs for the seed node
281 | bstoreDelay := time.Duration(runenv.IntParam("bstore_delay_ms")) * time.Millisecond
282 | dStore, err := utils.CreateDatastore(testvars.DiskStore, bstoreDelay)
283 | if err != nil {
284 | return nil, err
285 | }
286 | runenv.RecordMessage("created data store %T with params disk_store=%v", dStore, testvars.DiskStore)
287 | bstore, err := utils.CreateBlockstore(ctx, dStore)
288 | if err != nil {
289 | return nil, err
290 | }
291 |
292 | // Create a new bitswap node from the blockstore
293 | numSeeds := runenv.TestInstanceCount - (testvars.LeechCount + testvars.PassiveCount)
294 | bsnode, err := utils.CreateGraphsyncNode(ctx, h, bstore, numSeeds)
295 | if err != nil {
296 | return nil, err
297 | }
298 |
299 | return &NodeTestData{baseT, bsnode, &h}, nil
300 | }
301 |
302 | func initializeLibp2pHTTPTest(ctx context.Context, runenv *runtime.RunEnv, testvars *TestVars, baseT *TestData) (*NodeTestData, error) {
303 | if runenv.TestInstanceCount != 2 {
304 | return nil, errors.New("libp2p HTTP transfer ONLY supports two instances for now")
305 | }
306 |
307 | if testvars.LeechCount != 1 {
308 | return nil, errors.New("libp2p HTTP transfer ONLY supports 1 Leecher for now")
309 | }
310 |
311 | if testvars.PassiveCount != 0 {
312 | return nil, errors.New("libp2p HTTP transfer does NOT support passive peers")
313 | }
314 |
315 | h, err := makeHost(ctx, baseT)
316 | if err != nil {
317 | return nil, err
318 | }
319 | runenv.RecordMessage("I am %s with addrs: %v", h.ID(), h.Addrs())
320 |
321 | libp2pHttpN, err := utils.CreateLibp2pHTTPNode(ctx, h, baseT.nodetp)
322 | if err != nil {
323 | return nil, err
324 | }
325 |
326 | return &NodeTestData{
327 | TestData: baseT,
328 | node: libp2pHttpN,
329 | host: &h,
330 | }, nil
331 | }
332 |
333 | func initializeHTTPTest(ctx context.Context, runenv *runtime.RunEnv, testvars *TestVars, baseT *TestData) (*NodeTestData, error) {
334 | if runenv.TestInstanceCount != 2 {
335 | return nil, errors.New("http transfer ONLY supports two instances for now")
336 | }
337 |
338 | if testvars.LeechCount != 1 {
339 | return nil, errors.New("http transfer ONLY supports 1 Leecher for now")
340 | }
341 |
342 | if testvars.PassiveCount != 0 {
343 | return nil, errors.New("http transfer does NOT support passive peers")
344 | }
345 |
346 | h, err := makeHost(ctx, baseT)
347 | if err != nil {
348 | return nil, err
349 | }
350 | runenv.RecordMessage("I am %s with addrs: %v", h.ID(), h.Addrs())
351 |
352 | httpN, err := utils.CreateHTTPNode(ctx, h, baseT.nodetp)
353 | if err != nil {
354 | return nil, err
355 | }
356 |
357 | return &NodeTestData{
358 | TestData: baseT,
359 | node: httpN,
360 | host: &h,
361 | }, nil
362 | }
363 |
364 | func initializeRawLibp2pTest(ctx context.Context, runenv *runtime.RunEnv, testvars *TestVars, baseT *TestData) (*NodeTestData, error) {
365 | if runenv.TestInstanceCount != 2 {
366 | return nil, errors.New("libp2p transfer ONLY supports two instances for now")
367 | }
368 |
369 | if testvars.LeechCount != 1 {
370 | return nil, errors.New("libp2p transfer ONLY supports 1 Leecher for now")
371 | }
372 |
373 | if testvars.PassiveCount != 0 {
374 | return nil, errors.New("libp2p transfer does NOT support passive peers")
375 | }
376 |
377 | h, err := makeHost(ctx, baseT)
378 | if err != nil {
379 | return nil, err
380 | }
381 | runenv.RecordMessage("I am %s with addrs: %v", h.ID(), h.Addrs())
382 |
383 | rawLibp2pN, err := utils.CreateRawLibp2pNode(ctx, h, baseT.nodetp)
384 | if err != nil {
385 | return nil, err
386 | }
387 |
388 | return &NodeTestData{
389 | TestData: baseT,
390 | node: rawLibp2pN,
391 | host: &h,
392 | }, nil
393 | }
394 |
395 | func makeHost(ctx context.Context, baseT *TestData) (host.Host, error) {
396 | // Create libp2p node
397 | privKey, err := crypto.UnmarshalPrivateKey(baseT.nConfig.PrivKey)
398 | if err != nil {
399 | return nil, err
400 | }
401 |
402 | return libp2p.New(ctx, libp2p.Identity(privKey), libp2p.ListenAddrs(baseT.nConfig.AddrInfo.Addrs...))
403 | }
404 |
--------------------------------------------------------------------------------
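The supportedNodes table above is the extension point for additional transfer protocols: an initializer only has to produce something satisfying the utils.Node interface (Add, Fetch, Host, EmitMetrics, ...) and wrap it in a NodeTestData. A minimal sketch of what registering a new protocol could look like inside this same test package; CreateMyProtocolNode and the "myProtocol" key are hypothetical, not part of the testbed:

func initializeMyProtocolTest(ctx context.Context, runenv *runtime.RunEnv, testvars *TestVars, baseT *TestData) (*NodeTestData, error) {
	h, err := makeHost(ctx, baseT)
	if err != nil {
		return nil, err
	}
	runenv.RecordMessage("I am %s with addrs: %v", h.ID(), h.Addrs())

	// The node must implement the utils.Node interface used by Transfer above.
	myNode, err := utils.CreateMyProtocolNode(ctx, h, baseT.nodetp) // hypothetical constructor
	if err != nil {
		return nil, err
	}

	return &NodeTestData{TestData: baseT, node: myNode, host: &h}, nil
}

// Registered alongside the others: supportedNodes["myProtocol"] = initializeMyProtocolTest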
/testbed/testbed/utils/bitswap.go:
--------------------------------------------------------------------------------
1 | package utils
2 |
3 | import (
4 | "context"
5 | "os"
6 | "path/filepath"
7 | "time"
8 |
9 | bs "github.com/ipfs/go-bitswap"
10 | bsnet "github.com/ipfs/go-bitswap/network"
11 | "github.com/ipfs/go-blockservice"
12 | "github.com/ipfs/go-cid"
13 | ds "github.com/ipfs/go-datastore"
14 | delayed "github.com/ipfs/go-datastore/delayed"
15 | ds_sync "github.com/ipfs/go-datastore/sync"
16 | badgerds "github.com/ipfs/go-ds-badger2"
17 | blockstore "github.com/ipfs/go-ipfs-blockstore"
18 | delay "github.com/ipfs/go-ipfs-delay"
19 | files "github.com/ipfs/go-ipfs-files"
20 | nilrouting "github.com/ipfs/go-ipfs-routing/none"
21 | ipld "github.com/ipfs/go-ipld-format"
22 | "github.com/ipfs/go-merkledag"
23 | unixfile "github.com/ipfs/go-unixfs/file"
24 | "github.com/ipfs/go-unixfs/importer/helpers"
25 | "github.com/libp2p/go-libp2p-core/host"
26 | "github.com/pkg/errors"
27 | "golang.org/x/sync/errgroup"
28 |
29 | dgbadger "github.com/dgraph-io/badger/v2"
30 | )
31 |
32 | type NodeType int
33 |
34 | const (
35 | // Seeds data
36 | Seed NodeType = iota
37 | // Fetches data from seeds
38 | Leech
39 | // Doesn't seed or fetch data
40 | Passive
41 | )
42 |
43 | func (nt NodeType) String() string {
44 | return [...]string{"Seed", "Leech", "Passive"}[nt]
45 | }
46 |
47 | // Adapted from the netflix/p2plab repo under an Apache-2 license.
48 | // Original source code located at https://github.com/Netflix/p2plab/blob/master/peer/peer.go
49 | type BitswapNode struct {
50 | bitswap *bs.Bitswap
51 | blockStore blockstore.Blockstore
52 | dserv ipld.DAGService
53 | h host.Host
54 | }
55 |
56 | func (n *BitswapNode) Close() error {
57 | return n.bitswap.Close()
58 | }
59 |
60 | func CreateBlockstore(ctx context.Context, dStore ds.Batching) (blockstore.Blockstore, error) {
61 | return blockstore.CachedBlockstore(ctx,
62 | blockstore.NewBlockstore(dStore),
63 | blockstore.DefaultCacheOpts())
64 | }
65 |
66 | // CreateDatastore creates a data store to use for the transfer.
67 | // If diskStore=false, it returns an in-memory store that uses the given delay for each read/write.
68 | // If diskStore=true, it returns a Badger data store and ignores the bsdelay param.
69 | func CreateDatastore(diskStore bool, bsdelay time.Duration) (ds.Batching, error) {
70 | if !diskStore {
71 | dstore := ds_sync.MutexWrap(delayed.New(ds.NewMapDatastore(), delay.Fixed(bsdelay)))
72 | return dstore, nil
73 | }
74 |
75 | // create temporary directory for badger datastore
76 | path := filepath.Join(os.TempDir(), "datastore")
77 | if _, err := os.Stat(path); os.IsNotExist(err) {
78 | if err := os.MkdirAll(path, 0755); err != nil {
79 | return nil, err
80 | }
81 | } else if err != nil {
82 | return nil, err
83 | }
84 |
85 | // create disk based badger datastore
86 | defopts := badgerds.DefaultOptions
87 |
88 | defopts.Options = dgbadger.DefaultOptions("").WithTruncate(true).
89 | WithValueThreshold(1 << 10)
90 | datastore, err := badgerds.NewDatastore(path, &defopts)
91 | if err != nil {
92 | return nil, err
93 | }
94 |
95 | return datastore, nil
96 | }
97 |
98 | func ClearBlockstore(ctx context.Context, bstore blockstore.Blockstore) error {
99 | ks, err := bstore.AllKeysChan(ctx)
100 | if err != nil {
101 | return err
102 | }
103 | g := errgroup.Group{}
104 | for k := range ks {
105 | c := k
106 | g.Go(func() error {
107 | return bstore.DeleteBlock(c)
108 | })
109 | }
110 | return g.Wait()
111 | }
112 |
113 | func CreateBitswapNode(ctx context.Context, h host.Host, bstore blockstore.Blockstore) (*BitswapNode, error) {
114 | routing, err := nilrouting.ConstructNilRouting(ctx, nil, nil, nil)
115 | if err != nil {
116 | return nil, err
117 | }
118 | net := bsnet.NewFromIpfsHost(h, routing)
119 | bitswap := bs.New(ctx, net, bstore).(*bs.Bitswap)
120 | bserv := blockservice.New(bstore, bitswap)
121 | dserv := merkledag.NewDAGService(bserv)
122 | return &BitswapNode{bitswap, bstore, dserv, h}, nil
123 | }
124 |
125 | func (n *BitswapNode) Add(ctx context.Context, fileNode files.Node) (cid.Cid, error) {
126 | settings := AddSettings{
127 | Layout: "balanced",
128 | Chunker: "size-262144",
129 | RawLeaves: false,
130 | NoCopy: false,
131 | HashFunc: "sha2-256",
132 | MaxLinks: helpers.DefaultLinksPerBlock,
133 | }
134 | adder, err := NewDAGAdder(ctx, n.dserv, settings)
135 | if err != nil {
136 | return cid.Undef, err
137 | }
138 | ipldNode, err := adder.Add(fileNode)
139 | if err != nil {
140 | return cid.Undef, err
141 | }
142 | return ipldNode.Cid(), nil
143 | }
144 |
145 | func (n *BitswapNode) ClearDatastore(ctx context.Context, _ cid.Cid) error {
146 | return ClearBlockstore(ctx, n.blockStore)
147 | }
148 |
149 | func (n *BitswapNode) EmitMetrics(recorder MetricsRecorder) error {
150 | stats, err := n.bitswap.Stat()
151 |
152 | if err != nil {
153 | return err
154 | }
155 | recorder.Record("msgs_rcvd", float64(stats.MessagesReceived))
156 | recorder.Record("data_sent", float64(stats.DataSent))
157 | recorder.Record("data_rcvd", float64(stats.DataReceived))
158 | recorder.Record("block_data_rcvd", float64(stats.BlockDataReceived))
159 | recorder.Record("dup_data_rcvd", float64(stats.DupDataReceived))
160 | recorder.Record("blks_sent", float64(stats.BlocksSent))
161 | recorder.Record("blks_rcvd", float64(stats.BlocksReceived))
162 | recorder.Record("dup_blks_rcvd", float64(stats.DupBlksReceived))
163 | return err
164 | }
165 |
166 | func (n *BitswapNode) Fetch(ctx context.Context, c cid.Cid, _ []PeerInfo) (files.Node, error) {
167 | err := merkledag.FetchGraph(ctx, c, n.dserv)
168 | if err != nil {
169 | return nil, err
170 | }
171 | nd, err := n.dserv.Get(ctx, c)
172 | if err != nil {
173 | return nil, errors.Wrapf(err, "failed to get file %q", c)
174 | }
175 |
176 | return unixfile.NewUnixfsFile(ctx, n.dserv, nd)
177 | }
178 |
179 | func (n *BitswapNode) DAGService() ipld.DAGService {
180 | return n.dserv
181 | }
182 |
183 | func (n *BitswapNode) Host() host.Host {
184 | return n.h
185 | }
186 |
187 | func (n *BitswapNode) EmitKeepAlive(recorder MessageRecorder) error {
188 | stats, err := n.bitswap.Stat()
189 |
190 | if err != nil {
191 | return err
192 | }
193 |
194 | recorder.RecordMessage("I am still alive! Total In: %d - TotalOut: %d",
195 | stats.DataReceived,
196 | stats.DataSent)
197 |
198 | return nil
199 | }
200 |
201 | var _ Node = &BitswapNode{}
202 |
--------------------------------------------------------------------------------
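The helpers above compose into a working Bitswap node with very little glue. A rough, self-contained sketch of the wiring (error handling elided; in the real test plans the host comes from makeHost and the content from GetFileList):

package main

import (
	"context"
	"fmt"
	"time"

	files "github.com/ipfs/go-ipfs-files"
	"github.com/libp2p/go-libp2p"

	"github.com/protocol/beyond-bitswap/testbed/testbed/utils"
)

func main() {
	ctx := context.Background()

	h, _ := libp2p.New(ctx)                                       // libp2p host for this node
	dstore, _ := utils.CreateDatastore(false, 5*time.Millisecond) // in-memory datastore with a 5ms delay per access
	bstore, _ := utils.CreateBlockstore(ctx, dstore)
	node, _ := utils.CreateBitswapNode(ctx, h, bstore)
	defer node.Close()

	// Import 1 MiB of random data into the DAG and keep the root CID.
	rootCid, _ := node.Add(ctx, files.NewReaderFile(utils.RandReader(1<<20)))
	fmt.Println("root CID:", rootCid)

	// Fetch walks the whole DAG (locally here; over Bitswap once a remote peer holds the blocks).
	rcv, _ := node.Fetch(ctx, rootCid, nil) // BitswapNode.Fetch ignores the peer list
	_ = files.WriteTo(rcv, "/tmp/bitswap-out")
}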
/testbed/testbed/utils/dagadder.go:
--------------------------------------------------------------------------------
1 | package utils
2 |
3 | import (
4 | "context"
5 | "fmt"
6 | "io"
7 | gopath "path"
8 | "strings"
9 |
10 | "github.com/ipfs/go-cid"
11 | chunker "github.com/ipfs/go-ipfs-chunker"
12 | files "github.com/ipfs/go-ipfs-files"
13 | posinfo "github.com/ipfs/go-ipfs-posinfo"
14 | ipld "github.com/ipfs/go-ipld-format"
15 | dag "github.com/ipfs/go-merkledag"
16 | "github.com/ipfs/go-mfs"
17 | "github.com/ipfs/go-unixfs"
18 | "github.com/ipfs/go-unixfs/importer/balanced"
19 | ihelper "github.com/ipfs/go-unixfs/importer/helpers"
20 | "github.com/ipfs/go-unixfs/importer/trickle"
21 | "github.com/multiformats/go-multihash"
22 | "github.com/pkg/errors"
23 | )
24 |
25 | var liveCacheSize = uint64(256 << 10)
26 |
27 | type syncer interface {
28 | Sync() error
29 | }
30 |
31 | // NewDAGAdder returns an adder that can add any files.Node using the given DAG service
32 | func NewDAGAdder(ctx context.Context, ds ipld.DAGService, settings AddSettings) (*DAGAdder, error) {
33 | bufferedDS := ipld.NewBufferedDAG(ctx, ds)
34 |
35 | prefix, err := dag.PrefixForCidVersion(1)
36 | if err != nil {
37 | return nil, errors.Wrap(err, "unrecognized CID version")
38 | }
39 |
40 | hashFuncCode, ok := multihash.Names[strings.ToLower(settings.HashFunc)]
41 | if !ok {
42 | return nil, errors.Errorf("unrecognized hash function %q", settings.HashFunc)
43 | }
44 | prefix.MhType = hashFuncCode
45 | return &DAGAdder{
46 | ctx: ctx,
47 | dagService: ds,
48 | bufferedDS: bufferedDS,
49 | settings: settings,
50 | }, nil
51 | }
52 |
53 | type AddSettings struct {
54 | Layout string
55 | Chunker string
56 | RawLeaves bool
57 | NoCopy bool
58 | HashFunc string
59 | MaxLinks int
60 | }
61 |
62 | // DAGAdder holds the switches passed to the `add` command.
63 | type DAGAdder struct {
64 | ctx context.Context
65 | dagService ipld.DAGService
66 | bufferedDS *ipld.BufferedDAG
67 | Out chan<- interface{}
68 | mroot *mfs.Root
69 | tempRoot cid.Cid
70 | CidBuilder cid.Builder
71 | liveNodes uint64
72 | settings AddSettings
73 | }
74 |
75 | func (adder *DAGAdder) mfsRoot() (*mfs.Root, error) {
76 | if adder.mroot != nil {
77 | return adder.mroot, nil
78 | }
79 | rnode := unixfs.EmptyDirNode()
80 | rnode.SetCidBuilder(adder.CidBuilder)
81 | mr, err := mfs.NewRoot(adder.ctx, adder.dagService, rnode, nil)
82 | if err != nil {
83 | return nil, err
84 | }
85 | adder.mroot = mr
86 | return adder.mroot, nil
87 | }
88 |
89 | // Constructs a node from reader's data, and adds it. Doesn't pin.
90 | func (adder *DAGAdder) add(reader io.Reader) (ipld.Node, error) {
91 | chnk, err := chunker.FromString(reader, adder.settings.Chunker)
92 | if err != nil {
93 | return nil, err
94 | }
95 |
96 | params := ihelper.DagBuilderParams{
97 | Dagserv: adder.bufferedDS,
98 | RawLeaves: adder.settings.RawLeaves,
99 | Maxlinks: adder.settings.MaxLinks,
100 | NoCopy: adder.settings.NoCopy,
101 | CidBuilder: adder.CidBuilder,
102 | }
103 |
104 | db, err := params.New(chnk)
105 | if err != nil {
106 | return nil, err
107 | }
108 | var nd ipld.Node
109 | switch adder.settings.Layout {
110 | case "trickle":
111 | nd, err = trickle.Layout(db)
112 | case "balanced":
113 | nd, err = balanced.Layout(db)
114 | default:
115 | err = errors.Errorf("unrecognized layout %q", adder.settings.Layout)
116 | }
117 |
118 | if err != nil {
119 | return nil, err
120 | }
121 |
122 | return nd, adder.bufferedDS.Commit()
123 | }
124 |
125 | // curRootNode returns the mfs root node
126 | func (adder *DAGAdder) curRootNode() (ipld.Node, error) {
127 | mr, err := adder.mfsRoot()
128 | if err != nil {
129 | return nil, err
130 | }
131 | root, err := mr.GetDirectory().GetNode()
132 | if err != nil {
133 | return nil, err
134 | }
135 |
136 | // if one root file, use that hash as root.
137 | if len(root.Links()) == 1 {
138 | nd, err := root.Links()[0].GetNode(adder.ctx, adder.dagService)
139 | if err != nil {
140 | return nil, err
141 | }
142 |
143 | root = nd
144 | }
145 |
146 | return root, err
147 | }
148 |
149 | func (adder *DAGAdder) addNode(node ipld.Node, path string) error {
150 | // patch it into the root
151 | if path == "" {
152 | path = node.Cid().String()
153 | }
154 |
155 | if pi, ok := node.(*posinfo.FilestoreNode); ok {
156 | node = pi.Node
157 | }
158 |
159 | mr, err := adder.mfsRoot()
160 | if err != nil {
161 | return err
162 | }
163 | dir := gopath.Dir(path)
164 | if dir != "." {
165 | opts := mfs.MkdirOpts{
166 | Mkparents: true,
167 | Flush: false,
168 | CidBuilder: adder.CidBuilder,
169 | }
170 | if err := mfs.Mkdir(mr, dir, opts); err != nil {
171 | return err
172 | }
173 | }
174 |
175 | if err := mfs.PutNode(mr, path, node); err != nil {
176 | return err
177 | }
178 |
179 | return nil
180 | }
181 |
182 | // Add adds the given files.Node to the DAG
183 | func (adder *DAGAdder) Add(file files.Node) (ipld.Node, error) {
184 |
185 | if err := adder.addFileNode("", file, true); err != nil {
186 | return nil, err
187 | }
188 |
189 | // get root
190 | mr, err := adder.mfsRoot()
191 | if err != nil {
192 | return nil, err
193 | }
194 | var root mfs.FSNode
195 | rootdir := mr.GetDirectory()
196 | root = rootdir
197 |
198 | err = root.Flush()
199 | if err != nil {
200 | return nil, err
201 | }
202 |
203 | // if adding a file without wrapping, swap the root to it (when adding a
204 | // directory, mfs root is the directory)
205 | _, dir := file.(files.Directory)
206 | var name string
207 | if !dir {
208 | children, err := rootdir.ListNames(adder.ctx)
209 | if err != nil {
210 | return nil, err
211 | }
212 |
213 | if len(children) == 0 {
214 | return nil, fmt.Errorf("expected at least one child dir, got none")
215 | }
216 |
217 | // Replace root with the first child
218 | name = children[0]
219 | root, err = rootdir.Child(name)
220 | if err != nil {
221 | return nil, err
222 | }
223 | }
224 |
225 | err = mr.Close()
226 | if err != nil {
227 | return nil, err
228 | }
229 |
230 | nd, err := root.GetNode()
231 | if err != nil {
232 | return nil, err
233 | }
234 |
235 | if asyncDagService, ok := adder.dagService.(syncer); ok {
236 | err = asyncDagService.Sync()
237 | if err != nil {
238 | return nil, err
239 | }
240 | }
241 |
242 | return nd, nil
243 | }
244 |
245 | func (adder *DAGAdder) addFileNode(path string, file files.Node, toplevel bool) error {
246 | defer file.Close()
247 |
248 | if adder.liveNodes >= liveCacheSize {
249 | // TODO: A smarter cache that uses some sort of lru cache with an eviction handler
250 | mr, err := adder.mfsRoot()
251 | if err != nil {
252 | return err
253 | }
254 | if err := mr.FlushMemFree(adder.ctx); err != nil {
255 | return err
256 | }
257 |
258 | adder.liveNodes = 0
259 | }
260 | adder.liveNodes++
261 |
262 | switch f := file.(type) {
263 | case files.Directory:
264 | return adder.addDir(path, f, toplevel)
265 | case *files.Symlink:
266 | return adder.addSymlink(path, f)
267 | case files.File:
268 | return adder.addFile(path, f)
269 | default:
270 | return errors.New("unknown file type")
271 | }
272 | }
273 |
274 | func (adder *DAGAdder) addSymlink(path string, l *files.Symlink) error {
275 | sdata, err := unixfs.SymlinkData(l.Target)
276 | if err != nil {
277 | return err
278 | }
279 |
280 | dagnode := dag.NodeWithData(sdata)
281 | dagnode.SetCidBuilder(adder.CidBuilder)
282 | err = adder.dagService.Add(adder.ctx, dagnode)
283 | if err != nil {
284 | return err
285 | }
286 |
287 | return adder.addNode(dagnode, path)
288 | }
289 |
290 | func (adder *DAGAdder) addFile(path string, file files.File) error {
291 | // if the progress flag was specified, wrap the file so that we can send
292 | // progress updates to the client (over the output channel)
293 | var reader io.Reader = file
294 |
295 | dagnode, err := adder.add(reader)
296 | if err != nil {
297 | return err
298 | }
299 |
300 | // patch it into the root
301 | return adder.addNode(dagnode, path)
302 | }
303 |
304 | func (adder *DAGAdder) addDir(path string, dir files.Directory, toplevel bool) error {
305 | log.Infof("adding directory: %s", path)
306 |
307 | if !(toplevel && path == "") {
308 | mr, err := adder.mfsRoot()
309 | if err != nil {
310 | return err
311 | }
312 | err = mfs.Mkdir(mr, path, mfs.MkdirOpts{
313 | Mkparents: true,
314 | Flush: false,
315 | CidBuilder: adder.CidBuilder,
316 | })
317 | if err != nil {
318 | return err
319 | }
320 | }
321 |
322 | it := dir.Entries()
323 | for it.Next() {
324 | fpath := gopath.Join(path, it.Name())
325 | err := adder.addFileNode(fpath, it.Node(), false)
326 | if err != nil {
327 | return err
328 | }
329 | }
330 |
331 | return it.Err()
332 | }
333 |
--------------------------------------------------------------------------------
/testbed/testbed/utils/dialer/dialer.go:
--------------------------------------------------------------------------------
1 | package dialer
2 |
3 | import (
4 | "bytes"
5 | "context"
6 | "fmt"
7 | "math"
8 |
9 | core "github.com/libp2p/go-libp2p-core"
10 | "github.com/libp2p/go-libp2p-core/peer"
11 | "github.com/protocol/beyond-bitswap/testbed/testbed/utils"
12 | "golang.org/x/sync/errgroup"
13 | )
14 |
15 | // PeerInfosFromChan collects peer information from a channel of peer information
16 | func PeerInfosFromChan(peerCh chan *utils.PeerInfo, count int) ([]utils.PeerInfo, error) {
17 | var ais []utils.PeerInfo
18 | for i := 1; i <= count; i++ {
19 | ai, ok := <-peerCh
20 | if !ok {
21 | return ais, fmt.Errorf("subscription closed")
22 | }
23 | ais = append(ais, *ai)
24 | }
25 | return ais, nil
26 | }
27 |
28 | // Dialer is a function that dials other peers, following a specified pattern
29 | type Dialer func(ctx context.Context, self core.Host, selfType utils.NodeType, ais []utils.PeerInfo, maxConnectionRate int) ([]peer.AddrInfo, error)
30 |
31 | // SparseDial connects to a set of peers in the experiment, but only those with the correct node type
32 | func SparseDial(ctx context.Context, self core.Host, selfType utils.NodeType, ais []utils.PeerInfo, maxConnectionRate int) ([]peer.AddrInfo, error) {
33 | // Grab list of other peers that are available for this Run
34 | var toDial []peer.AddrInfo
35 | for _, inf := range ais {
36 | ai := inf.Addr
37 | id1, _ := ai.ID.MarshalBinary()
38 | id2, _ := self.ID().MarshalBinary()
39 |
40 | // skip over dialing ourselves, and prevent TCP simultaneous
41 | // connect (known to fail) by only dialing peers whose peer ID
42 | // is smaller than ours.
43 | if bytes.Compare(id1, id2) < 0 {
44 | // In sparse topology we don't allow leechers and seeders to be directly connected.
45 | switch selfType {
46 | case utils.Seed:
47 | if inf.Nodetp != utils.Leech {
48 | toDial = append(toDial, ai)
49 | }
50 | case utils.Leech:
51 | if inf.Nodetp != utils.Seed {
52 | toDial = append(toDial, ai)
53 | }
54 | case utils.Passive:
55 | toDial = append(toDial, ai)
56 | }
57 | }
58 | }
59 |
60 | // Limit max number of connections for the peer according to rate.
61 | rate := float64(maxConnectionRate) / 100
62 | toDial = toDial[:int(math.Ceil(float64(len(toDial))*rate))]
63 |
64 | // Dial to all the other peers
65 | g, ctx := errgroup.WithContext(ctx)
66 | for _, ai := range toDial {
67 | ai := ai
68 | g.Go(func() error {
69 | if err := self.Connect(ctx, ai); err != nil {
70 | return fmt.Errorf("Error while dialing peer %v: %w", ai.Addrs, err)
71 | }
72 | return nil
73 | })
74 | }
75 | if err := g.Wait(); err != nil {
76 | return nil, err
77 | }
78 |
79 | return toDial, nil
80 | }
81 |
82 | // DialOtherPeers connects to a set of peers in the experiment, dialing all of them
83 | func DialOtherPeers(ctx context.Context, self core.Host, selfType utils.NodeType, ais []utils.PeerInfo, maxConnectionRate int) ([]peer.AddrInfo, error) {
84 | // Grab list of other peers that are available for this Run
85 | var toDial []peer.AddrInfo
86 | for _, inf := range ais {
87 | ai := inf.Addr
88 | id1, _ := ai.ID.MarshalBinary()
89 | id2, _ := self.ID().MarshalBinary()
90 |
91 | // skip over dialing ourselves, and prevent TCP simultaneous
92 | // connect (known to fail) by only dialing peers whose peer ID
93 | // is smaller than ours.
94 | if bytes.Compare(id1, id2) < 0 {
95 | toDial = append(toDial, ai)
96 | }
97 | }
98 |
99 | // Limit max number of connections for the peer according to rate.
100 | rate := float64(maxConnectionRate) / 100
101 | toDial = toDial[:int(math.Ceil(float64(len(toDial))*rate))]
102 |
103 | // Dial to all the other peers
104 | g, ctx := errgroup.WithContext(ctx)
105 | for _, ai := range toDial {
106 | ai := ai
107 | g.Go(func() error {
108 | if err := self.Connect(ctx, ai); err != nil {
109 | return fmt.Errorf("Error while dialing peer %v: %w", ai.Addrs, err)
110 | }
111 | return nil
112 | })
113 | }
114 | if err := g.Wait(); err != nil {
115 | return nil, err
116 | }
117 |
118 | return toDial, nil
119 | }
120 |
--------------------------------------------------------------------------------
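Both dialers share the Dialer signature, so test plans can treat the dial topology as a parameter (transfer.go uses it through t.dialFn). A rough sketch of how a leech might drive them, assuming peerCh is the channel of *utils.PeerInfo records the plan subscribes to, count is the number of records expected, and the context, fmt, core, utils and dialer imports are in place:

func dialPeers(ctx context.Context, h core.Host, peerCh chan *utils.PeerInfo, count int) error {
	peerInfos, err := dialer.PeerInfosFromChan(peerCh, count)
	if err != nil {
		return err
	}

	// SparseDial keeps seeds and leeches from connecting directly; DialOtherPeers dials
	// every eligible peer. Both cap the number of dials by maxConnectionRate (a percentage).
	var dial dialer.Dialer = dialer.SparseDial
	dialed, err := dial(ctx, h, utils.Leech, peerInfos, 100)
	if err != nil {
		return err
	}
	fmt.Printf("dialed %d peers\n", len(dialed))
	return nil
}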
/testbed/testbed/utils/exchange.go:
--------------------------------------------------------------------------------
1 | package utils
2 |
3 | import (
4 | "context"
5 | "errors"
6 |
7 | "github.com/ipfs/go-bitswap"
8 | "github.com/ipfs/go-bitswap/network"
9 | blockstore "github.com/ipfs/go-ipfs-blockstore"
10 | exchange "github.com/ipfs/go-ipfs-exchange-interface"
11 | "github.com/ipfs/go-ipfs/core/node/helpers"
12 | "github.com/libp2p/go-libp2p-core/host"
13 | "github.com/libp2p/go-libp2p-core/routing"
14 | "go.uber.org/fx"
15 | )
16 |
17 | // ExchangeOpt injects exchange interface
18 | type ExchangeOpt func(helpers.MetricsCtx, fx.Lifecycle, host.Host,
19 | routing.Routing, blockstore.GCBlockstore) exchange.Interface
20 |
21 | // SetExchange sets the exchange interface to be used
22 | func SetExchange(ctx context.Context, name string) (ExchangeOpt, error) {
23 | switch name {
24 | case "bitswap":
25 | // Initializing bitswap exchange
26 | return func(mctx helpers.MetricsCtx, lc fx.Lifecycle,
27 | host host.Host, rt routing.Routing, bs blockstore.GCBlockstore) exchange.Interface {
28 | bitswapNetwork := network.NewFromIpfsHost(host, rt)
29 | exch := bitswap.New(helpers.LifecycleCtx(mctx, lc), bitswapNetwork, bs)
30 |
31 | lc.Append(fx.Hook{
32 | OnStop: func(ctx context.Context) error {
33 | return exch.Close()
34 | },
35 | })
36 | return exch
37 | }, nil
38 |
39 | // TODO: Add aditional exchanges here
40 | default:
41 | return nil, errors.New("This exchange interface is not implemented")
42 | }
43 |
44 | }
45 |
--------------------------------------------------------------------------------
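SetExchange only builds the fx constructor; it is CreateIPFSNodeWithConfig (ipfs.go, further below) that injects it into the node's dependency graph, which is how the "ipfs" node type in transfer.go ends up running the selected exchange. A short usage sketch (error handling elided):

ctx := context.Background()
nConfig, _ := utils.GenerateAddrInfo("127.0.0.1") // fresh key pair, peer ID and listen addresses
exch, _ := utils.SetExchange(ctx, "bitswap")      // "bitswap" is currently the only implemented exchange
ipfsNode, _ := utils.CreateIPFSNodeWithConfig(ctx, nConfig, exch, false, false) // DHT and providing disabled
// ... use ipfsNode.API, then call ipfsNode.Close() when finished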
/testbed/testbed/utils/files.go:
--------------------------------------------------------------------------------
1 | package utils
2 |
3 | import (
4 | "bytes"
5 | "fmt"
6 | "io"
7 | "io/ioutil"
8 | "math/rand"
9 | "os"
10 | "path/filepath"
11 | "time"
12 |
13 | files "github.com/ipfs/go-ipfs-files"
14 | logging "github.com/ipfs/go-log/v2"
15 | "github.com/testground/sdk-go/runtime"
16 | )
17 |
18 | var log = logging.Logger("utils")
19 |
20 | // var randReader *rand.Rand
21 |
22 | // TestFile is the interface for the input files used in the tests.
23 | type TestFile interface {
24 | GenerateFile() (files.Node, error)
25 | Size() int64
26 | }
27 |
28 | // RandFile represents a randomly generated file
29 | type RandFile struct {
30 | size int64
31 | seed int64
32 | }
33 |
34 | // PathFile is a file generated from a path on disk.
35 | type PathFile struct {
36 | Path string
37 | size int64
38 | isDir bool
39 | }
40 |
41 | // GenerateFile generates a new file with random content
42 | func (f *RandFile) GenerateFile() (files.Node, error) {
43 | r := SeededRandReader(int(f.size), f.seed)
44 |
45 | path := fmt.Sprintf("/tmp-%d", rand.Uint64())
46 | tf, err := os.Create(path)
47 | if err != nil {
48 | return nil, err
49 | }
50 |
51 | if _, err := io.Copy(tf, r); err != nil {
52 | return nil, err
53 | }
54 | if err := tf.Close(); err != nil {
55 | return nil, err
56 | }
57 |
58 | return getUnixfsNode(path)
59 | }
60 |
61 | // Size returns size
62 | func (f *RandFile) Size() int64 {
63 | return f.size
64 | }
65 |
66 | // Size returns size
67 | func (f *PathFile) Size() int64 {
68 | return f.size
69 | }
70 |
71 | // GenerateFile gets the file from path
72 | func (f *PathFile) GenerateFile() (files.Node, error) {
73 | tmpFile, err := getUnixfsNode(f.Path)
74 | if err != nil {
75 | return nil, err
76 | }
77 | return tmpFile, nil
78 | }
79 |
80 | // RandFromReader generates random data of the given length from an existing reader
81 | func RandFromReader(randReader *rand.Rand, len int) io.Reader {
82 | if randReader == nil {
83 | randReader = rand.New(rand.NewSource(2))
84 | }
85 | data := make([]byte, len)
86 | randReader.Read(data)
87 | return bytes.NewReader(data)
88 | }
89 |
90 | // dirSize computes the total size of the directory.
91 | func dirSize(path string) (int64, error) {
92 | var size int64
93 | err := filepath.Walk(path, func(_ string, info os.FileInfo, err error) error {
94 | if err != nil {
95 | return err
96 | }
97 | if !info.IsDir() {
98 | size += info.Size()
99 | }
100 | return err
101 | })
102 | return size, err
103 | }
104 |
105 | // SeededRandReader generates random data from the given seed.
106 | func SeededRandReader(len int, seed int64) io.Reader {
107 | randReader := rand.New(rand.NewSource(seed))
108 | data := make([]byte, len)
109 | randReader.Read(data)
110 | return bytes.NewReader(data)
111 | }
112 |
113 | // RandReader generates random data using the current time as seed.
114 | func RandReader(len int) io.Reader {
115 | return SeededRandReader(len, time.Now().Unix())
116 | }
117 |
118 | func GetFileList(runenv *runtime.RunEnv) ([]TestFile, error) {
119 | listFiles := []TestFile{}
120 | inputData := runenv.StringParam("input_data")
121 |
122 | switch inputData {
123 | case "files":
124 | path := runenv.StringParam("data_dir")
125 | runenv.RecordMessage("Getting file list for %s", path)
126 | files, err := ioutil.ReadDir(path)
127 | if err != nil {
128 | return nil, err
129 | }
130 |
131 | for _, file := range files {
132 | var size int64
133 |
134 | // Assign the right size.
135 | if file.IsDir() {
136 | size, err = dirSize(path + "/" + file.Name())
137 | if err != nil {
138 | return nil, err
139 | }
140 | } else {
141 | size = file.Size()
142 | }
143 |
144 | // Append the file.
145 | listFiles = append(listFiles,
146 | &PathFile{
147 | Path: path + "/" + file.Name(),
148 | size: size,
149 | isDir: file.IsDir()})
150 | }
151 | return listFiles, nil
152 | case "random":
153 | fileSizes, err := ParseIntArray(runenv.StringParam("file_size"))
154 | runenv.RecordMessage("Getting file list for random with sizes: %v", fileSizes)
155 | if err != nil {
156 | return nil, err
157 | }
158 | for i, v := range fileSizes {
159 | listFiles = append(listFiles, &RandFile{size: int64(v), seed: int64(i)})
160 | }
161 | return listFiles, nil
162 | case "custom":
163 | return nil, fmt.Errorf("Custom inputData not implemented yet")
164 | default:
165 | return nil, fmt.Errorf("Inputdata type not implemented")
166 | }
167 | }
168 |
169 | func getUnixfsNode(path string) (files.Node, error) {
170 | st, err := os.Stat(path)
171 | if err != nil {
172 | return nil, err
173 | }
174 |
175 | f, err := files.NewSerialFile(path, false, st)
176 | if err != nil {
177 | return nil, err
178 | }
179 |
180 | return f, nil
181 | }
182 |
--------------------------------------------------------------------------------
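GetFileList is driven by the input_data, data_dir and file_size test parameters and produces the TestFile entries the test permutations iterate over. A rough sketch, from within a test plan function, of how a seed could turn them into content (runenv, ctx and node, any utils.Node implementation, come from the surrounding plan; error handling trimmed):

testFiles, err := utils.GetFileList(runenv) // input_data = "files" or "random"
if err != nil {
	return err
}
for _, tf := range testFiles {
	fileNode, err := tf.GenerateFile() // read from data_dir, or generate seeded random bytes
	if err != nil {
		return err
	}
	rootCid, err := node.Add(ctx, fileNode)
	if err != nil {
		return err
	}
	runenv.RecordMessage("added file of %d bytes with root %s", tf.Size(), rootCid)
}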
/testbed/testbed/utils/graphsync.go:
--------------------------------------------------------------------------------
1 | package utils
2 |
3 | import (
4 | "context"
5 | "fmt"
6 | "time"
7 |
8 | "github.com/ipfs/go-blockservice"
9 | "github.com/ipfs/go-cid"
10 | "github.com/ipfs/go-graphsync"
11 | gsimpl "github.com/ipfs/go-graphsync/impl"
12 | "github.com/ipfs/go-graphsync/network"
13 | "github.com/ipfs/go-graphsync/storeutil"
14 | blockstore "github.com/ipfs/go-ipfs-blockstore"
15 | offline "github.com/ipfs/go-ipfs-exchange-offline"
16 | files "github.com/ipfs/go-ipfs-files"
17 | format "github.com/ipfs/go-ipld-format"
18 | "github.com/ipfs/go-merkledag"
19 | unixfile "github.com/ipfs/go-unixfs/file"
20 | "github.com/ipfs/go-unixfs/importer/helpers"
21 | "github.com/pkg/errors"
22 |
23 | allselector "github.com/hannahhoward/all-selector"
24 | "github.com/ipld/go-ipld-prime"
25 | cidlink "github.com/ipld/go-ipld-prime/linking/cid"
26 | "github.com/libp2p/go-libp2p-core/host"
27 | "github.com/libp2p/go-libp2p-core/peer"
28 | )
29 |
30 | type GraphsyncNode struct {
31 | gs graphsync.GraphExchange
32 | blockStore blockstore.Blockstore
33 | dserv format.DAGService
34 | h host.Host
35 | totalSent uint64
36 | totalReceived uint64
37 | numSeeds int
38 | }
39 |
40 | func CreateGraphsyncNode(ctx context.Context, h host.Host, bstore blockstore.Blockstore, numSeeds int) (*GraphsyncNode, error) {
41 | net := network.NewFromLibp2pHost(h)
42 | bserv := blockservice.New(bstore, offline.Exchange(bstore))
43 | dserv := merkledag.NewDAGService(bserv)
44 | gs := gsimpl.New(ctx, net,
45 | storeutil.LoaderForBlockstore(bstore),
46 | storeutil.StorerForBlockstore(bstore),
47 | )
48 | n := &GraphsyncNode{gs, bstore, dserv, h, 0, 0, numSeeds}
49 | gs.RegisterBlockSentListener(n.onDataSent)
50 | gs.RegisterIncomingBlockHook(n.onDataReceived)
51 | gs.RegisterIncomingRequestHook(n.onIncomingRequestHook)
52 | return n, nil
53 | }
54 |
55 | var selectAll ipld.Node = allselector.AllSelector
56 |
57 | func (n *GraphsyncNode) Add(ctx context.Context, fileNode files.Node) (cid.Cid, error) {
58 | settings := AddSettings{
59 | Layout: "balanced",
60 | Chunker: "size-262144",
61 | RawLeaves: false,
62 | NoCopy: false,
63 | HashFunc: "sha2-256",
64 | MaxLinks: helpers.DefaultLinksPerBlock,
65 | }
66 | adder, err := NewDAGAdder(ctx, n.dserv, settings)
67 | if err != nil {
68 | return cid.Undef, err
69 | }
70 | ipldNode, err := adder.Add(fileNode)
71 | if err != nil {
72 | return cid.Undef, err
73 | }
74 | return ipldNode.Cid(), nil
75 | }
76 |
77 | func (n *GraphsyncNode) ClearDatastore(ctx context.Context, rootCid cid.Cid) error {
78 | return ClearBlockstore(ctx, n.blockStore)
79 | }
80 |
81 | func (n *GraphsyncNode) EmitMetrics(recorder MetricsRecorder) error {
82 | recorder.Record("data_sent", float64(n.totalSent))
83 | recorder.Record("data_rcvd", float64(n.totalReceived))
84 | return nil
85 | }
86 |
87 | func (n *GraphsyncNode) Fetch(ctx context.Context, c cid.Cid, peers []PeerInfo) (files.Node, error) {
88 | leechIndex := 0
89 | for i := 0; i < len(peers); i++ {
90 | if peers[i].Addr.ID == n.h.ID() {
91 | break
92 | }
93 | if peers[i].Nodetp == Leech {
94 | leechIndex++
95 | }
96 | }
97 |
98 | targetSeed := leechIndex % n.numSeeds
99 | seedCount := 0
100 | var seedIndex = 0
101 | for ; seedIndex < len(peers); seedIndex++ {
102 | if peers[seedIndex].Nodetp == Seed && peers[seedIndex].Addr.ID != n.h.ID() {
103 | if seedCount == targetSeed {
104 | break
105 | }
106 | seedCount++
107 | }
108 | }
109 |
110 | if seedIndex == len(peers) {
111 | return nil, errors.New("no suitable seed found")
112 | }
113 | p := peers[seedIndex].Addr.ID
114 |
115 | start := time.Now()
116 | resps, errs := n.gs.Request(ctx, p, cidlink.Link{Cid: c}, selectAll)
117 | for range resps {
118 | }
119 | fmt.Println("TIME SINCE START: ", time.Since(start))
120 |
121 | var lastError error
122 | for err := range errs {
123 | if err != nil {
124 | lastError = err
125 | }
126 | }
127 | if lastError != nil {
128 | return nil, lastError
129 | }
130 | nd, err := n.dserv.Get(ctx, c)
131 | if err != nil {
132 | return nil, errors.Wrapf(err, "failed to get file %q", c)
133 | }
134 |
135 | return unixfile.NewUnixfsFile(ctx, n.dserv, nd)
136 | }
137 |
138 | func (n *GraphsyncNode) DAGService() format.DAGService {
139 | return n.dserv
140 | }
141 |
142 | func (n *GraphsyncNode) Host() host.Host {
143 | return n.h
144 | }
145 |
146 | func (n *GraphsyncNode) EmitKeepAlive(recorder MessageRecorder) error {
147 |
148 | recorder.RecordMessage("I am still alive! Total In: %d - TotalOut: %d",
149 | n.totalSent,
150 | n.totalReceived)
151 |
152 | return nil
153 | }
154 |
155 | func (n *GraphsyncNode) onDataSent(p peer.ID, request graphsync.RequestData, block graphsync.BlockData) {
156 | n.totalSent += block.BlockSizeOnWire()
157 | }
158 |
159 | func (n *GraphsyncNode) onDataReceived(p peer.ID, request graphsync.ResponseData, block graphsync.BlockData, ha graphsync.IncomingBlockHookActions) {
160 | n.totalReceived += block.BlockSizeOnWire()
161 | }
162 |
163 | func (n *GraphsyncNode) onIncomingRequestHook(p peer.ID, request graphsync.RequestData, ha graphsync.IncomingRequestHookActions) {
164 | ha.ValidateRequest()
165 | }
166 |
167 | var _ Node = &GraphsyncNode{}
168 |
--------------------------------------------------------------------------------
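Worth noting: GraphsyncNode.Fetch does not request from every peer; each leech is pinned to a single seed, with leeches spread round-robin across the seeds via targetSeed = leechIndex % numSeeds. A tiny illustration of that mapping:

// With 2 seeds, leech 0 and leech 2 fetch from seed 0, leech 1 and leech 3 from seed 1.
numSeeds := 2
for leechIndex := 0; leechIndex < 4; leechIndex++ {
	fmt.Printf("leech %d fetches from seed %d\n", leechIndex, leechIndex%numSeeds)
}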
/testbed/testbed/utils/http.go:
--------------------------------------------------------------------------------
1 | package utils
2 |
3 | import (
4 | "context"
5 | "errors"
6 | "fmt"
7 | "io"
8 | "net"
9 | "net/http"
10 | "time"
11 |
12 | "github.com/ipfs/go-cid"
13 | files "github.com/ipfs/go-ipfs-files"
14 | ipld "github.com/ipfs/go-ipld-format"
15 | "github.com/libp2p/go-libp2p-core/host"
16 | ma "github.com/multiformats/go-multiaddr"
17 | manet "github.com/multiformats/go-multiaddr/net"
18 | )
19 |
20 | type HTTPNode struct {
21 | h host.Host
22 | svc *http.Server
23 | }
24 |
25 | func CreateHTTPNode(ctx context.Context, h host.Host, nodeTP NodeType) (*HTTPNode, error) {
26 | var svr *http.Server
27 | switch nodeTP {
28 | case Seed:
29 | svr = &http.Server{Addr: ":8080"}
30 | go svr.ListenAndServe()
31 | time.Sleep(1 * time.Second)
32 | case Leech:
33 | default:
34 | return nil, errors.New("nodeType NOT supported")
35 | }
36 |
37 | return &HTTPNode{
38 | h: h,
39 | svc: svr,
40 | }, nil
41 | }
42 |
43 | func (h *HTTPNode) Add(ctx context.Context, file files.Node) (cid.Cid, error) {
44 | f := files.ToFile(file)
45 | if f == nil {
46 | return cid.Undef, errors.New("node is NOT a File")
47 | }
48 |
49 | // associate a random CID with the file here as we don't really care about CIDs for the HTTP Libp2p transfer
50 | c, err := randCid()
51 | if err != nil {
52 | return c, err
53 | }
54 |
55 | // set up http server to send file
56 | http.HandleFunc(fmt.Sprintf("/%s", c.String()), func(w http.ResponseWriter, r *http.Request) {
57 | defer f.Close()
58 | _, err := io.Copy(w, f)
59 | if err != nil {
60 | panic(err)
61 | }
62 | })
63 |
64 | return c, nil
65 | }
66 |
67 | // TODO GET IP
68 | func (h *HTTPNode) Fetch(ctx context.Context, c cid.Cid, peers []PeerInfo) (files.Node, error) {
69 | seedCount := 0
70 | var seedAddrs []ma.Multiaddr
71 |
72 | for _, p := range peers {
73 | if p.Nodetp == Seed {
74 | seedCount++
75 | seedAddrs = p.Addr.Addrs
76 | }
77 | }
78 | if seedCount != 1 {
79 | return nil, errors.New("http should ONLY have one seed")
80 | }
81 |
82 | var ip net.IP
83 | for _, a := range seedAddrs {
84 | if _, err := a.ValueForProtocol(ma.P_IP4); err == nil {
85 | ip, err = manet.ToIP(a)
86 | if err != nil {
87 | return nil, err
88 | }
89 | }
90 | }
91 |
92 | resp, err := http.DefaultClient.Get(fmt.Sprintf("http://%s:8080/%s", ip.String(), c.String()))
93 | if err != nil {
94 | return nil, err
95 | }
96 |
97 | return files.NewReaderFile(resp.Body), nil
98 | }
99 |
100 | func (h *HTTPNode) Host() host.Host {
101 | return h.h
102 | }
103 |
104 | // NOOP FOR now,
105 | // #TODO Fix
106 | func (h *HTTPNode) EmitMetrics(recorder MetricsRecorder) error {
107 | return nil
108 | }
109 |
110 | // NO-OP
111 | func (h *HTTPNode) ClearDatastore(ctx context.Context, rootCid cid.Cid) error {
112 | return nil
113 | }
114 |
115 | // NO-OP
116 | func (h *HTTPNode) DAGService() ipld.DAGService {
117 | return nil
118 | }
119 |
120 | // NO-OP
121 | func (h *HTTPNode) EmitKeepAlive(recorder MessageRecorder) error {
122 | return nil
123 | }
124 |
--------------------------------------------------------------------------------
/testbed/testbed/utils/ipfs.go:
--------------------------------------------------------------------------------
1 | package utils
2 |
3 | import (
4 | "context"
5 | "crypto/rand"
6 | "encoding/base64"
7 | "fmt"
8 | mathRand "math/rand"
9 | "net"
10 | "strconv"
11 | "sync"
12 | "time"
13 |
14 | bs "github.com/ipfs/go-bitswap"
15 | "github.com/ipfs/go-cid"
16 | "github.com/ipfs/go-datastore"
17 | files "github.com/ipfs/go-ipfs-files"
18 | ipld "github.com/ipfs/go-ipld-format"
19 | "github.com/ipfs/go-merkledag"
20 | "github.com/ipfs/interface-go-ipfs-core/path"
21 |
22 | blockstore "github.com/ipfs/go-ipfs-blockstore"
23 | config "github.com/ipfs/go-ipfs-config"
24 | "github.com/ipfs/go-metrics-interface"
25 | icore "github.com/ipfs/interface-go-ipfs-core"
26 | "github.com/jbenet/goprocess"
27 | ma "github.com/multiformats/go-multiaddr"
28 | "go.uber.org/fx"
29 |
30 | "github.com/ipfs/go-ipfs/core"
31 | "github.com/ipfs/go-ipfs/core/bootstrap"
32 | "github.com/ipfs/go-ipfs/core/coreapi"
33 | "github.com/ipfs/go-ipfs/core/node"
34 | "github.com/ipfs/go-ipfs/core/node/helpers"
35 | "github.com/ipfs/go-ipfs/core/node/libp2p"
36 | "github.com/ipfs/go-ipfs/p2p" // This package is needed so that all the preloaded plugins are loaded automatically
37 | "github.com/ipfs/go-ipfs/repo"
38 | "github.com/libp2p/go-libp2p-core/host"
39 | "github.com/libp2p/go-libp2p-core/peer"
40 |
41 | dsync "github.com/ipfs/go-datastore/sync"
42 | ci "github.com/libp2p/go-libp2p-core/crypto"
43 | )
44 |
45 | // IPFSNode represents the node
46 | type IPFSNode struct {
47 | Node *core.IpfsNode
48 | API icore.CoreAPI
49 | Close func() error
50 | }
51 |
52 | type NodeConfig struct {
53 | Addrs []string
54 | AddrInfo *peer.AddrInfo
55 | PrivKey []byte
56 | }
57 |
58 | func getFreePort() string {
59 | mathRand.Seed(time.Now().UnixNano())
60 | notAvailable := true
61 | port := 0
62 | for notAvailable {
63 | port = 3000 + mathRand.Intn(5000)
64 | ln, err := net.Listen("tcp", ":"+strconv.Itoa(port))
65 | if err == nil {
66 | notAvailable = false
67 | _ = ln.Close()
68 | }
69 | }
70 | return strconv.Itoa(port)
71 | }
72 |
73 | func GenerateAddrInfo(ip string) (*NodeConfig, error) {
74 | // Use a free port
75 | port := getFreePort()
76 | // Generate new KeyPair instead of using existing one.
77 | priv, pub, err := ci.GenerateKeyPairWithReader(ci.RSA, 2048, rand.Reader)
78 | if err != nil {
79 | panic(err)
80 | }
81 | // Generate PeerID
82 | pid, err := peer.IDFromPublicKey(pub)
83 | if err != nil {
84 | panic(err)
85 | }
86 | // Get PrivKey
87 | privkeyb, err := priv.Bytes()
88 | if err != nil {
89 | panic(err)
90 | }
91 |
92 | addrs := []string{
93 | fmt.Sprintf("/ip4/%s/tcp/%s", ip, port),
94 | "/ip6/::/tcp/" + port,
95 | fmt.Sprintf("/ip4/%s/udp/%s/quic", ip, port),
96 | fmt.Sprintf("/ip6/::/udp/%s/quic", port),
97 | }
98 | multiAddrs := make([]ma.Multiaddr, 0)
99 |
100 | for _, a := range addrs {
101 | maddr, err := ma.NewMultiaddr(a)
102 | if err != nil {
103 | return nil, err
104 | }
105 | multiAddrs = append(multiAddrs, maddr)
106 | }
107 |
108 | return &NodeConfig{addrs, &peer.AddrInfo{ID: pid, Addrs: multiAddrs}, privkeyb}, nil
109 | }
110 |
111 | // baseProcess creates a goprocess which is closed when the lifecycle signals it to stop
112 | func baseProcess(lc fx.Lifecycle) goprocess.Process {
113 | p := goprocess.WithParent(goprocess.Background())
114 | lc.Append(fx.Hook{
115 | OnStop: func(_ context.Context) error {
116 | return p.Close()
117 | },
118 | })
119 | return p
120 | }
121 |
122 | // setConfig manually injects dependencies for the IPFS nodes.
123 | func setConfig(ctx context.Context, nConfig *NodeConfig, exch ExchangeOpt, DHTenabled bool, providingEnabled bool) fx.Option {
124 |
125 | // Create new Datastore
126 | // TODO: This is in memory we should have some other external DataStore for big files.
127 | d := datastore.NewMapDatastore()
128 | // Initialize config.
129 | cfg := &config.Config{}
130 |
131 | // Use defaultBootstrap
132 | cfg.Bootstrap = config.DefaultBootstrapAddresses
133 |
134 | // Allow the node to start on any available port. We do not use the default ones.
135 | cfg.Addresses.Swarm = nConfig.Addrs
136 |
137 | cfg.Identity.PeerID = nConfig.AddrInfo.ID.Pretty()
138 | cfg.Identity.PrivKey = base64.StdEncoding.EncodeToString(nConfig.PrivKey)
139 |
140 | // Repo structure that encapsulate the config and datastore for dependency injection.
141 | buildRepo := &repo.Mock{
142 | D: dsync.MutexWrap(d),
143 | C: *cfg,
144 | }
145 | repoOption := fx.Provide(func(lc fx.Lifecycle) repo.Repo {
146 | lc.Append(fx.Hook{
147 | OnStop: func(ctx context.Context) error {
148 | return buildRepo.Close()
149 | },
150 | })
151 | return buildRepo
152 | })
153 |
154 | // Enable metrics in the node.
155 | metricsCtx := fx.Provide(func() helpers.MetricsCtx {
156 | return helpers.MetricsCtx(ctx)
157 | })
158 |
159 | // Use DefaultHostOptions
160 | hostOption := fx.Provide(func() libp2p.HostOption {
161 | return libp2p.DefaultHostOption
162 | })
163 |
164 | dhtOption := libp2p.NilRouterOption
165 | if DHTenabled {
166 | dhtOption = libp2p.DHTOption // This option sets the node to be a full DHT node (both fetching and storing DHT Records)
167 | //dhtOption = libp2p.DHTClientOption, // This option sets the node to be a client DHT node (only fetching records)
168 | }
169 |
170 | // Use libp2p.DHTOption. Could also use DHTClientOption.
171 | routingOption := fx.Provide(func() libp2p.RoutingOption {
172 | // return libp2p.DHTClientOption
173 | //TODO: Reminder. DHTRouter disabled.
174 | return dhtOption
175 | })
176 |
177 | // Return repo datastore
178 | repoDS := func(repo repo.Repo) datastore.Datastore {
179 | return d
180 | }
181 |
182 | // Assign some default values.
183 | var repubPeriod, recordLifetime time.Duration
184 | ipnsCacheSize := cfg.Ipns.ResolveCacheSize
185 | enableRelay := cfg.Swarm.Transports.Network.Relay.WithDefault(!cfg.Swarm.DisableRelay) //nolint
186 |
187 | providingOptions := node.OfflineProviders(cfg.Experimental.StrategicProviding, cfg.Reprovider.Strategy, cfg.Reprovider.Interval)
188 | if providingEnabled {
189 | providingOptions = node.OnlineProviders(cfg.Experimental.StrategicProviding, cfg.Reprovider.Strategy, cfg.Reprovider.Interval)
190 | }
191 |
192 | // Inject all dependencies for the node.
193 | // Many of the default dependencies being used. If you want to manually set any of them
194 | // follow: https://github.com/ipfs/go-ipfs/blob/master/core/node/groups.go
195 | return fx.Options(
196 | // RepoConfigurations
197 | repoOption,
198 | hostOption,
199 | routingOption,
200 | metricsCtx,
201 |
202 | // Setting baseProcess
203 | fx.Provide(baseProcess),
204 |
205 | // Storage configuration
206 | fx.Provide(repoDS),
207 | fx.Provide(node.BaseBlockstoreCtor(blockstore.DefaultCacheOpts(),
208 | false, cfg.Datastore.HashOnRead)),
209 | fx.Provide(node.GcBlockstoreCtor),
210 |
211 | // Identity dependencies
212 | node.Identity(cfg),
213 |
214 | //IPNS dependencies
215 | node.IPNS,
216 |
217 | // Network dependencies
218 | // Set exchange option.
219 | fx.Provide(exch),
220 | // Namesys and peering dependencies
221 | fx.Provide(node.Namesys(ipnsCacheSize)),
222 | fx.Provide(node.Peering),
223 | node.PeerWith(cfg.Peering.Peers...),
224 |
225 | fx.Invoke(node.IpnsRepublisher(repubPeriod, recordLifetime)),
226 |
227 | fx.Provide(p2p.New),
228 |
229 | // Libp2p dependencies
230 | node.BaseLibP2P,
231 | fx.Provide(libp2p.AddrFilters(cfg.Swarm.AddrFilters)),
232 | fx.Provide(libp2p.AddrsFactory(cfg.Addresses.Announce, cfg.Addresses.NoAnnounce)),
233 | fx.Provide(libp2p.SmuxTransport(cfg.Swarm.Transports)),
234 | fx.Provide(libp2p.Relay(enableRelay, cfg.Swarm.EnableRelayHop)),
235 | fx.Provide(libp2p.Transports(cfg.Swarm.Transports)),
236 | fx.Invoke(libp2p.StartListening(cfg.Addresses.Swarm)),
237 | 		// TODO: Reminder. MDNS discovery disabled.
238 | fx.Invoke(libp2p.SetupDiscovery(false, cfg.Discovery.MDNS.Interval)),
239 | fx.Provide(libp2p.Routing),
240 | fx.Provide(libp2p.BaseRouting),
241 | // Enable IPFS bandwidth metrics.
242 | fx.Provide(libp2p.BandwidthCounter),
243 |
244 | // TODO: Here you can see some more of the libp2p dependencies you could set.
245 | // fx.Provide(libp2p.Security(!bcfg.DisableEncryptedConnections, cfg.Swarm.Transports)),
246 | // maybeProvide(libp2p.PubsubRouter, bcfg.getOpt("ipnsps")),
247 | // maybeProvide(libp2p.BandwidthCounter, !cfg.Swarm.DisableBandwidthMetrics),
248 | // maybeProvide(libp2p.NatPortMap, !cfg.Swarm.DisableNatPortMap),
249 | // maybeProvide(libp2p.AutoRelay, cfg.Swarm.EnableAutoRelay),
250 | // autonat, // Sets autonat
251 | // connmgr, // Set connection manager
252 | // ps, // Sets pubsub router
253 | // disc, // Sets discovery service
254 | providingOptions,
255 |
256 | // Core configuration
257 | node.Core,
258 | )
259 | }
260 |
261 | // CreateIPFSNodeWithConfig constructs and returns an IpfsNode using the given cfg.
262 | func CreateIPFSNodeWithConfig(ctx context.Context, nConfig *NodeConfig, exch ExchangeOpt, DHTEnabled bool, providingEnabled bool) (*IPFSNode, error) {
263 | // save this context as the "lifetime" ctx.
264 | lctx := ctx
265 |
266 | 	// derive a new cancellable context from the lifetime ctx.
267 | ctx, cancel := context.WithCancel(ctx)
268 |
269 | // add a metrics scope.
270 | ctx = metrics.CtxScope(ctx, "ipfs")
271 |
272 | n := &core.IpfsNode{}
273 |
274 | app := fx.New(
275 | // Inject dependencies in the node.
276 | setConfig(ctx, nConfig, exch, DHTEnabled, providingEnabled),
277 |
278 | fx.NopLogger,
279 | fx.Extract(n),
280 | )
281 |
282 | var once sync.Once
283 | var stopErr error
284 | stopNode := func() error {
285 | once.Do(func() {
286 | stopErr = app.Stop(context.Background())
287 | if stopErr != nil {
288 | 				fmt.Printf("failure on stop: %v\n", stopErr)
289 | }
290 | // Cancel the context _after_ the app has stopped.
291 | cancel()
292 | })
293 | return stopErr
294 | }
295 | // Set node to Online mode.
296 | n.IsOnline = true
297 |
298 | go func() {
299 | // Shut down the application if the lifetime context is canceled.
300 | // NOTE: we _should_ stop the application by calling `Close()`
301 | // on the process. But we currently manage everything with contexts.
302 | select {
303 | case <-lctx.Done():
304 | err := stopNode()
305 | if err != nil {
306 | 				fmt.Printf("failure on stop: %v\n", err)
307 | }
308 | case <-ctx.Done():
309 | }
310 | }()
311 |
312 | if app.Err() != nil {
313 | return nil, app.Err()
314 | }
315 |
316 | if err := app.Start(ctx); err != nil {
317 | return nil, err
318 | }
319 |
320 | if err := n.Bootstrap(bootstrap.DefaultBootstrapConfig); err != nil {
321 | return nil, fmt.Errorf("Failed starting the node: %s", err)
322 | }
323 | api, err := coreapi.NewCoreAPI(n)
324 | if err != nil {
325 | return nil, fmt.Errorf("Failed starting API: %s", err)
326 |
327 | }
328 |
329 | // Attach the Core API to the constructed node
330 | return &IPFSNode{n, api, stopNode}, nil
331 | }
332 |
333 | // ClearDatastore removes the DAG rooted at rootCid from the datastore, unpinning it first if needed.
334 | // TODO: This function may be inefficient with a large blockstore; consider the option above instead.
335 | // This function may be cleaned up in the future.
336 | func (n *IPFSNode) ClearDatastore(ctx context.Context, rootCid cid.Cid) error {
337 | _, pinned, err := n.API.Pin().IsPinned(ctx, path.IpfsPath(rootCid))
338 | if err != nil {
339 | return err
340 | }
341 | if pinned {
342 | err := n.API.Pin().Rm(ctx, path.IpfsPath(rootCid))
343 | if err != nil {
344 | return err
345 | }
346 | }
347 | var ng ipld.NodeGetter = merkledag.NewSession(ctx, n.Node.DAG)
348 | toDelete := cid.NewSet()
349 | err = merkledag.Walk(ctx, merkledag.GetLinksDirect(ng), rootCid, toDelete.Visit, merkledag.Concurrent())
350 | if err != nil {
351 | return err
352 | }
353 | return toDelete.ForEach(func(c cid.Cid) error {
354 | return n.API.Block().Rm(ctx, path.IpfsPath(c))
355 | })
356 | }
357 |
358 | // EmitMetrics emits node's metrics for the run
359 | func (n *IPFSNode) EmitMetrics(recorder MetricsRecorder) error {
360 | 	// TODO: We need a way of generalizing this for any exchange type.
361 | bsnode := n.Node.Exchange.(*bs.Bitswap)
362 | stats, err := bsnode.Stat()
363 |
364 | if err != nil {
365 | return fmt.Errorf("Error getting stats from Bitswap: %w", err)
366 | }
367 |
368 | recorder.Record("msgs_rcvd", float64(stats.MessagesReceived))
369 | recorder.Record("data_sent", float64(stats.DataSent))
370 | recorder.Record("data_rcvd", float64(stats.DataReceived))
371 | recorder.Record("block_data_rcvd", float64(stats.BlockDataReceived))
372 | recorder.Record("dup_data_rcvd", float64(stats.DupDataReceived))
373 | recorder.Record("blks_sent", float64(stats.BlocksSent))
374 | recorder.Record("blks_rcvd", float64(stats.BlocksReceived))
375 | recorder.Record("dup_blks_rcvd", float64(stats.DupBlksReceived))
376 | recorder.Record("wants_rcvd", float64(stats.WantsRecvd))
377 | recorder.Record("want_blocks_rcvd", float64(stats.WantBlocksRecvd))
378 | recorder.Record("want_haves_rcvd", float64(stats.WantHavesRecvd))
379 | recorder.Record("stream_data_sent", float64(stats.StreamDataSent))
380 |
381 | // IPFS Node Stats
382 | bwTotal := n.Node.Reporter.GetBandwidthTotals()
383 | recorder.Record("total_in", float64(bwTotal.TotalIn))
384 | recorder.Record("total_out", float64(bwTotal.TotalOut))
385 | recorder.Record("rate_in", float64(bwTotal.RateIn))
386 | recorder.Record("rate_out", float64(bwTotal.RateOut))
387 |
388 | // Restart all counters for the next test.
389 | n.Node.Reporter.Reset()
390 | n.Node.Exchange.(*bs.Bitswap).ResetStatCounters()
391 |
392 | // A few other metrics that could be collected.
393 | // GetBandwidthForPeer(peer.ID) Stats
394 | // GetBandwidthForProtocol(protocol.ID) Stats
395 | // GetBandwidthTotals() Stats
396 | // GetBandwidthByPeer() map[peer.ID]Stats
397 | // GetBandwidthByProtocol() map[protocol.ID]Stats
398 |
399 | return nil
400 | }
401 |
402 | func (n *IPFSNode) Add(ctx context.Context, tmpFile files.Node) (cid.Cid, error) {
403 | path, err := n.API.Unixfs().Add(ctx, tmpFile)
404 | if err != nil {
405 | return cid.Undef, err
406 | }
407 | return path.Cid(), nil
408 | }
409 |
410 | func (n *IPFSNode) Fetch(ctx context.Context, c cid.Cid, _ []PeerInfo) (files.Node, error) {
411 | fPath := path.IpfsPath(c)
412 | return n.API.Unixfs().Get(ctx, fPath)
413 | }
414 |
415 | func (n *IPFSNode) DAGService() ipld.DAGService {
416 | return n.Node.DAG
417 | }
418 |
419 | func (n *IPFSNode) Host() host.Host {
420 | return n.Node.PeerHost
421 | }
422 |
423 | func (n *IPFSNode) EmitKeepAlive(recorder MessageRecorder) error {
424 |
425 | recorder.RecordMessage("I am still alive! Total In: %d - TotalOut: %d",
426 | n.Node.Reporter.GetBandwidthTotals().TotalIn,
427 | n.Node.Reporter.GetBandwidthTotals().TotalOut)
428 |
429 | return nil
430 | }
431 |
432 | var _ Node = &IPFSNode{}
433 |
--------------------------------------------------------------------------------
/testbed/testbed/utils/libp2pHTTP.go:
--------------------------------------------------------------------------------
1 | package utils
2 |
3 | import (
4 | "context"
5 | "encoding/binary"
6 | "errors"
7 | "fmt"
8 | "io"
9 | "math/rand"
10 | "net/http"
11 | "time"
12 |
13 | "github.com/ipfs/go-cid"
14 | files "github.com/ipfs/go-ipfs-files"
15 | ipld "github.com/ipfs/go-ipld-format"
16 | "github.com/libp2p/go-libp2p-core/host"
17 | "github.com/libp2p/go-libp2p-core/peer"
18 | gostream "github.com/libp2p/go-libp2p-gostream"
19 | p2phttp "github.com/libp2p/go-libp2p-http"
20 | mh "github.com/multiformats/go-multihash"
21 | )
22 |
23 | type Libp2pHTTPNode struct {
24 | client *http.Client
25 | h host.Host
26 | svr *http.Server
27 | }
28 |
29 | func CreateLibp2pHTTPNode(ctx context.Context, h host.Host, nodeTP NodeType) (*Libp2pHTTPNode, error) {
30 | switch nodeTP {
31 | case Seed:
32 | // Server
33 | listener, err := gostream.Listen(h, p2phttp.DefaultP2PProtocol)
34 | if err != nil {
35 | return nil, err
36 | }
37 | 		// Start an HTTP server that serves requests arriving over the libp2p listener.
38 | svr := &http.Server{}
39 | go svr.Serve(listener)
40 | time.Sleep(1 * time.Second)
41 | return &Libp2pHTTPNode{
42 | h: h,
43 | svr: svr,
44 | }, nil
45 | case Leech:
46 | tr := &http.Transport{}
47 | tr.RegisterProtocol("libp2p", p2phttp.NewTransport(h))
48 | client := &http.Client{Transport: tr}
49 |
50 | return &Libp2pHTTPNode{
51 | client: client,
52 | h: h,
53 | }, nil
54 | default:
55 | return nil, errors.New("nodeType NOT supported")
56 | }
57 | }
58 |
59 | func (l *Libp2pHTTPNode) Add(ctx context.Context, file files.Node) (cid.Cid, error) {
60 | f := files.ToFile(file)
61 | if f == nil {
62 | return cid.Undef, errors.New("node is NOT a File")
63 | }
64 |
65 | c, err := randCid()
66 | if err != nil {
67 | return cid.Undef, err
68 | }
69 |
70 | // set up http server to send file
71 | http.HandleFunc(fmt.Sprintf("/%s", c.String()), func(w http.ResponseWriter, r *http.Request) {
72 | defer f.Close()
73 | _, err := io.Copy(w, f)
74 | if err != nil {
75 | panic(err)
76 | }
77 | })
78 |
79 | return c, nil
80 | }
81 |
82 | func (l *Libp2pHTTPNode) Fetch(ctx context.Context, cid cid.Cid, peers []PeerInfo) (files.Node, error) {
83 | seedCount := 0
84 | var seed peer.ID
85 |
86 | for _, p := range peers {
87 | if p.Nodetp == Seed {
88 | seedCount++
89 | seed = p.Addr.ID
90 | }
91 | }
92 | if seedCount != 1 {
93 | return nil, errors.New("libp2p http should ONLY have one seed")
94 | }
95 |
96 | resp, err := l.client.Get(fmt.Sprintf("libp2p://%s/%s", seed.String(), cid.String()))
97 | if err != nil {
98 | return nil, err
99 | }
100 |
101 | return files.NewReaderFile(resp.Body), nil
102 | }
103 |
104 | func (l *Libp2pHTTPNode) Host() host.Host {
105 | return l.h
106 | }
107 |
108 | func (l *Libp2pHTTPNode) ClearDatastore(ctx context.Context, rootCid cid.Cid) error {
109 |
110 | return nil
111 | }
112 |
113 | // NO-OP for now.
114 | // TODO: Fix.
115 | func (l *Libp2pHTTPNode) EmitMetrics(recorder MetricsRecorder) error {
116 | return nil
117 | }
118 |
119 | // NO-OP
120 | func (l *Libp2pHTTPNode) DAGService() ipld.DAGService {
121 | return nil
122 | }
123 |
124 | // NO-OP
125 | func (l *Libp2pHTTPNode) EmitKeepAlive(recorder MessageRecorder) error {
126 | return nil
127 | }
128 |
129 | func randCid() (cid.Cid, error) {
130 | buf := make([]byte, binary.MaxVarintLen64)
131 | u := rand.Uint64()
132 | binary.PutUvarint(buf, u)
133 | h1, err := mh.Sum(buf, mh.SHA2_256, -1)
134 | if err != nil {
135 | return cid.Undef, err
136 | }
137 |
138 | return cid.NewCidV1(cid.Raw, h1), nil
139 | }
140 |
--------------------------------------------------------------------------------
/testbed/testbed/utils/net.go:
--------------------------------------------------------------------------------
1 | package utils
2 |
3 | import (
4 | "context"
5 | "strings"
6 | "time"
7 |
8 | "github.com/testground/sdk-go/network"
9 | "github.com/testground/sdk-go/runtime"
10 | "github.com/testground/sdk-go/sync"
11 | )
12 |
13 | // SetupNetwork instructs the sidecar (if enabled) to set up the network for this
14 | // test case.
15 | func SetupNetwork(ctx context.Context, runenv *runtime.RunEnv,
16 | nwClient *network.Client, nodetp NodeType, tpindex int, baseLatency time.Duration,
17 | bandwidth int, jitterPct int) error {
18 |
19 | if !runenv.TestSidecar {
20 | return nil
21 | }
22 |
23 | // Wait for the network to be initialized.
24 | if err := nwClient.WaitNetworkInitialized(ctx); err != nil {
25 | return err
26 | }
27 |
28 | latency, err := getLatency(runenv, nodetp, tpindex, baseLatency)
29 | if err != nil {
30 | return err
31 | }
32 |
33 | cfg := &network.Config{
34 | Network: "default",
35 | Enable: true,
36 | RoutingPolicy: network.AllowAll,
37 | Default: network.LinkShape{
38 | Latency: latency,
39 | Bandwidth: uint64(bandwidth) * 1024 * 1024,
40 | Jitter: (time.Duration(jitterPct) * latency) / 100,
41 | },
42 | CallbackState: sync.State("network-configured"),
43 | CallbackTarget: runenv.TestInstanceCount,
44 | }
45 |
46 | runenv.RecordMessage("%s %d has %s latency (%d%% jitter) and %dMB bandwidth", nodetp, tpindex, latency, jitterPct, bandwidth)
47 |
48 | return nwClient.ConfigureNetwork(ctx, cfg)
49 | }
50 |
51 | // If there's a latency specific to the node type, overwrite the default latency
52 | func getLatency(runenv *runtime.RunEnv, nodetp NodeType, tpindex int, baseLatency time.Duration) (time.Duration, error) {
53 | if nodetp == Seed {
54 | return getTypeLatency(runenv, "seed_latency_ms", tpindex, baseLatency)
55 | } else if nodetp == Leech {
56 | return getTypeLatency(runenv, "leech_latency_ms", tpindex, baseLatency)
57 | }
58 | return baseLatency, nil
59 | }
60 |
61 | // If the parameter is a comma-separated list, each value in the list
62 | // corresponds to the type index. For example:
63 | // seed_latency_ms=100,200,400
64 | // means that
65 | // - the first seed has 100ms latency
66 | // - the second seed has 200ms latency
67 | // - the third seed has 400ms latency
68 | // - any subsequent seeds have the base latency
69 | func getTypeLatency(runenv *runtime.RunEnv, param string, tpindex int, baseLatency time.Duration) (time.Duration, error) {
70 | // No type specific latency set, just return the default
71 | if !runenv.IsParamSet(param) {
72 | return baseLatency, nil
73 | }
74 |
75 | // Not a comma-separated list, interpret the value as an int and apply
76 | // the same latency to all peers of this type
77 | if !strings.Contains(runenv.StringParam(param), ",") {
78 | return baseLatency + time.Duration(runenv.IntParam(param)) * time.Millisecond, nil
79 | }
80 |
81 | // Comma separated list, the position in the list corresponds to the
82 | // type index
83 | latencies, err := ParseIntArray(runenv.StringParam(param))
84 | if err != nil {
85 | return 0, err
86 | }
87 | if tpindex < len(latencies) {
88 | return baseLatency + time.Duration(latencies[tpindex]) * time.Millisecond, nil
89 | }
90 |
91 | // More peers of this type than entries in the list. Return the default
92 | // latency for peers not covered by list entries
93 | return baseLatency, nil
94 | }
95 |
--------------------------------------------------------------------------------
/testbed/testbed/utils/node.go:
--------------------------------------------------------------------------------
1 | package utils
2 |
3 | import (
4 | "context"
5 |
6 | "github.com/ipfs/go-cid"
7 | files "github.com/ipfs/go-ipfs-files"
8 | ipld "github.com/ipfs/go-ipld-format"
9 | "github.com/libp2p/go-libp2p-core/host"
10 | "github.com/libp2p/go-libp2p-core/peer"
11 | )
12 |
13 | // PeerInfo provides all the necessary information to dial a peer
14 | type PeerInfo struct {
15 | Addr peer.AddrInfo
16 | Nodetp NodeType
17 | }
18 |
19 | type Node interface {
20 | Add(ctx context.Context, file files.Node) (cid.Cid, error)
21 | Fetch(ctx context.Context, cid cid.Cid, peers []PeerInfo) (files.Node, error)
22 | ClearDatastore(ctx context.Context, rootCid cid.Cid) error
23 | EmitMetrics(recorder MetricsRecorder) error
24 | Host() host.Host
25 | DAGService() ipld.DAGService
26 | EmitKeepAlive(recorder MessageRecorder) error
27 | }
28 |
29 | type MetricsRecorder interface {
30 | Record(key string, value float64)
31 | }
32 |
33 | type MessageRecorder interface {
34 | RecordMessage(msg string, a ...interface{})
35 | }
36 |
--------------------------------------------------------------------------------
/testbed/testbed/utils/params.go:
--------------------------------------------------------------------------------
1 | package utils
2 |
3 | import (
4 | "fmt"
5 | "strconv"
6 | "strings"
7 | )
8 |
9 | func ParseIntArray(value string) ([]uint64, error) {
10 | var ints []uint64
11 | strs := strings.Split(value, ",")
12 | for _, str := range strs {
13 | num, err := strconv.ParseUint(str, 10, 64)
14 | if err != nil {
15 | 			return nil, fmt.Errorf("could not convert '%s' to an integer", str)
16 | }
17 | ints = append(ints, num)
18 | }
19 | return ints, nil
20 | }
21 |
--------------------------------------------------------------------------------
/testbed/testbed/utils/rawLibp2p.go:
--------------------------------------------------------------------------------
1 | package utils
2 |
3 | import (
4 | "context"
5 | "errors"
6 | "io"
7 |
8 | "github.com/ipfs/go-cid"
9 | files "github.com/ipfs/go-ipfs-files"
10 | ipld "github.com/ipfs/go-ipld-format"
11 | "github.com/libp2p/go-libp2p-core/host"
12 | "github.com/libp2p/go-libp2p-core/network"
13 | "github.com/libp2p/go-libp2p-core/peer"
14 | "github.com/libp2p/go-libp2p-core/protocol"
15 | )
16 |
17 | type RawLibp2pNode struct {
18 | h host.Host
19 | }
20 |
21 | func CreateRawLibp2pNode(ctx context.Context, h host.Host, nodeTP NodeType) (*RawLibp2pNode, error) {
22 | return &RawLibp2pNode{
23 | h: h,
24 | }, nil
25 | }
26 |
27 | func (r *RawLibp2pNode) Add(ctx context.Context, file files.Node) (cid.Cid, error) {
28 | f := files.ToFile(file)
29 | if f == nil {
30 | return cid.Undef, errors.New("node is NOT a File")
31 | }
32 |
33 | // associate a random CID with the file here as we don't really care about CIDs for the Libp2p transfer
34 | c, err := randCid()
35 | if err != nil {
36 | return cid.Undef, err
37 | }
38 |
39 | // set up handler to send file
40 | r.h.SetStreamHandler(protocol.ID(c.String()), func(s network.Stream) {
41 | buf := make([]byte, network.MessageSizeMax)
42 | if _, err := io.CopyBuffer(s, f, buf); err != nil {
43 | s.Reset()
44 | }
45 | s.Close()
46 | })
47 |
48 | return c, nil
49 | }
50 |
51 | func (r *RawLibp2pNode) Fetch(ctx context.Context, cid cid.Cid, peers []PeerInfo) (files.Node, error) {
52 | seedCount := 0
53 | var seed peer.ID
54 |
55 | for _, p := range peers {
56 | if p.Nodetp == Seed {
57 | seedCount++
58 | seed = p.Addr.ID
59 | }
60 | }
61 | if seedCount != 1 {
62 | return nil, errors.New("libp2p should ONLY have one seed")
63 | }
64 |
65 | s, err := r.h.NewStream(ctx, seed, protocol.ID(cid.String()))
66 | if err != nil {
67 | return nil, err
68 | }
69 |
70 | return files.NewReaderFile(s), nil
71 | }
72 |
73 | func (r *RawLibp2pNode) Host() host.Host {
74 | return r.h
75 | }
76 |
77 | // NO-OP for now.
78 | // TODO: Fix.
79 | func (r *RawLibp2pNode) EmitMetrics(recorder MetricsRecorder) error {
80 | return nil
81 | }
82 |
83 | // NO-OP
84 | func (r *RawLibp2pNode) ClearDatastore(ctx context.Context, rootCid cid.Cid) error {
85 | return nil
86 | }
87 |
88 | // NO-OP
89 | func (r *RawLibp2pNode) DAGService() ipld.DAGService {
90 | return nil
91 | }
92 |
93 | // NO-OP
94 | func (r *RawLibp2pNode) EmitKeepAlive(recorder MessageRecorder) error {
95 | return nil
96 | }
97 |
--------------------------------------------------------------------------------
/testbed/testbed/utils/tcp.go:
--------------------------------------------------------------------------------
1 | package utils
2 |
3 | import (
4 | "context"
5 | "fmt"
6 | "io"
7 | "io/ioutil"
8 | "net"
9 | "strconv"
10 | "strings"
11 | "sync"
12 |
13 | files "github.com/ipfs/go-ipfs-files"
14 | "github.com/libp2p/go-libp2p-core/network"
15 | "github.com/testground/sdk-go/runtime"
16 | )
17 |
18 | // TCPServer structure
19 | type TCPServer struct {
20 | quit chan interface{}
21 | listener net.Listener
22 | file TestFile
23 | Addr string
24 | wg sync.WaitGroup
25 | }
26 |
27 | // SpawnTCPServer spawns a TCP server that serves a specific file.
28 | func SpawnTCPServer(ctx context.Context, ip string, tmpFile TestFile) (*TCPServer, error) {
29 | 	// Create a TCP listener on the given IP, letting the OS pick any available port.
30 | 	listener, err := net.Listen("tcp", ip+":0")
31 | 	if err != nil {
32 | 		fmt.Println("Error listening: ", err)
33 | 		return nil, err
34 | 	}
35 | 	fmt.Println("listening at: ", listener.Addr().String())
36 | //Spawn a new goroutine whenever a client connects
37 | s := &TCPServer{
38 | quit: make(chan interface{}),
39 | listener: listener,
40 | file: tmpFile,
41 | Addr: listener.Addr().String(),
42 | }
43 | s.wg.Add(1)
44 | go s.Start()
45 | return s, nil
46 | }
47 |
48 | // Start listening for connections.
49 | func (s *TCPServer) Start() {
50 | // Start listening routine
51 | defer s.wg.Done()
52 | for {
53 | connection, err := s.listener.Accept()
54 | if err != nil {
55 | select {
56 | case <-s.quit:
57 | return
58 | default:
59 | fmt.Println("Accept error", err)
60 | }
61 | 		} else {
62 | 			s.wg.Add(1)
63 | 			// Release the WaitGroup only once the transfer goroutine has finished.
64 | 			go func() { defer s.wg.Done(); s.sendFileToClient(connection) }()
65 | 		}
66 | }
67 | }
68 |
69 | // Close the TCP Server.
70 | func (s *TCPServer) Close() {
71 | close(s.quit)
72 | s.listener.Close()
73 | s.wg.Wait()
74 | fmt.Println("Successfully closed TCP server")
75 | }
76 |
77 | // fillString pads the string with ':' up to toLength (used to send a fixed-width file-size header).
78 | func fillString(returnString string, toLength int) string {
79 | for {
80 | 		lengthString := len(returnString)
81 | 		if lengthString < toLength {
82 | returnString = returnString + ":"
83 | continue
84 | }
85 | break
86 | }
87 | return returnString
88 | }
89 |
90 | // Sends file to client.
91 | func (s *TCPServer) sendFileToClient(connection net.Conn) {
92 | defer connection.Close()
93 | 	// Passing a files.Node directly meant that concurrent routines shared the
94 | 	// same reader: instead of the file being sent n times, each client received
95 | 	// only a part of it. Generate a fresh copy of the file per connection instead.
96 | tmpFile, err := s.file.GenerateFile()
97 | if err != nil {
98 | fmt.Println("Failed generating file:", err)
99 | return
100 | }
101 |
102 | var f io.Reader
103 | f = files.ToFile(tmpFile)
104 | if f == nil {
105 | d := files.ToDir(tmpFile)
106 | if d == nil {
107 | fmt.Println("Must be a file or dir")
108 | return
109 | }
110 | f = files.NewMultiFileReader(d, false)
111 | }
112 |
113 | size := s.file.Size()
114 | // The first write is to notify the size.
115 | fileSize := fillString(strconv.FormatInt(size, 10), 10)
116 | fmt.Println("Sending file of: ", size)
117 | connection.Write([]byte(fileSize))
118 |
119 | // Sending the file.
120 | buf := make([]byte, network.MessageSizeMax)
121 | written, err := io.CopyBuffer(connection, f, buf)
122 | if err != nil {
123 | 		fmt.Println("Error sending file: ", err)
124 | }
125 | connection.Close()
126 |
127 | fmt.Println("Bytes sent from server", written)
128 | return
129 | }
130 |
131 | // FetchFileTCP fetches the file served by a TCP server at the given address.
132 | func FetchFileTCP(connection net.Conn, runEnv *runtime.RunEnv) {
133 | // read file size
134 | bufferFileSize := make([]byte, 10)
135 | if _, err := connection.Read(bufferFileSize); err != nil {
136 | runEnv.RecordFailure(err)
137 | return
138 | }
139 | fileSize, _ := strconv.ParseInt(strings.Trim(string(bufferFileSize), ":"), 10, 64)
140 |
141 | // Read from connection
142 | buf := make([]byte, network.MessageSizeMax)
143 | w, err := io.CopyBuffer(ioutil.Discard, connection, buf)
144 | if err != nil {
145 | runEnv.RecordFailure(err)
146 | return
147 | }
148 | if w != fileSize {
149 | 		runEnv.RecordFailure(fmt.Errorf("expected: %d, got: %d bytes", fileSize, w))
150 | }
151 | }
152 |
--------------------------------------------------------------------------------
/testbed/testbed/utils/walker.go:
--------------------------------------------------------------------------------
1 | package utils
2 |
3 | import (
4 | "context"
5 |
6 | cid "github.com/ipfs/go-cid"
7 | ipld "github.com/ipfs/go-ipld-format"
8 | "golang.org/x/sync/errgroup"
9 | )
10 |
11 | // Adapted from the netflix/p2plab repo under an Apache-2 license.
12 | // Original source code located at https://github.com/Netflix/p2plab/blob/master/dag/walker.go
13 | func Walk(ctx context.Context, c cid.Cid, ng ipld.NodeGetter) error {
14 | nd, err := ng.Get(ctx, c)
15 | if err != nil {
16 | return err
17 | }
18 |
19 | return walk(ctx, nd, ng)
20 | }
21 |
22 | func walk(ctx context.Context, nd ipld.Node, ng ipld.NodeGetter) error {
23 | var cids []cid.Cid
24 | for _, link := range nd.Links() {
25 | cids = append(cids, link.Cid)
26 | }
27 |
28 | eg, gctx := errgroup.WithContext(ctx)
29 |
30 | ndChan := ng.GetMany(ctx, cids)
31 | for ndOpt := range ndChan {
32 | if ndOpt.Err != nil {
33 | return ndOpt.Err
34 | }
35 |
36 | nd := ndOpt.Node
37 | eg.Go(func() error {
38 | return walk(gctx, nd, ng)
39 | })
40 | }
41 |
42 | err := eg.Wait()
43 | if err != nil {
44 | return err
45 | }
46 |
47 | return nil
48 | }
49 |
--------------------------------------------------------------------------------
/testbed/viewer/README.md:
--------------------------------------------------------------------------------
1 | # Bitswap Viewer
2 | The tool uses an ObservableHQ notebook and Jaeger to collect logs from an instrumented `go-bitswap` implementation and to visualize the flow of messages exchanged during an execution of the protocol.
3 | https://observablehq.com/@adlrocha/bitswap-viewer
4 |
5 | 
6 | ## Installation
7 |
8 | ### Using the instrumented Bitswap implementation
9 | To use the instrumented implementation of Bitswap in your IPFS node, or in any environment where you are running `go-bitswap`, point to the following fork by adding a `replace` directive to your `go.mod`:
10 | ```
11 | replace github.com/ipfs/go-bitswap => github.com/adlrocha/go-bitswap 1e2319bd190f17f5455fea7bb73dbd6d2af815f8
12 | ```
13 | The Beyond Bitswap [probe](../probe) has been configured to point to this fork by default, so any execution of the probe is automatically traced.
14 |
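If you prefer not to edit `go.mod` by hand, the same directive can usually be added from the command line (a sketch, not tested against this repo; the go tool is expected to resolve the raw commit hash above to a pseudo-version the next time the module graph is rebuilt):
```
$ go mod edit -replace=github.com/ipfs/go-bitswap=github.com/adlrocha/go-bitswap@1e2319bd190f17f5455fea7bb73dbd6d2af815f8
$ go mod tidy
```
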
15 | ### Installing Jaeger and making it available from ObservableHQ
16 | To collect the traces from the instrumented Bitswap execution you need to have a local Jaeger environment up and running. All the code assumes that you have `jaeger-all-in-one` running locally with the default configurations. You can find the installation instructions for Jaeger [here](https://www.jaegertracing.io/docs/1.19/getting-started/).
17 |
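If you would rather not install the binary, running the all-in-one Docker image is a common alternative. The sketch below maps only the two ports most Go tracer setups use by default (6831/udp for spans sent over UDP and 16686 for the query UI/API); depending on how the instrumented `go-bitswap` exports traces you may need to map additional ports:
```
$ docker run -d --name jaeger -p 6831:6831/udp -p 16686:16686 jaegertracing/all-in-one:1.19
```
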
18 | Finally, in order to retrieve the logs collected by Jaeger from the ObservableHQ notebook, you need to run a local proxy server with CORS enabled between Jaeger and the notebook. This repo includes a simple implementation of such a proxy in `server.js`. You can run it easily using NodeJS:
19 | ```
20 | $ node ./server.js
21 | ```
22 | (Any other proxy server that interfaces Jaeger with the ObservableHQ notebook so that CORS can be enabled would do the job.)
23 |
24 | ## Usage
25 | To use the tool, make sure that you have Jaeger and the proxy server running:
26 | ```
27 | $ ./jaeger-all-in-one
28 | $ node server.js
29 | ```
30 | You can run a simple file exchange between two nodes using the probe to collect some logs in Jaeger. You can check whether any logs were collected for the execution by going to http://localhost:16686.
31 |
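If you prefer to check from the command line, Jaeger's standard query API is also reachable through the CORS proxy; the service name to query is whatever the instrumented `go-bitswap` registered, so take it from the output of the first command:
```
$ curl http://localhost:3000/api/services
$ curl "http://localhost:3000/api/traces?service=<service-name>&limit=20"
```
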
32 | If you can see Bitswap logs in the Jaeger UI, everything is ready to start watching messages flow in the ObservableHQ notebook: https://observablehq.com/@adlrocha/bitswap-viewer
33 |
34 | Move the timestamp slider right and left to observe step-by-step how messages flow between nodes.
35 |
--------------------------------------------------------------------------------
/testbed/viewer/image.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/protocol/beyond-bitswap/63a95b41d5c71d0da509edc8c5ac43de9fdea09d/testbed/viewer/image.png
--------------------------------------------------------------------------------
/testbed/viewer/server.js:
--------------------------------------------------------------------------------
1 | var http = require('http');
2 |
3 | console.log("Proxy server running on port 3000...")
4 | http.createServer(onRequest).listen(3000);
5 |
6 | function onRequest(client_req, client_res) {
7 | console.log('serve: ' + client_req.url);
8 |
9 | var options = {
10 | hostname: 'localhost',
11 | port: 16686,
12 | path: client_req.url,
13 | method: client_req.method,
14 | headers: client_req.headers
15 | };
16 |
17 | var proxy = http.request(options, function (res) {
18 | res.headers["Access-Control-Allow-Origin"] = "*"
19 | res.headers["Access-Control-Allow-Headers"] = "Origin, X-Requested-With, Content-Type, Accept"
20 | res.headers["Access-Control-Allow-Methods"] = "OPTIONS, POST, GET"
21 | client_res.writeHead(res.statusCode, res.headers)
22 | res.pipe(client_res, {
23 | end: true
24 | });
25 | });
26 |
27 | client_req.pipe(proxy, {
28 | end: true
29 | });
30 | }
--------------------------------------------------------------------------------