├── .dockerignore
├── .github
│   ├── dependabot.yml
│   └── workflows
│       ├── docker.yml
│       ├── go-check.yml
│       ├── go-test-config.json
│       ├── go-test.yml
│       ├── release-check.yml
│       ├── releaser.yml
│       ├── stale.yml
│       └── tagpush.yml
├── .gitignore
├── .tool-versions
├── .travis.yml
├── Dockerfile
├── LICENSE
├── LICENSE-APACHE
├── LICENSE-MIT
├── Makefile
├── README.md
├── datastore
│   ├── pgxpool.go
│   └── postgres.go
├── docs
│   └── deployment.md
├── entrypoint.sh
├── exec_bash.sh
├── go.mod
├── go.sum
├── head
│   ├── head.go
│   ├── head_test.go
│   ├── opts
│   │   └── options.go
│   └── testing.go
├── httpapi
│   ├── httpapi.go
│   └── httpapi_test.go
├── hydra
│   ├── hydra.go
│   └── hydra_test.go
├── idgen
│   ├── cleaning.go
│   ├── cleaning_test.go
│   ├── delegated.go
│   ├── delegated_test.go
│   ├── idgen.go
│   ├── idgen_test.go
│   ├── xortrie.go
│   └── xortrie_test.go
├── k8s
│   ├── README.md
│   ├── alasybil.yaml
│   ├── bubbles.yaml
│   ├── chumpy.yaml
│   ├── domino.yaml
│   ├── euclid.yaml
│   ├── flake.yaml
│   ├── grendel.yaml
│   ├── hojo.yaml
│   ├── ibycus.yaml
│   ├── jetta.yaml
│   └── namespace.yaml
├── main.go
├── metrics
│   ├── aws.go
│   ├── definitions.go
│   ├── metrics.go
│   └── rcmgr.go
├── metricstasks
│   ├── metricstasks.go
│   └── metricstasks_test.go
├── periodictasks
│   ├── runner.go
│   └── runner_test.go
├── promconfig.yaml
├── providers
│   ├── caching.go
│   ├── caching_test.go
│   ├── combined.go
│   ├── ddb.go
│   ├── ddb_test.go
│   ├── finder.go
│   ├── finder_test.go
│   ├── httpapi.go
│   ├── noop.go
│   └── unsupported.go
├── run.sh
├── testdata
│   └── metrics
│       ├── 1head.txt
│       └── 2heads.txt
├── testing
│   └── helpers.go
├── ui
│   ├── gooey.go
│   ├── opts
│   │   └── options.go
│   ├── ui.go
│   └── ui_test.go
├── utils
│   ├── opts.go
│   ├── port-selector.go
│   └── port-selector_test.go
├── version.json
└── version
    └── version.go
/.dockerignore:
--------------------------------------------------------------------------------
1 | hydra-booster
2 | hydra-belly
3 | hydra-pstore
4 | coverage.txt
5 | prometheus-data
6 | secrets.yaml
7 | k8s/*
8 | Dockerfile
9 | README.md
10 | .git
11 |
--------------------------------------------------------------------------------
/.github/dependabot.yml:
--------------------------------------------------------------------------------
1 | version: 2
2 | updates:
3 | - package-ecosystem: "github-actions"
4 | directory: "/"
5 | schedule:
6 | interval: "weekly"
7 |
--------------------------------------------------------------------------------
/.github/workflows/docker.yml:
--------------------------------------------------------------------------------
1 | name: Publish Docker image
2 |
3 | on:
4 | push
5 |
6 | jobs:
7 | push_to_registry:
8 | name: Push Docker image to Docker Hub
9 | environment: DockerBuilders
10 | runs-on: ubuntu-latest
11 | env:
12 | IMAGE_NAME: libp2p/hydra-booster
13 | steps:
14 | - name: Check out the repo
15 | uses: actions/checkout@v3
16 |
17 | - name: Set SHORT_SHA
18 | run: echo $GITHUB_SHA | head -c7 > SHORT_SHA
19 |
20 | - name: Build Docker image
21 | run: docker build -t $IMAGE_NAME:$(cat SHORT_SHA) .
22 |
23 | - name: Log in to Docker Hub
24 | uses: docker/login-action@465a07811f14bebb1938fbed4728c6a1ff8901fc
25 | with:
26 | username: ${{ secrets.DOCKER_USERNAME }}
27 | password: ${{ secrets.DOCKER_PASSWORD }}
28 |
29 | - name: Publish SHORT_SHA tag to Docker Hub
30 | run: docker push ${IMAGE_NAME}:$(cat SHORT_SHA)
31 |
--------------------------------------------------------------------------------
/.github/workflows/go-check.yml:
--------------------------------------------------------------------------------
1 | name: Go Checks
2 |
3 | on:
4 | pull_request:
5 | push:
6 | branches: ["master"]
7 | workflow_dispatch:
8 |
9 | permissions:
10 | contents: read
11 |
12 | concurrency:
13 | group: ${{ github.workflow }}-${{ github.event_name }}-${{ github.event_name == 'push' && github.sha || github.ref }}
14 | cancel-in-progress: true
15 |
16 | jobs:
17 | go-check:
18 | uses: ipdxco/unified-github-workflows/.github/workflows/go-check.yml@v1.0
19 |
--------------------------------------------------------------------------------
/.github/workflows/go-test-config.json:
--------------------------------------------------------------------------------
1 | {
2 | "skip32bit": true,
3 | "skipOSes": ["macos", "windows"],
4 | "skipRace": true
5 | }
6 |
--------------------------------------------------------------------------------
/.github/workflows/go-test.yml:
--------------------------------------------------------------------------------
1 | name: Go Test
2 |
3 | on:
4 | pull_request:
5 | push:
6 | branches: ["master"]
7 | workflow_dispatch:
8 |
9 | permissions:
10 | contents: read
11 |
12 | concurrency:
13 | group: ${{ github.workflow }}-${{ github.event_name }}-${{ github.event_name == 'push' && github.sha || github.ref }}
14 | cancel-in-progress: true
15 |
16 | jobs:
17 | go-test:
18 | uses: ipdxco/unified-github-workflows/.github/workflows/go-test.yml@v1.0
19 | secrets:
20 | CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
21 |
--------------------------------------------------------------------------------
/.github/workflows/release-check.yml:
--------------------------------------------------------------------------------
1 | name: Release Checker
2 |
3 | on:
4 | pull_request_target:
5 | paths: [ 'version.json' ]
6 | types: [ opened, synchronize, reopened, labeled, unlabeled ]
7 | workflow_dispatch:
8 |
9 | permissions:
10 | contents: write
11 | pull-requests: write
12 |
13 | concurrency:
14 | group: ${{ github.workflow }}-${{ github.ref }}
15 | cancel-in-progress: true
16 |
17 | jobs:
18 | release-check:
19 | uses: ipdxco/unified-github-workflows/.github/workflows/release-check.yml@v1.0
20 |
--------------------------------------------------------------------------------
/.github/workflows/releaser.yml:
--------------------------------------------------------------------------------
1 | name: Releaser
2 |
3 | on:
4 | push:
5 | paths: [ 'version.json' ]
6 | workflow_dispatch:
7 |
8 | permissions:
9 | contents: write
10 |
11 | concurrency:
12 | group: ${{ github.workflow }}-${{ github.sha }}
13 | cancel-in-progress: true
14 |
15 | jobs:
16 | releaser:
17 | uses: ipdxco/unified-github-workflows/.github/workflows/releaser.yml@v1.0
18 |
--------------------------------------------------------------------------------
/.github/workflows/stale.yml:
--------------------------------------------------------------------------------
1 | name: Close and mark stale issue
2 |
3 | on:
4 | schedule:
5 | - cron: '0 0 * * *'
6 |
7 | permissions:
8 | issues: write
9 | pull-requests: write
10 |
11 | jobs:
12 | stale:
13 | uses: pl-strflt/.github/.github/workflows/reusable-stale-issue.yml@v0.3
14 |
--------------------------------------------------------------------------------
/.github/workflows/tagpush.yml:
--------------------------------------------------------------------------------
1 | name: Tag Push Checker
2 |
3 | on:
4 | push:
5 | tags:
6 | - v*
7 |
8 | permissions:
9 | contents: read
10 | issues: write
11 |
12 | concurrency:
13 | group: ${{ github.workflow }}-${{ github.ref }}
14 | cancel-in-progress: true
15 |
16 | jobs:
17 | releaser:
18 | uses: ipdxco/unified-github-workflows/.github/workflows/tagpush.yml@v1.0
19 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | hydra-booster
2 | hydra-belly
3 | hydra-pstore
4 | coverage.txt
5 | prometheus-data
6 | secrets.yaml
7 |
--------------------------------------------------------------------------------
/.tool-versions:
--------------------------------------------------------------------------------
1 | kubectl 1.17.16
2 |
--------------------------------------------------------------------------------
/.travis.yml:
--------------------------------------------------------------------------------
1 | os:
2 | - linux
3 |
4 | language: go
5 |
6 | go:
7 | - 1.18.x
8 |
9 | env:
10 | matrix:
11 | - GOTFLAGS="-race"
12 | - GOTFLAGS="-race -tags=openssl"
13 |
14 | cache:
15 | directories:
16 | - $GOPATH/pkg/mod
17 | - $HOME/.cache/go-build
18 |
19 | script:
20 | - go build ./...
21 | - go test -v -race -coverprofile=coverage.txt -covermode=atomic ./...
22 |
23 | after_success:
24 | - bash <(curl -s https://codecov.io/bash)
25 |
26 | notifications:
27 | email: false
28 |
--------------------------------------------------------------------------------
/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM golang:1.18-alpine AS build
2 |
3 | RUN apk add --no-cache openssl-dev git build-base
4 |
5 | WORKDIR /hydra-booster
6 |
7 | COPY go.mod go.sum ./
8 | RUN go mod download -x
9 |
10 | # Copy the source from the current directory
11 | # to the Working Directory inside the container
12 | COPY datastore ./datastore
13 | COPY head ./head
14 | COPY httpapi ./httpapi
15 | COPY hydra ./hydra
16 | COPY idgen ./idgen
17 | COPY ui ./ui
18 | COPY utils ./utils
19 | COPY version ./version
20 | COPY metrics ./metrics
21 | COPY metricstasks ./metricstasks
22 | COPY periodictasks ./periodictasks
23 | COPY providers ./providers
24 | COPY testing ./testing
25 | COPY main.go promconfig.yaml ./
26 |
27 | # Run the build and install
28 | RUN go install -tags=openssl -v ./...
29 |
30 | # Create single-layer run image
31 | FROM alpine@sha256:bc41182d7ef5ffc53a40b044e725193bc10142a1243f395ee852a8d9730fc2ad
32 | RUN apk add --no-cache openssl curl # curl is for health checking
33 | COPY --from=build /go/bin/hydra-booster /hydra-booster
34 | COPY entrypoint.sh ./
35 | RUN chmod a+x entrypoint.sh
36 | # HTTP API
37 | EXPOSE 7779
38 |
39 | # Prometheus /metrics
40 | EXPOSE 8888
41 |
42 | # Heads
43 | EXPOSE 30000-32767
44 | EXPOSE 30000-32767/udp
45 |
46 | # CMD ["./hydra-booster", "-metrics-addr=0.0.0.0:8888", "-httpapi-addr=0.0.0.0:7779", "-ui-theme=none"]
47 | CMD ["./entrypoint.sh"]
48 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | This project is transitioning from an MIT-only license to a dual MIT/Apache-2.0 license.
2 | Unless otherwise noted, all code contributed prior to 2019-05-06 and not contributed by
3 | a user listed in [this signoff issue](https://github.com/ipfs/go-ipfs/issues/6302) is
4 | licensed under MIT-only. All new contributions (and past contributions since 2019-05-06)
5 | are licensed under a dual MIT/Apache-2.0 license.
6 |
7 | MIT: https://www.opensource.org/licenses/mit
8 | Apache-2.0: https://www.apache.org/licenses/license-2.0
9 |
--------------------------------------------------------------------------------
/LICENSE-APACHE:
--------------------------------------------------------------------------------
1 | Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
2 |
3 | http://www.apache.org/licenses/LICENSE-2.0
4 |
5 | Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
6 |
--------------------------------------------------------------------------------
/LICENSE-MIT:
--------------------------------------------------------------------------------
1 | The MIT License (MIT)
2 |
3 | Permission is hereby granted, free of charge, to any person obtaining a copy
4 | of this software and associated documentation files (the "Software"), to deal
5 | in the Software without restriction, including without limitation the rights
6 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
7 | copies of the Software, and to permit persons to whom the Software is
8 | furnished to do so, subject to the following conditions:
9 |
10 | The above copyright notice and this permission notice shall be included in
11 | all copies or substantial portions of the Software.
12 |
13 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
16 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
17 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
18 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
19 | THE SOFTWARE.
20 |
--------------------------------------------------------------------------------
/Makefile:
--------------------------------------------------------------------------------
1 | all:
2 | go build -tags=openssl
3 |
4 |
--------------------------------------------------------------------------------
/datastore/pgxpool.go:
--------------------------------------------------------------------------------
1 | package datastore
2 |
3 | import (
4 | "github.com/ipfs/go-datastore"
5 | "github.com/jackc/pgx/v4/pgxpool"
6 | )
7 |
8 | type WithPgxPool interface {
9 | PgxPool() *pgxpool.Pool
10 | }
11 |
12 | type BatchingWithPgxPool struct {
13 | Pool WithPgxPool
14 | datastore.Batching
15 | }
16 |
17 | func (x BatchingWithPgxPool) PgxPool() *pgxpool.Pool {
18 | return x.Pool.PgxPool()
19 | }
20 |
--------------------------------------------------------------------------------
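A minimal usage sketch (not from the repo) of how `BatchingWithPgxPool` composes a batching datastore with raw pool access; `poolHolder` is a hypothetical stand-in for the PostgreSQL-backed datastore, which is what would normally supply the pool:

```go
package main

import (
	"fmt"

	ds "github.com/ipfs/go-datastore"
	"github.com/jackc/pgx/v4/pgxpool"

	hds "github.com/libp2p/hydra-booster/datastore"
)

// poolHolder is a hypothetical WithPgxPool implementation, used only for
// illustration here.
type poolHolder struct{ pool *pgxpool.Pool }

func (p poolHolder) PgxPool() *pgxpool.Pool { return p.pool }

func main() {
	var pool *pgxpool.Pool // assume created elsewhere, e.g. via pgxpool.Connect

	// Compose a Batching datastore with pool access so callers can issue raw
	// SQL against the same database the datastore writes to.
	wrapped := hds.BatchingWithPgxPool{
		Pool:     poolHolder{pool},
		Batching: ds.NewMapDatastore(),
	}
	fmt.Println(wrapped.PgxPool() == pool) // true
}
```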
/datastore/postgres.go:
--------------------------------------------------------------------------------
1 | package datastore
2 |
3 | import (
4 | "context"
5 | "fmt"
6 | "os"
7 |
8 | pgds "github.com/ipfs/ipfs-ds-postgres"
9 | "github.com/jackc/pgx/v4/pgxpool"
10 | )
11 |
12 | const TableName = "records"
13 |
14 | // NewPostgreSQLDatastore creates a new pgds.Datastore that talks to a PostgreSQL database
15 | func NewPostgreSQLDatastore(ctx context.Context, connstr string, createDB bool) (*pgds.Datastore, error) {
16 | if createDB {
17 | connConf, err := pgxpool.ParseConfig(connstr)
18 | if err != nil {
19 | return nil, err
20 | }
21 | pool, err := pgxpool.ConnectConfig(ctx, connConf)
22 | if err != nil {
23 | return nil, err
24 | }
25 | fmt.Fprintf(os.Stderr, "Creating Table\n")
26 | _, err = pool.Exec(ctx, fmt.Sprintf("CREATE TABLE IF NOT EXISTS %s (key TEXT NOT NULL UNIQUE, data BYTEA)", TableName))
27 | if err != nil {
28 | return nil, err
29 | }
30 | fmt.Fprintf(os.Stderr, "Creating Index\n")
31 | _, err = pool.Exec(ctx, fmt.Sprintf("CREATE INDEX IF NOT EXISTS %s_key_text_pattern_ops_idx ON %s (key text_pattern_ops)", TableName, TableName))
32 | if err != nil {
33 | return nil, err
34 | }
35 | pool.Close()
36 | }
37 | fmt.Fprintf(os.Stderr, "Connecting to Database\n")
38 | ds, err := pgds.NewDatastore(ctx, connstr, pgds.Table(TableName))
39 | if err != nil {
40 | return nil, err
41 | }
42 | return ds, nil
43 | }
44 |
--------------------------------------------------------------------------------
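A short sketch (the DSN below is illustrative, and a reachable database is assumed) of wiring a Hydra datastore to PostgreSQL via `NewPostgreSQLDatastore`:

```go
package main

import (
	"context"
	"log"

	hds "github.com/libp2p/hydra-booster/datastore"
)

func main() {
	ctx := context.Background()
	// Illustrative DSN; any pgx-compatible connection string works.
	connstr := "postgres://user:pass@localhost:5432/hydra"

	// Passing true creates the `records` table and its text_pattern_ops
	// index (used for prefix scans) before connecting the datastore itself.
	store, err := hds.NewPostgreSQLDatastore(ctx, connstr, true)
	if err != nil {
		log.Fatal(err)
	}
	defer store.Close()
}
```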
/docs/deployment.md:
--------------------------------------------------------------------------------
1 | # Hydra Booster Deployment Notes
2 |
3 | The Hydra nodes are deployed to DigitalOcean as a Kubernetes cluster. The [Kubernetes configuration files and instructions on applying the config](../k8s) are kept in this repo.
4 |
5 | ## Deployment config
6 |
7 | These are the environment variables that can be tweaked to affect the deployment:
8 |
9 | * `HYDRA_NAME` - a name for the Hydra that is used to more easily distinguish between Hydras in metrics
10 | * `HYDRA_NHEADS` - controls the number of heads that are spawned by a Hydra
11 | * `HYDRA_PORT_BEGIN` - controls the port that Hydra heads listen on. Each head is allocated a port sequentially beginning from the port specified here. See [Cluster Setup](#cluster-setup) below for what this value should be for each Hydra
12 | * `HYDRA_DB` - a PostgreSQL database connection string that can be shared by all Hydras in the swarm.
13 | * `HYDRA_DISABLE_PROV_GC` - disables provider record garbage collection (when used in combination with `HYDRA_DB` it should be `true` on all but one Hydra).
14 | * `HYDRA_DISABLE_PROV_COUNTS` - disables provider record counting, which is used in metrics reporting (when used in combination with `HYDRA_DB` it should be `true` on all but one Hydra).
15 |
16 | ## Cluster setup
17 |
18 | We have one _cluster_ in DigitalOcean's `SFO2` region with a _deployment_ for each Hydra. Deployments have an application name picked from [petnames.net](http://www.petnames.net/unusual-pet-names.html) and live in the `hydra-boosters` namespace. Each deployment has _one pod_ and a `NodePort` service that forwards external ports to internal ports on the pod.
19 |
20 | This [blog post](https://medium.com/google-cloud/kubernetes-nodeport-vs-loadbalancer-vs-ingress-when-should-i-use-what-922f010849e0) has some good info and diagrams on the differences between the different types of "services" that Kubernetes has.
21 |
22 | TL;DR: `NodePort` restricts you to exposing public services on ports in the range `30000-32767`. For people wanting to expose HTTP services on port `80` this is problematic, but we don't care. We also do not need any actual load balancing to happen; we just need ports exposed publicly.
23 |
24 | Hydra head swarm listening ports are allocated as such:
25 |
26 | | | Port range |
27 | | -------- | ------------- |
28 | | Alasybil | `30000-30249` |
29 | | Bubbles | `30250-30499` |
30 | | Chumpy | `30500-30749` |
31 | | Domino | `30750-30999` |
32 | | Euclid | `31000-31249` |
33 | | Flake | `31250-31499` |
34 | | Grendel | `31500-31749` |
35 | | Hojo | `31750-31999` |
36 | | Ibycus | `32000-32249` |
37 | | Jetta | `32250-32499` |
38 | | ... | ... |
39 |
40 | This gives us **up to 10 hydras and 2,500 heads per cluster** (actually you can have an extra one with 100 heads if you really need it). It assumes we can run up to 250 heads on a single node. We may want to revisit these allocations if the hardware is not capable.
41 |
42 | Ports `32600-32767` are reserved for misc other services. We currently have 2 per hydra (httpapi and metrics).
43 |
44 | Misc service ports are allocated as such:
45 |
46 | | | HTTP API port | Metrics port | Load Balancer for Metrics |
47 | | -------- | ------------- | ------------ | ------------------------- |
48 | | Alasybil | `32600` | `32601` | `32602` |
49 | | Bubbles | `32610` | `32611` | `32612` |
50 | | Chumpy | `32620` | `32621` | `32622` |
51 | | Domino | `32630` | `32631` | `32632` |
52 | | Euclid | `32640` | `32641` | `32642` |
53 | | Flake | `32650` | `32651` | `32652` |
54 | | Grendel | `32660` | `32661` | `32662` |
55 | | Hojo | `32670` | `32671` | `32672` |
56 | | Ibycus | `32680` | `32681` | `32682` |
57 | | Jetta | `32690` | `32691` | `32692` |
58 | | ... | ... | ... | ... |
59 |
60 | This gives us **up to 10 misc service ports per hydra**.
61 |
62 | There is one firewall rule ("Networking" => "Firewalls") that opens up ports `30000-32767` (the ports that `NodePort` allows us to bind to).
63 |
64 | We're currently running **10 Hydras** with the following head counts:
65 |
66 | | | Heads |
67 | | -------- | ----- |
68 | | Alasybil | `100` |
69 | | Bubbles | `100` |
70 | | Chumpy | `100` |
71 | | Domino | `100` |
72 | | Euclid | `100` |
73 | | Flake | `100` |
74 | | Grendel | `100` |
75 | | Hojo | `100` |
76 | | Ibycus | `100` |
77 | | Jetta | `100` |
78 | | ... | ... |
79 |
80 | ## PostgreSQL datastore
81 |
82 | This is a simple PostgreSQL database hosted by DigitalOcean.
83 |
84 | ## Metrics and reporting
85 |
86 | Metrics are available at the [PL Grafana](https://protocollabs.grafana.net).
87 |
88 | ### Grafana Prometheus config
89 |
90 | The Grafana Prometheus config you need is (substitute the `10.` IPs for the actual exposed load balancer IPs):
91 |
92 | ```yaml
93 | - job_name: 'hydrabooster'
94 | scrape_interval: 10s
95 | static_configs:
96 | - targets: ['10.8.5.79:8888', '10.8.15.102:8888', '10.8.10.98:8888', '10.8.5.238:8888', '10.8.15.157:8888']
97 | ```
98 |
99 | ## Misc
100 |
101 | I used the following script to generate the YAML config for the head ports:
102 |
103 | ```js
104 | const begin = 30200
105 | for (let i = 0; i < 100; i++) {
106 | console.log(` - name: head-${i.toString().padStart(3, '0')}
107 | port: ${begin + i}
108 | nodePort: ${begin + i}
109 | protocol: TCP
110 | targetPort: ${begin + i}`)
111 | }
112 | ```
113 |
--------------------------------------------------------------------------------
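The allocation scheme in the deployment notes above is regular enough to compute. A small sketch (the function names here are ours, not the repo's): each Hydra gets 250 sequential head ports from 30000 and 10 reserved misc service ports from 32600.

```go
package main

import "fmt"

// headPorts returns the swarm listening port range for Hydra i
// (0-indexed: Alasybil=0 … Jetta=9): 250 sequential ports from 30000.
func headPorts(i int) (begin, end int) {
	begin = 30000 + 250*i
	return begin, begin + 249
}

// miscPorts returns the HTTP API, metrics, and metrics load balancer
// ports for Hydra i: 10 reserved ports per Hydra starting at 32600.
func miscPorts(i int) (httpAPI, metrics, metricsLB int) {
	base := 32600 + 10*i
	return base, base + 1, base + 2
}

func main() {
	begin, end := headPorts(9) // Jetta
	h, m, lb := miscPorts(9)   // Jetta
	fmt.Println(begin, end)    // 32250 32499
	fmt.Println(h, m, lb)      // 32690 32691 32692
}
```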
/entrypoint.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 |
3 | ./hydra-booster -metrics-addr=0.0.0.0:8888 -httpapi-addr=0.0.0.0:7779 -ui-theme=none
4 |
--------------------------------------------------------------------------------
/exec_bash.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | set -u
3 | set -e
4 | docker build . -t hydra-booster
5 | docker run -it hydra-booster sh
--------------------------------------------------------------------------------
/go.mod:
--------------------------------------------------------------------------------
1 | module github.com/libp2p/hydra-booster
2 |
3 | require (
4 | contrib.go.opencensus.io/exporter/prometheus v0.3.0
5 | github.com/alanshaw/prom-metrics-client v0.3.0
6 | github.com/aws/aws-sdk-go v1.43.1
7 | github.com/aws/aws-sdk-go-v2 v1.13.0
8 | github.com/aws/aws-sdk-go-v2/config v1.12.0
9 | github.com/aws/aws-sdk-go-v2/credentials v1.7.0
10 | github.com/aws/aws-sdk-go-v2/service/dynamodb v1.13.0
11 | github.com/aws/smithy-go v1.10.0
12 | github.com/axiomhq/hyperloglog v0.0.0-20191112132149-a4c4c47bc57f
13 | github.com/benbjohnson/clock v1.3.0
14 | github.com/dustin/go-humanize v1.0.0
15 | github.com/go-kit/log v0.2.0
16 | github.com/gorilla/mux v1.8.0
17 | github.com/hashicorp/go-multierror v1.1.1
18 | github.com/hnlq715/golang-lru v0.2.1-0.20200422024707-82ba7badf9a6
19 | github.com/ipfs/go-cid v0.3.2
20 | github.com/ipfs/go-datastore v0.6.0
21 | github.com/ipfs/go-ds-dynamodb v0.1.0
22 | github.com/ipfs/go-ds-leveldb v0.5.0
23 | github.com/ipfs/go-ipns v0.3.0
24 | github.com/ipfs/go-libipfs v0.0.0-20221207180439-c7e7738575f9
25 | github.com/ipfs/go-log v1.0.5
26 | github.com/ipfs/ipfs-ds-postgres v0.2.0
27 | github.com/jackc/pgx/v4 v4.9.0
28 | github.com/libp2p/go-libp2p v0.24.1
29 | github.com/libp2p/go-libp2p-kad-dht v0.20.0
30 | github.com/libp2p/go-libp2p-kbucket v0.5.0
31 | github.com/libp2p/go-libp2p-record v0.2.0
32 | github.com/multiformats/go-multiaddr v0.8.0
33 | github.com/multiformats/go-multicodec v0.7.0
34 | github.com/multiformats/go-multihash v0.2.1
35 | github.com/ncabatoff/process-exporter v0.7.10
36 | github.com/prometheus/client_golang v1.14.0
37 | github.com/prometheus/node_exporter v1.3.1
38 | github.com/stretchr/testify v1.8.1
39 | github.com/whyrusleeping/timecache v0.0.0-20160911033111-cfcb2f1abfee
40 | go.opencensus.io v0.24.0
41 | golang.org/x/crypto v0.3.0
42 | )
43 |
44 | require (
45 | github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751 // indirect
46 | github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d // indirect
47 | github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.9.0 // indirect
48 | github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.4 // indirect
49 | github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.2.0 // indirect
50 | github.com/aws/aws-sdk-go-v2/internal/ini v1.3.3 // indirect
51 | github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.7.0 // indirect
52 | github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.5.0 // indirect
53 | github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.6.0 // indirect
54 | github.com/aws/aws-sdk-go-v2/service/sso v1.8.0 // indirect
55 | github.com/aws/aws-sdk-go-v2/service/sts v1.13.0 // indirect
56 | github.com/beevik/ntp v0.3.0 // indirect
57 | github.com/beorn7/perks v1.0.1 // indirect
58 | github.com/cespare/xxhash/v2 v2.2.0 // indirect
59 | github.com/containerd/cgroups v1.0.4 // indirect
60 | github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf // indirect
61 | github.com/coreos/go-systemd/v22 v22.5.0 // indirect
62 | github.com/davecgh/go-spew v1.1.1 // indirect
63 | github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c // indirect
64 | github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0 // indirect
65 | github.com/dgryski/go-metro v0.0.0-20180109044635-280f6062b5bc // indirect
66 | github.com/docker/go-units v0.5.0 // indirect
67 | github.com/elastic/gosigar v0.14.2 // indirect
68 | github.com/ema/qdisc v0.0.0-20200603082823-62d0308e3e00 // indirect
69 | github.com/flynn/noise v1.0.0 // indirect
70 | github.com/francoispqt/gojay v1.2.13 // indirect
71 | github.com/go-logfmt/logfmt v0.5.1 // indirect
72 | github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 // indirect
73 | github.com/godbus/dbus v0.0.0-20190402143921-271e53dc4968 // indirect
74 | github.com/godbus/dbus/v5 v5.1.0 // indirect
75 | github.com/gogo/protobuf v1.3.2 // indirect
76 | github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
77 | github.com/golang/mock v1.6.0 // indirect
78 | github.com/golang/protobuf v1.5.2 // indirect
79 | github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db // indirect
80 | github.com/google/go-cmp v0.5.9 // indirect
81 | github.com/google/gopacket v1.1.19 // indirect
82 | github.com/google/pprof v0.0.0-20221203041831-ce31453925ec // indirect
83 | github.com/google/uuid v1.3.0 // indirect
84 | github.com/gopherjs/gopherjs v0.0.0-20190812055157-5d271430af9f // indirect
85 | github.com/gorilla/websocket v1.5.0 // indirect
86 | github.com/hashicorp/errwrap v1.1.0 // indirect
87 | github.com/hashicorp/go-envparse v0.0.0-20200406174449-d9cfd743a15e // indirect
88 | github.com/hashicorp/golang-lru v0.5.4 // indirect
89 | github.com/hodgesds/perf-utils v0.4.0 // indirect
90 | github.com/huin/goupnp v1.0.3 // indirect
91 | github.com/illumos/go-kstat v0.0.0-20210513183136-173c9b0a9973 // indirect
92 | github.com/ipfs/go-ipfs-util v0.0.2 // indirect
93 | github.com/ipfs/go-log/v2 v2.5.1 // indirect
94 | github.com/ipld/go-ipld-prime v0.18.0 // indirect
95 | github.com/jackc/chunkreader/v2 v2.0.1 // indirect
96 | github.com/jackc/pgconn v1.7.0 // indirect
97 | github.com/jackc/pgio v1.0.0 // indirect
98 | github.com/jackc/pgpassfile v1.0.0 // indirect
99 | github.com/jackc/pgproto3/v2 v2.0.5 // indirect
100 | github.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b // indirect
101 | github.com/jackc/pgtype v1.5.0 // indirect
102 | github.com/jackc/puddle v1.1.2 // indirect
103 | github.com/jackpal/go-nat-pmp v1.0.2 // indirect
104 | github.com/jbenet/go-temp-err-catcher v0.1.0 // indirect
105 | github.com/jbenet/goprocess v0.1.4 // indirect
106 | github.com/jmespath/go-jmespath v0.4.0 // indirect
107 | github.com/josharian/native v0.0.0-20200817173448-b6b71def0850 // indirect
108 | github.com/jsimonetti/rtnetlink v0.0.0-20211022192332-93da33804786 // indirect
109 | github.com/klauspost/compress v1.15.12 // indirect
110 | github.com/klauspost/cpuid/v2 v2.2.1 // indirect
111 | github.com/koron/go-ssdp v0.0.3 // indirect
112 | github.com/libp2p/go-buffer-pool v0.1.0 // indirect
113 | github.com/libp2p/go-cidranger v1.1.0 // indirect
114 | github.com/libp2p/go-flow-metrics v0.1.0 // indirect
115 | github.com/libp2p/go-libp2p-asn-util v0.2.0 // indirect
116 | github.com/libp2p/go-msgio v0.2.0 // indirect
117 | github.com/libp2p/go-nat v0.1.0 // indirect
118 | github.com/libp2p/go-netroute v0.2.1 // indirect
119 | github.com/libp2p/go-openssl v0.1.0 // indirect
120 | github.com/libp2p/go-reuseport v0.2.0 // indirect
121 | github.com/libp2p/go-yamux/v4 v4.0.0 // indirect
122 | github.com/lucas-clemente/quic-go v0.31.1 // indirect
123 | github.com/lufia/iostat v1.2.0 // indirect
124 | github.com/marten-seemann/qtls-go1-18 v0.1.3 // indirect
125 | github.com/marten-seemann/qtls-go1-19 v0.1.1 // indirect
126 | github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd // indirect
127 | github.com/mattn/go-isatty v0.0.16 // indirect
128 | github.com/mattn/go-pointer v0.0.1 // indirect
129 | github.com/mattn/go-xmlrpc v0.0.3 // indirect
130 | github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
131 | github.com/mdlayher/genetlink v1.0.0 // indirect
132 | github.com/mdlayher/netlink v1.4.1 // indirect
133 | github.com/mdlayher/socket v0.0.0-20210307095302-262dc9984e00 // indirect
134 | github.com/mdlayher/wifi v0.0.0-20200527114002-84f0b9457fdd // indirect
135 | github.com/miekg/dns v1.1.50 // indirect
136 | github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b // indirect
137 | github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc // indirect
138 | github.com/minio/sha256-simd v1.0.0 // indirect
139 | github.com/mr-tron/base58 v1.2.0 // indirect
140 | github.com/multiformats/go-base32 v0.1.0 // indirect
141 | github.com/multiformats/go-base36 v0.2.0 // indirect
142 | github.com/multiformats/go-multiaddr-dns v0.3.1 // indirect
143 | github.com/multiformats/go-multiaddr-fmt v0.1.0 // indirect
144 | github.com/multiformats/go-multibase v0.1.1 // indirect
145 | github.com/multiformats/go-multistream v0.3.3 // indirect
146 | github.com/multiformats/go-varint v0.0.7 // indirect
147 | github.com/ncabatoff/go-seq v0.0.0-20180805175032-b08ef85ed833 // indirect
148 | github.com/onsi/ginkgo/v2 v2.5.1 // indirect
149 | github.com/opencontainers/runtime-spec v1.0.2 // indirect
150 | github.com/opentracing/opentracing-go v1.2.0 // indirect
151 | github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 // indirect
152 | github.com/pkg/errors v0.9.1 // indirect
153 | github.com/pmezard/go-difflib v1.0.0 // indirect
154 | github.com/polydawn/refmt v0.0.0-20201211092308-30ac6d18308e // indirect
155 | github.com/prometheus/client_model v0.3.0 // indirect
156 | github.com/prometheus/common v0.37.0 // indirect
157 | github.com/prometheus/procfs v0.8.0 // indirect
158 | github.com/prometheus/statsd_exporter v0.21.0 // indirect
159 | github.com/raulk/go-watchdog v1.3.0 // indirect
160 | github.com/safchain/ethtool v0.1.0 // indirect
161 | github.com/samber/lo v1.36.0 // indirect
162 | github.com/smartystreets/assertions v1.0.1 // indirect
163 | github.com/soundcloud/go-runit v0.0.0-20150630195641-06ad41a06c4a // indirect
164 | github.com/spacemonkeygo/spacelog v0.0.0-20180420211403-2296661a0572 // indirect
165 | github.com/spaolacci/murmur3 v1.1.0 // indirect
166 | github.com/stretchr/objx v0.5.0 // indirect
167 | github.com/syndtr/goleveldb v1.0.0 // indirect
168 | github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1 // indirect
169 | go.uber.org/atomic v1.10.0 // indirect
170 | go.uber.org/dig v1.15.0 // indirect
171 | go.uber.org/fx v1.18.2 // indirect
172 | go.uber.org/multierr v1.8.0 // indirect
173 | go.uber.org/zap v1.24.0 // indirect
174 | golang.org/x/exp v0.0.0-20221205204356-47842c84f3db // indirect
175 | golang.org/x/mod v0.7.0 // indirect
176 | golang.org/x/net v0.3.0 // indirect
177 | golang.org/x/sync v0.1.0 // indirect
178 | golang.org/x/sys v0.3.0 // indirect
179 | golang.org/x/text v0.5.0 // indirect
180 | golang.org/x/tools v0.3.0 // indirect
181 | golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f // indirect
182 | google.golang.org/genproto v0.0.0-20200825200019-8632dd797987 // indirect
183 | google.golang.org/grpc v1.40.0 // indirect
184 | google.golang.org/protobuf v1.28.1 // indirect
185 | gopkg.in/alecthomas/kingpin.v2 v2.2.6 // indirect
186 | gopkg.in/yaml.v2 v2.4.0 // indirect
187 | gopkg.in/yaml.v3 v3.0.1 // indirect
188 | lukechampine.com/blake3 v1.1.7 // indirect
189 | )
190 |
191 | go 1.18
192 |
--------------------------------------------------------------------------------
/head/head.go:
--------------------------------------------------------------------------------
1 | package head
2 |
3 | import (
4 | "context"
5 | "fmt"
6 | "os"
7 | "sync"
8 | "time"
9 |
10 | "github.com/hnlq715/golang-lru/simplelru"
11 | "github.com/ipfs/go-cid"
12 | "github.com/ipfs/go-datastore"
13 | "github.com/ipfs/go-ipns"
14 | "github.com/libp2p/go-libp2p"
15 | dht "github.com/libp2p/go-libp2p-kad-dht"
16 | "github.com/libp2p/go-libp2p-kad-dht/providers"
17 | kbucket "github.com/libp2p/go-libp2p-kbucket"
18 | record "github.com/libp2p/go-libp2p-record"
19 | "github.com/libp2p/go-libp2p/core/host"
20 | "github.com/libp2p/go-libp2p/core/network"
21 | "github.com/libp2p/go-libp2p/core/peer"
22 | "github.com/libp2p/go-libp2p/core/routing"
23 | rcmgr "github.com/libp2p/go-libp2p/p2p/host/resource-manager"
24 | "github.com/libp2p/go-libp2p/p2p/host/resource-manager/obs"
25 | connmgr "github.com/libp2p/go-libp2p/p2p/net/connmgr"
26 | noise "github.com/libp2p/go-libp2p/p2p/security/noise"
27 | tls "github.com/libp2p/go-libp2p/p2p/security/tls"
28 | quic "github.com/libp2p/go-libp2p/p2p/transport/quic"
29 | tcp "github.com/libp2p/go-libp2p/p2p/transport/tcp"
30 | "github.com/libp2p/hydra-booster/head/opts"
31 | "github.com/libp2p/hydra-booster/metrics"
32 | "github.com/libp2p/hydra-booster/metricstasks"
33 | "github.com/libp2p/hydra-booster/periodictasks"
34 | hproviders "github.com/libp2p/hydra-booster/providers"
35 | "github.com/libp2p/hydra-booster/version"
36 | "github.com/multiformats/go-multiaddr"
37 | )
38 |
39 | const (
40 | providerRecordsTaskInterval = time.Minute * 5
41 | provDisabledGCInterval = time.Hour * 24 * 365 * 100 // set really high to be "disabled"
42 | provCacheSize = 256
43 | provCacheExpiry = time.Hour
44 | )
45 |
46 | // BootstrapStatus describes the status of connecting to a bootstrap node.
47 | type BootstrapStatus struct {
48 | Done bool
49 | Err error
50 | }
51 |
52 | // Head is a container for ipfs/libp2p components used by a Hydra head.
53 | type Head struct {
54 | Host host.Host
55 | Datastore datastore.Datastore
56 | Routing routing.Routing
57 | }
58 |
59 | func buildRcmgr(ctx context.Context, disableRM bool, limitsFile string) (network.ResourceManager, error) {
60 | var limiter rcmgr.Limiter
61 |
62 | if disableRM {
63 | limiter = rcmgr.NewFixedLimiter(rcmgr.InfiniteLimits)
64 | } else if limitsFile != "" {
65 | f, err := os.Open(limitsFile)
66 | if err != nil {
67 | return nil, fmt.Errorf("opening Resource Manager limits file: %w", err)
68 | }
69 | limiter, err = rcmgr.NewDefaultLimiterFromJSON(f)
70 | if err != nil {
71 | return nil, fmt.Errorf("creating Resource Manager limiter: %w", err)
72 | }
73 | } else {
74 | limits := rcmgr.DefaultLimits
75 |
76 | limits.SystemBaseLimit.ConnsOutbound = 128
77 | limits.SystemBaseLimit.ConnsInbound = 128
78 | limits.SystemBaseLimit.Conns = 256
79 | limits.SystemLimitIncrease.Conns = 1024
80 | limits.SystemLimitIncrease.ConnsInbound = 1024
81 | limits.SystemLimitIncrease.ConnsOutbound = 1024
82 | libp2p.SetDefaultServiceLimits(&limits)
83 |
84 | limiter = rcmgr.NewFixedLimiter(limits.AutoScale())
85 | }
86 |
87 | rcmgrMetrics, err := metrics.CreateRcmgrMetrics(ctx)
88 | if err != nil {
89 | return nil, fmt.Errorf("creating Resource Manager metrics: %w", err)
90 | }
91 | mgr, err := rcmgr.NewResourceManager(
92 | limiter,
93 | rcmgr.WithTraceReporter(obs.StatsTraceReporter{}),
94 | rcmgr.WithMetrics(rcmgrMetrics),
95 | )
96 | if err != nil {
97 | return nil, fmt.Errorf("constructing resource manager: %w", err)
98 | }
99 |
100 | return mgr, nil
101 | }
102 |
103 | // NewHead constructs a new Hydra Booster head node
104 | func NewHead(ctx context.Context, options ...opts.Option) (*Head, chan BootstrapStatus, error) {
105 | cfg := opts.Options{}
106 | cfg.Apply(append([]opts.Option{opts.Defaults}, options...)...)
107 |
108 | cmgr, err := connmgr.NewConnManager(cfg.ConnMgrLowWater, cfg.ConnMgrHighWater, connmgr.WithGracePeriod(cfg.ConnMgrGracePeriod))
109 | if err != nil {
110 | return nil, nil, fmt.Errorf("building connmgr: %w", err)
111 | }
112 |
113 | ua := version.UserAgent
114 | if cfg.EnableRelay {
115 | ua += "+relay"
116 | }
117 |
118 | rm, err := buildRcmgr(ctx, cfg.DisableResourceManager, cfg.ResourceManagerLimitsFile)
119 | if err != nil {
120 | return nil, nil, err
121 | }
122 |
123 | libp2pOpts := []libp2p.Option{
124 | 		libp2p.UserAgent(ua),
125 | libp2p.ListenAddrs(cfg.Addrs...),
126 | libp2p.ConnectionManager(cmgr),
127 | libp2p.Identity(cfg.ID),
128 | libp2p.EnableNATService(),
129 | libp2p.AutoNATServiceRateLimit(0, 3, time.Minute),
130 | libp2p.DefaultMuxers,
131 | libp2p.Transport(quic.NewTransport),
132 | libp2p.Transport(tcp.NewTCPTransport),
133 | libp2p.Security(tls.ID, tls.New),
134 | libp2p.Security(noise.ID, noise.New),
135 | libp2p.ResourceManager(rm),
136 | }
137 | if cfg.Peerstore != nil {
138 | libp2pOpts = append(libp2pOpts, libp2p.Peerstore(cfg.Peerstore))
139 | }
140 | if cfg.EnableRelay {
141 | libp2pOpts = append(libp2pOpts, libp2p.EnableRelay())
142 | }
143 |
144 | node, err := libp2p.New(libp2pOpts...)
145 | if err != nil {
146 | return nil, nil, fmt.Errorf("failed to spawn libp2p node: %w", err)
147 | }
148 | go func() {
149 | <-ctx.Done()
150 | node.Close()
151 | }()
152 |
153 | dhtOpts := []dht.Option{
154 | dht.Mode(dht.ModeServer),
155 | dht.ProtocolPrefix(cfg.ProtocolPrefix),
156 | dht.BucketSize(cfg.BucketSize),
157 | dht.Datastore(cfg.Datastore),
158 | dht.QueryFilter(dht.PublicQueryFilter),
159 | dht.RoutingTableFilter(dht.PublicRoutingTableFilter),
160 | }
161 |
162 | if cfg.DisableValues {
163 | dhtOpts = append(dhtOpts, dht.DisableValues())
164 | } else {
165 | dhtOpts = append(dhtOpts, dht.Validator(record.NamespacedValidator{
166 | "pk": record.PublicKeyValidator{},
167 | "ipns": ipns.Validator{KeyBook: node.Peerstore()},
168 | }))
169 | }
170 | if cfg.DisableProviders {
171 | dhtOpts = append(dhtOpts, dht.DisableProviders())
172 | }
173 |
174 | var providerStore providers.ProviderStore
175 | if cfg.ProviderStoreBuilder == nil {
176 | ps, err := newDefaultProviderStore(ctx, cfg, node)
177 | if err != nil {
178 | return nil, nil, err
179 | }
180 | providerStore = ps
181 | } else {
182 | ps, err := cfg.ProviderStoreBuilder(cfg, node)
183 | if err != nil {
184 | return nil, nil, err
185 | }
186 | providerStore = ps
187 | }
188 |
189 | if !cfg.DisableProvCounts {
190 | periodictasks.RunTasks(ctx, []periodictasks.PeriodicTask{metricstasks.NewProviderRecordsTask(cfg.Datastore, providerStore, providerRecordsTaskInterval)})
191 | }
192 |
193 | var cachingProviderStore *hproviders.CachingProviderStore
194 | if cfg.ProvidersFinder != nil {
195 | cachingProviderStore = hproviders.NewCachingProviderStore(providerStore, providerStore, cfg.ProvidersFinder, nil)
196 | providerStore = cachingProviderStore
197 | }
198 |
199 | dhtOpts = append(dhtOpts, dht.ProviderStore(providerStore))
200 |
201 | dhtNode, err := dht.New(ctx, node, dhtOpts...)
202 | if err != nil {
203 | return nil, nil, fmt.Errorf("failed to instantiate DHT: %w", err)
204 | }
205 |
206 | // if we are using the caching provider store, we need to give it the content router to use (the DHT)
207 | if cachingProviderStore != nil {
208 | cachingProviderStore.Router = dhtNode
209 | }
210 |
211 | // bootstrap in the background
212 | // it's safe to start doing this _before_ establishing any connections
213 | 	// as we'll trigger a bootstrap round as soon as we get a connection anyway.
214 | dhtNode.Bootstrap(ctx)
215 |
216 | bsCh := make(chan BootstrapStatus)
217 | hd := Head{
218 | Host: node,
219 | Datastore: cfg.Datastore,
220 | Routing: dhtNode,
221 | }
222 |
223 | go func() {
224 | 		// The limiter (if set) acts as a semaphore bounding how many heads bootstrap concurrently: acquire a slot here, release it after bootstrapping.
225 | if cfg.Limiter != nil {
226 | select {
227 | case cfg.Limiter <- struct{}{}:
228 | case <-ctx.Done():
229 | return
230 | }
231 | }
232 |
233 | // Connect to all bootstrappers, and protect them.
234 | if len(cfg.BootstrapPeers) > 0 {
235 | var wg sync.WaitGroup
236 | wg.Add(len(cfg.BootstrapPeers))
237 | for _, addr := range cfg.BootstrapPeers {
238 | go func(addr multiaddr.Multiaddr) {
239 | defer wg.Done()
240 | ai, err := peer.AddrInfoFromP2pAddr(addr)
241 | if err != nil {
242 | select {
243 | case bsCh <- BootstrapStatus{Err: fmt.Errorf("failed to get random bootstrap multiaddr: %w", err)}:
244 | case <-ctx.Done():
245 | }
246 | return
247 | }
248 | if err := node.Connect(context.Background(), *ai); err != nil {
249 | select {
250 | case bsCh <- BootstrapStatus{Err: fmt.Errorf("bootstrap connect failed with error: %w. Trying again", err)}:
251 | case <-ctx.Done():
252 | }
253 | return
254 | }
255 | node.ConnManager().Protect(ai.ID, "bootstrap-peer")
256 | }(addr)
257 | }
258 | wg.Wait()
259 |
260 | if ctx.Err() != nil {
261 | return
262 | }
263 |
264 | select {
265 | case bsCh <- BootstrapStatus{Done: true}:
266 | case <-ctx.Done():
267 | return
268 | }
269 | }
270 |
271 | if cfg.Limiter != nil {
272 | <-cfg.Limiter
273 | }
274 |
275 | close(bsCh)
276 | }()
277 |
278 | return &hd, bsCh, nil
279 | }
280 |
281 | func newDefaultProviderStore(ctx context.Context, options opts.Options, h host.Host) (providers.ProviderStore, error) {
282 | fmt.Fprintf(os.Stderr, "🥞 Using default providerstore\n")
283 | var provMgrOpts []providers.Option
284 | if options.DisableProvGC {
285 | cache, err := simplelru.NewLRUWithExpire(provCacheSize, provCacheExpiry, nil)
286 | if err != nil {
287 | return nil, err
288 | }
289 | provMgrOpts = append(provMgrOpts,
290 | providers.CleanupInterval(provDisabledGCInterval),
291 | providers.Cache(cache),
292 | )
293 | }
294 | var ps providers.ProviderStore
295 | ps, err := providers.NewProviderManager(ctx, h.ID(), h.Peerstore(), options.Datastore, provMgrOpts...)
296 | if err != nil {
297 | return nil, err
298 | }
299 | return ps, nil
300 | }
301 |
302 | // RoutingTable returns the underlying RoutingTable for this head
303 | func (s *Head) RoutingTable() *kbucket.RoutingTable {
304 | dht, _ := s.Routing.(*dht.IpfsDHT)
305 | return dht.RoutingTable()
306 | }
307 |
308 | // AddProvider adds the given provider to the datastore
309 | func (s *Head) AddProvider(ctx context.Context, c cid.Cid, id peer.ID) {
310 | dht, _ := s.Routing.(*dht.IpfsDHT)
311 | dht.ProviderStore().AddProvider(ctx, c.Hash(), peer.AddrInfo{ID: id})
312 | }
313 |
--------------------------------------------------------------------------------
/head/head_test.go:
--------------------------------------------------------------------------------
1 | package head
2 |
3 | import (
4 | "context"
5 | "fmt"
6 | "testing"
7 |
8 | "github.com/ipfs/go-datastore"
9 | "github.com/ipfs/go-datastore/sync"
10 | "github.com/libp2p/go-libp2p/p2p/host/peerstore/pstoreds"
11 | "github.com/libp2p/hydra-booster/head/opts"
12 | hydratesting "github.com/libp2p/hydra-booster/testing"
13 | )
14 |
15 | func TestSpawnHead(t *testing.T) { // TODO spawn a node to bootstrap from so we don't hit the public bootstrappers
16 | ctx, cancel := context.WithCancel(hydratesting.NewContext())
17 | defer cancel()
18 |
19 | _, bsCh, err := NewHead(ctx, opts.Datastore(datastore.NewMapDatastore()))
20 | if err != nil {
21 | t.Fatal(err)
22 | }
23 |
24 | for {
25 | status, ok := <-bsCh
26 | if !ok {
27 | t.Fatal(fmt.Errorf("channel closed before bootstrap complete"))
28 | }
29 | if status.Err != nil {
30 | fmt.Println(status.Err)
31 | }
32 | if status.Done {
33 | break
34 | }
35 | }
36 | }
37 |
38 | func TestSpawnHeadWithDisabledProviderGC(t *testing.T) { // TODO spawn a node to bootstrap from so we don't hit the public bootstrappers
39 | ctx, cancel := context.WithCancel(hydratesting.NewContext())
40 | defer cancel()
41 |
42 | _, bsCh, err := NewHead(
43 | ctx,
44 | opts.Datastore(datastore.NewMapDatastore()),
45 | opts.DisableProvGC(),
46 | )
47 | if err != nil {
48 | t.Fatal(err)
49 | }
50 |
51 | for {
52 | status, ok := <-bsCh
53 | if !ok {
54 | t.Fatal(fmt.Errorf("channel closed before bootstrap complete"))
55 | }
56 | if status.Err != nil {
57 | fmt.Println(status.Err)
58 | }
59 | if status.Done {
60 | break
61 | }
62 | }
63 | }
64 |
65 | func TestSpawnHeadWithCustomProtocolPrefix(t *testing.T) { // TODO spawn a node to bootstrap from so we don't hit the public bootstrappers
66 | ctx, cancel := context.WithCancel(hydratesting.NewContext())
67 | defer cancel()
68 |
69 | _, bsCh, err := NewHead(
70 | ctx,
71 | opts.Datastore(datastore.NewMapDatastore()),
72 | opts.ProtocolPrefix("/myapp"),
73 | opts.DisableProviders(),
74 | opts.DisableValues(),
75 | )
76 | if err != nil {
77 | t.Fatal(err)
78 | }
79 |
80 | for {
81 | status, ok := <-bsCh
82 | if !ok {
83 | t.Fatal(fmt.Errorf("channel closed before bootstrap complete"))
84 | }
85 | if status.Err != nil {
86 | fmt.Println(status.Err)
87 | }
88 | if status.Done {
89 | break
90 | }
91 | }
92 | }
93 |
94 | func TestSpawnHeadWithCustomPeerstore(t *testing.T) {
95 | ctx, cancel := context.WithCancel(hydratesting.NewContext())
96 | defer cancel()
97 |
98 | pstore, err := pstoreds.NewPeerstore(ctx, sync.MutexWrap(datastore.NewMapDatastore()), pstoreds.DefaultOpts())
99 | if err != nil {
100 | t.Fatal(err)
101 | }
102 |
103 | _, bsCh, err := NewHead(
104 | ctx,
105 | opts.Datastore(datastore.NewMapDatastore()),
106 | opts.Peerstore(pstore),
107 | )
108 | if err != nil {
109 | t.Fatal(err)
110 | }
111 |
112 | for {
113 | status, ok := <-bsCh
114 | if !ok {
115 | t.Fatal(fmt.Errorf("channel closed before bootstrap complete"))
116 | }
117 | if status.Err != nil {
118 | fmt.Println(status.Err)
119 | }
120 | if status.Done {
121 | break
122 | }
123 | }
124 | }
125 |
126 | func TestGetRoutingTable(t *testing.T) { // TODO spawn a node to bootstrap from so we don't hit the public bootstrappers
127 | ctx, cancel := context.WithCancel(hydratesting.NewContext())
128 | defer cancel()
129 |
130 | hd, _, err := NewHead(ctx, opts.Datastore(datastore.NewMapDatastore()))
131 | if err != nil {
132 | t.Fatal(err)
133 | }
134 |
135 | hd.RoutingTable()
136 | }
137 |
--------------------------------------------------------------------------------
/head/opts/options.go:
--------------------------------------------------------------------------------
1 | package opts
2 |
3 | import (
4 | "fmt"
5 | "net/http"
6 | "time"
7 |
8 | ds "github.com/ipfs/go-datastore"
9 | dssync "github.com/ipfs/go-datastore/sync"
10 | dht "github.com/libp2p/go-libp2p-kad-dht"
11 | "github.com/libp2p/go-libp2p-kad-dht/providers"
12 | kbucket "github.com/libp2p/go-libp2p-kbucket"
13 | "github.com/libp2p/go-libp2p/core/crypto"
14 | "github.com/libp2p/go-libp2p/core/host"
15 | "github.com/libp2p/go-libp2p/core/peerstore"
16 | "github.com/libp2p/go-libp2p/core/protocol"
17 | hproviders "github.com/libp2p/hydra-booster/providers"
18 | "github.com/multiformats/go-multiaddr"
19 | )
20 |
21 | type ProviderStoreBuilderFunc func(opts Options, host host.Host) (providers.ProviderStore, error)
22 |
23 | // Options are Hydra Head options
24 | type Options struct {
25 | Datastore ds.Batching
26 | Peerstore peerstore.Peerstore
27 | ProviderStoreBuilder ProviderStoreBuilderFunc
28 | DelegateHTTPClient *http.Client
29 | RoutingTable *kbucket.RoutingTable
30 | EnableRelay bool
31 | Addrs []multiaddr.Multiaddr
32 | ProtocolPrefix protocol.ID
33 | BucketSize int
34 | Limiter chan struct{}
35 | BootstrapPeers []multiaddr.Multiaddr
36 | ID crypto.PrivKey
37 | DisableProvGC bool
38 | DisableProvCounts bool
39 | DisableProviders bool
40 | DisableValues bool
41 | ProvidersFinder hproviders.ProvidersFinder
42 | DisableResourceManager bool
43 | ResourceManagerLimitsFile string
44 | ConnMgrHighWater int
45 | ConnMgrLowWater int
46 | ConnMgrGracePeriod time.Duration
47 | }
48 |
49 | // Option is the Hydra Head option type.
50 | type Option func(*Options) error
51 |
52 | // Apply applies the given options to these Options.
53 | func (o *Options) Apply(opts ...Option) error {
54 | for i, opt := range opts {
55 | if err := opt(o); err != nil {
56 | return fmt.Errorf("hydra node option %d failed: %s", i, err)
57 | }
58 | }
59 | return nil
60 | }
61 |
62 | // Defaults are the default Hydra Head options. This option will be automatically
63 | // prepended to any options you pass to the Hydra Head constructor.
64 | var Defaults = func(o *Options) error {
65 | o.Datastore = dssync.MutexWrap(ds.NewMapDatastore())
66 | tcpAddr, _ := multiaddr.NewMultiaddr("/ip4/0.0.0.0/tcp/0")
67 | quicAddr, _ := multiaddr.NewMultiaddr("/ip4/0.0.0.0/udp/0/quic")
68 | o.Addrs = []multiaddr.Multiaddr{tcpAddr, quicAddr}
69 | o.ProtocolPrefix = dht.DefaultPrefix
70 | o.BucketSize = 20
71 | o.BootstrapPeers = dht.DefaultBootstrapPeers
72 | o.ConnMgrHighWater = 1800
73 | o.ConnMgrLowWater = 1200
74 | o.ConnMgrGracePeriod = time.Minute
75 | return nil
76 | }
77 |
78 | // Datastore configures the Hydra Head to use the specified datastore.
79 | // Defaults to an in-memory (temporary) map.
80 | func Datastore(ds ds.Batching) Option {
81 | return func(o *Options) error {
82 | o.Datastore = ds
83 | return nil
84 | }
85 | }
86 |
87 | // Peerstore configures the Hydra Head to use the specified peerstore.
88 | // Defaults to an in-memory (temporary) map.
89 | func Peerstore(ps peerstore.Peerstore) Option {
90 | return func(o *Options) error {
91 | o.Peerstore = ps
92 | return nil
93 | }
94 | }
95 |
96 | func ProviderStoreBuilder(builder func(Options, host.Host) (providers.ProviderStore, error)) Option {
97 | return func(o *Options) error {
98 | o.ProviderStoreBuilder = builder
99 | return nil
100 | }
101 | }
102 |
103 | func DelegateHTTPClient(c *http.Client) Option {
104 | return func(o *Options) error {
105 | o.DelegateHTTPClient = c
106 | return nil
107 | }
108 | }
109 |
110 | // RoutingTable configures the Hydra Head to use the specified routing table.
111 | // Defaults to the routing table provided by IpfsDHT.
112 | func RoutingTable(rt *kbucket.RoutingTable) Option {
113 | return func(o *Options) error {
114 | o.RoutingTable = rt
115 | return nil
116 | }
117 | }
118 |
119 | // EnableRelay configures whether this node acts as a relay node.
120 | // The default value is false.
121 | func EnableRelay() Option {
122 | return func(o *Options) error {
123 | o.EnableRelay = true
124 | return nil
125 | }
126 | }
127 |
128 | // Addrs configures the swarm addresses for this Hydra node.
129 | // The default value is /ip4/0.0.0.0/tcp/0 and /ip4/0.0.0.0/udp/0/quic.
130 | func Addrs(addrs []multiaddr.Multiaddr) Option {
131 | return func(o *Options) error {
132 | o.Addrs = addrs
133 | return nil
134 | }
135 | }
136 |
137 | // ProtocolPrefix configures the application specific prefix attached to all DHT protocols by default.
138 | // The default value is "/ipfs".
139 | func ProtocolPrefix(pfx protocol.ID) Option {
140 | return func(o *Options) error {
141 | if pfx != "" {
142 | o.ProtocolPrefix = pfx
143 | }
144 | return nil
145 | }
146 | }
147 |
148 | // BucketSize configures the bucket size of the routing table.
149 | // The default value is 20.
150 | func BucketSize(bucketSize int) Option {
151 | return func(o *Options) error {
152 | if bucketSize != 0 {
153 | o.BucketSize = bucketSize
154 | }
155 | return nil
156 | }
157 | }
158 |
159 | // Limiter configures an optional channel used as a semaphore to bound how many heads bootstrap concurrently.
160 | // The default value is nil (no limit).
161 | func Limiter(l chan struct{}) Option {
162 | return func(o *Options) error {
163 | o.Limiter = l
164 | return nil
165 | }
166 | }
167 |
168 | // BootstrapPeers configures the set of peers the head connects to (and protects) when bootstrapping.
169 | // The default value is `dht.DefaultBootstrapPeers`.
170 | func BootstrapPeers(addrs []multiaddr.Multiaddr) Option {
171 | return func(o *Options) error {
172 | if len(addrs) > 0 {
173 | o.BootstrapPeers = addrs
174 | }
175 | return nil
176 | }
177 | }
178 |
179 | // ID configures the private key to use as the head's identity.
180 | func ID(id crypto.PrivKey) Option {
181 | return func(o *Options) error {
182 | if id != nil {
183 | o.ID = id
184 | }
185 | return nil
186 | }
187 | }
188 |
189 | // DisableProvGC disables garbage collections of provider records from the shared datastore.
190 | // The default value is false.
191 | func DisableProvGC() Option {
192 | return func(o *Options) error {
193 | o.DisableProvGC = true
194 | return nil
195 | }
196 | }
197 |
198 | // DisableProviders disables storing and retrieving provider records.
199 | // The default value is false.
200 | func DisableProviders() Option {
201 | return func(o *Options) error {
202 | o.DisableProviders = true
203 | return nil
204 | }
205 | }
206 |
207 | // DisableValues disables storing and retrieving value records (including public keys).
208 | // The default value is false.
209 | func DisableValues() Option {
210 | return func(o *Options) error {
211 | o.DisableValues = true
212 | return nil
213 | }
214 | }
215 |
216 | // DisableProvCounts disables counting the number of providers in the provider store.
217 | func DisableProvCounts() Option {
218 | return func(o *Options) error {
219 | o.DisableProvCounts = true
220 | return nil
221 | }
222 | }
223 |
224 | func ProvidersFinder(f hproviders.ProvidersFinder) Option {
225 | return func(o *Options) error {
226 | o.ProvidersFinder = f
227 | return nil
228 | }
229 | }
230 |
231 | func DisableResourceManager(b bool) Option {
232 | return func(o *Options) error {
233 | o.DisableResourceManager = b
234 | return nil
235 | }
236 | }
237 |
238 | func ResourceManagerLimitsFile(f string) Option {
239 | return func(o *Options) error {
240 | o.ResourceManagerLimitsFile = f
241 | return nil
242 | }
243 | }
244 |
245 | func ConnMgrHighWater(n int) Option {
246 | return func(o *Options) error {
247 | o.ConnMgrHighWater = n
248 | return nil
249 | }
250 | }
251 |
252 | func ConnMgrLowWater(n int) Option {
253 | return func(o *Options) error {
254 | o.ConnMgrLowWater = n
255 | return nil
256 | }
257 | }
258 |
259 | func ConnMgrGracePeriod(n time.Duration) Option {
260 | return func(o *Options) error {
261 | o.ConnMgrGracePeriod = n
262 | return nil
263 | }
264 | }
265 |
--------------------------------------------------------------------------------
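A brief sketch of the functional options pattern defined above: options apply in order (`NewHead` prepends `opts.Defaults` itself), so later options override earlier ones.

```go
package main

import (
	"fmt"

	"github.com/libp2p/hydra-booster/head/opts"
)

func main() {
	cfg := opts.Options{}
	// Apply Defaults first, then overrides, mirroring what NewHead does.
	err := cfg.Apply(
		opts.Defaults,
		opts.BucketSize(25),
		opts.DisableProvGC(),
	)
	if err != nil {
		panic(err) // a failed option reports its position in the list
	}
	fmt.Println(cfg.BucketSize, cfg.DisableProvGC) // 25 true
}
```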
/head/testing.go:
--------------------------------------------------------------------------------
1 | package head
2 |
3 | import (
4 | "context"
5 | "fmt"
6 |
7 | "github.com/ipfs/go-datastore"
8 | "github.com/libp2p/hydra-booster/head/opts"
9 | )
10 |
11 | // SpawnTestHead creates a new Hydra head with an in-memory datastore and 0 bootstrap peers by default.
12 | // It also waits for bootstrapping to complete.
13 | func SpawnTestHead(ctx context.Context, options ...opts.Option) (*Head, error) {
14 | defaults := []opts.Option{
15 | opts.Datastore(datastore.NewMapDatastore()),
16 | opts.BootstrapPeers(nil),
17 | }
18 | hd, bsCh, err := NewHead(ctx, append(defaults, options...)...)
19 | if err != nil {
20 | return nil, err
21 | }
22 |
23 | for {
24 | status, ok := <-bsCh
25 | if !ok {
26 | break
27 | }
28 | if status.Err != nil {
29 | fmt.Println(status.Err)
30 | }
31 | }
32 |
33 | return hd, nil
34 | }
35 |
36 | // SpawnTestHeads creates n new Hydra heads with an in-memory datastore and 0 bootstrap peers by default
37 | func SpawnTestHeads(ctx context.Context, n int, options ...opts.Option) ([]*Head, error) {
38 | var hds []*Head
39 | for i := 0; i < n; i++ {
40 | hd, err := SpawnTestHead(ctx, options...)
41 | if err != nil {
42 | for _, nd := range hds {
43 | nd.Host.Close()
44 | }
45 | return nil, err
46 | }
47 | hds = append(hds, hd)
48 | }
49 |
50 | return hds, nil
51 | }
52 |
--------------------------------------------------------------------------------
/httpapi/httpapi.go:
--------------------------------------------------------------------------------
1 | package httpapi
2 |
3 | import (
4 | "encoding/base64"
5 | "encoding/json"
6 | "errors"
7 | "fmt"
8 | "net"
9 | "net/http"
10 | "strconv"
11 | "time"
12 |
13 | "github.com/gorilla/mux"
14 | "github.com/multiformats/go-multiaddr"
15 |
16 | cid "github.com/ipfs/go-cid"
17 | dsq "github.com/ipfs/go-datastore/query"
18 | "github.com/libp2p/go-libp2p-kad-dht/providers"
19 | "github.com/libp2p/go-libp2p/core/crypto"
20 | "github.com/libp2p/go-libp2p/core/network"
21 | "github.com/libp2p/go-libp2p/core/peer"
22 | "github.com/libp2p/hydra-booster/hydra"
23 | "github.com/libp2p/hydra-booster/idgen"
24 | )
25 |
26 | // ListenAndServe instructs a Hydra HTTP API server to listen and serve on the passed address
27 | func ListenAndServe(hy *hydra.Hydra, addr string) error {
28 | srv := &http.Server{
29 | Addr: addr,
30 | // Good practice to set timeouts to avoid Slowloris attacks.
31 | WriteTimeout: time.Second * 60,
32 | ReadTimeout: time.Second * 60,
33 | IdleTimeout: time.Second * 60,
34 | Handler: NewRouter(hy),
35 | }
36 | return srv.ListenAndServe()
37 | }
38 |
39 | // NewRouter creates a new Hydra Booster HTTP API Gorilla Mux
40 | func NewRouter(hy *hydra.Hydra) *mux.Router {
41 | mux := mux.NewRouter()
42 | mux.HandleFunc("/heads", headsHandler(hy))
43 | mux.HandleFunc("/records/fetch/{key}", recordFetchHandler(hy))
44 | mux.HandleFunc("/records/list", recordListHandler(hy))
45 | mux.HandleFunc("/idgen/add", idgenAddHandler()).Methods("POST")
46 | mux.HandleFunc("/idgen/remove", idgenRemoveHandler()).Methods("POST")
47 | mux.HandleFunc("/swarm/peers", swarmPeersHandler(hy))
48 | mux.HandleFunc("/pstore/list", pstoreListHandler(hy))
49 | return mux
50 | }
51 |
52 | // "/heads" Get the peers created by hydra booster (ndjson)
53 | func headsHandler(hy *hydra.Hydra) func(http.ResponseWriter, *http.Request) {
54 | return func(w http.ResponseWriter, r *http.Request) {
55 | enc := json.NewEncoder(w)
56 |
57 | for _, hd := range hy.Heads {
58 | enc.Encode(peer.AddrInfo{
59 | ID: hd.Host.ID(),
60 | Addrs: hd.Host.Addrs(),
61 | })
62 | }
63 | }
64 | }
65 |
66 | // "/records/fetch" Receive a record and fetch it from the network, if available
67 | func recordFetchHandler(hy *hydra.Hydra) func(http.ResponseWriter, *http.Request) {
68 | return func(w http.ResponseWriter, r *http.Request) {
69 | vars := mux.Vars(r)
70 | cidStr := vars["key"]
71 | cid, err := cid.Decode(cidStr)
72 | if err != nil {
73 | fmt.Printf("Received invalid CID, got %s\n", cidStr)
74 | w.WriteHeader(http.StatusBadRequest)
75 | return
76 | }
77 |
78 | first := true
79 | nProviders := 1
80 | nProvidersStr := r.FormValue("nProviders")
81 | if nProvidersStr != "" {
82 | nProviders, err = strconv.Atoi(nProvidersStr)
83 | if err != nil {
84 | fmt.Printf("Received invalid nProviders, got %s\n", nProvidersStr)
85 | w.WriteHeader(http.StatusBadRequest)
86 | return
87 | }
88 | }
89 | enc := json.NewEncoder(w)
90 | ctx := r.Context()
91 | for peerAddrInfo := range hy.Heads[0].Routing.FindProvidersAsync(ctx, cid, nProviders) {
92 | // fmt.Printf("Got one provider %s\n", peerAddrInfo.String())
93 | // Store the Provider locally
94 | hy.Heads[0].AddProvider(ctx, cid, peerAddrInfo.ID)
95 | if first {
96 | first = false
97 | }
98 | enc.Encode(peerAddrInfo)
99 | }
100 | if first {
101 | w.WriteHeader(http.StatusNotFound)
102 | return
103 | }
104 | }
105 | }
106 |
107 | // "/records/list" Receive a record and fetch it from the network, if available
108 | func recordListHandler(hy *hydra.Hydra) func(http.ResponseWriter, *http.Request) {
109 | return func(w http.ResponseWriter, r *http.Request) {
110 | // TODO Improve this handler once ProvideManager gets exposed
111 | // https://discuss.libp2p.io/t/list-provider-records/450
112 | // for now, enumerate the Provider Records in the datastore
113 | ds := hy.SharedDatastore
114 | results, err := ds.Query(r.Context(), dsq.Query{Prefix: providers.ProvidersKeyPrefix})
115 | if err != nil {
116 | fmt.Printf("Error on retrieving provider records: %s\n", err)
117 | w.WriteHeader(500)
118 | return
119 | }
120 |
121 | enc := json.NewEncoder(w)
122 |
123 | for result := range results.Next() {
124 | enc.Encode(result.Entry)
125 | }
126 | results.Close()
127 | }
128 | }
129 |
130 | type PeerInfo struct {
131 | AddrInfo peer.AddrInfo // separate field to allow proper (un)marshalling of multi addresses
132 | HeadID peer.ID
133 | AgentVersion string
134 | Protocols []string
135 | }
136 |
137 | // "/pstore/list" lists all peer information the hydra heads store in their peer stores.
138 | func pstoreListHandler(hy *hydra.Hydra) func(http.ResponseWriter, *http.Request) {
139 | return func(w http.ResponseWriter, r *http.Request) {
140 | enc := json.NewEncoder(w)
141 | for _, head := range hy.Heads {
142 | ps := head.Host.Peerstore()
143 | for _, p := range ps.Peers() {
144 | // get address information
145 | pi := ps.PeerInfo(p)
146 |
147 | // get all protocols
148 | protocols, err := ps.GetProtocols(p)
149 | if err != nil {
150 | fmt.Printf("error getting protocols: %s\n", err)
151 | protocols = []string{}
152 | }
153 |
154 | // get agent version
155 | var agentVersion string
156 | if agent, err := ps.Get(p, "AgentVersion"); err == nil {
157 | agentVersion = agent.(string)
158 | }
159 |
160 | // marshal and send response to client
161 | err = enc.Encode(PeerInfo{
162 | AddrInfo: pi,
163 | HeadID: head.Host.ID(),
164 | AgentVersion: agentVersion,
165 | Protocols: protocols,
166 | })
167 | if err != nil {
168 | fmt.Printf("error encoding peer info: %s\n", err)
169 | var netErr net.Error
170 | if errors.As(err, &netErr) {
171 | return
172 | }
173 | }
174 | }
175 | }
176 | }
177 | }
178 |
179 | func idgenAddHandler() func(http.ResponseWriter, *http.Request) {
180 | return func(w http.ResponseWriter, r *http.Request) {
181 | pk, err := idgen.HydraIdentityGenerator.AddBalanced()
182 | if err != nil {
183 | fmt.Println(fmt.Errorf("failed to generate Peer ID: %w", err))
184 | w.WriteHeader(http.StatusInternalServerError)
185 | return
186 | }
187 |
188 | b, err := crypto.MarshalPrivateKey(pk)
189 | if err != nil {
190 | fmt.Println(fmt.Errorf("failed to extract private key bytes: %w", err))
191 | w.WriteHeader(http.StatusInternalServerError)
192 | return
193 | }
194 |
195 | enc := json.NewEncoder(w)
196 | enc.Encode(base64.StdEncoding.EncodeToString(b))
197 | }
198 | }
199 |
200 | func idgenRemoveHandler() func(http.ResponseWriter, *http.Request) {
201 | return func(w http.ResponseWriter, r *http.Request) {
202 | dec := json.NewDecoder(r.Body)
203 | var b64 string
204 | if err := dec.Decode(&b64); err != nil {
205 | w.WriteHeader(http.StatusBadRequest)
206 | return
207 | }
208 |
209 | bytes, err := base64.StdEncoding.DecodeString(b64)
210 | if err != nil {
211 | w.WriteHeader(http.StatusBadRequest)
212 | return
213 | }
214 |
215 | pk, err := crypto.UnmarshalPrivateKey(bytes)
216 | if err != nil {
217 | w.WriteHeader(http.StatusBadRequest)
218 | return
219 | }
220 |
221 | err = idgen.HydraIdentityGenerator.Remove(pk)
222 | if err != nil {
223 | fmt.Println(fmt.Errorf("failed to remove private key: %w", err))
224 | w.WriteHeader(http.StatusInternalServerError)
225 | return
226 | }
227 |
228 | w.WriteHeader(http.StatusNoContent)
229 | }
230 | }
231 |
232 | type swarmPeersPeer struct {
233 | ID peer.ID
234 | Addr multiaddr.Multiaddr
235 | Direction network.Direction
236 | }
237 | type swarmPeersHostPeer struct {
238 | ID peer.ID
239 | Peer swarmPeersPeer
240 | }
241 |
242 | // "/swarm/peers[?head=]" Get the peers with open connections optionally filtered by hydra head (ndjson)
243 | func swarmPeersHandler(hy *hydra.Hydra) func(http.ResponseWriter, *http.Request) {
244 | return func(w http.ResponseWriter, r *http.Request) {
245 | headID := r.FormValue("head")
246 |
247 | enc := json.NewEncoder(w)
248 |
249 | for _, hd := range hy.Heads {
250 | if headID != "" && headID != hd.Host.ID().String() {
251 | continue
252 | }
253 |
254 | for _, c := range hd.Host.Network().Conns() {
255 | enc.Encode(swarmPeersHostPeer{
256 | ID: hd.Host.ID(),
257 | Peer: swarmPeersPeer{
258 | ID: c.RemotePeer(),
259 | Addr: c.RemoteMultiaddr(),
260 | Direction: c.Stat().Direction,
261 | },
262 | })
263 | }
264 |
265 | }
266 | }
267 | }
268 |
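A minimal sketch of consuming the ndjson endpoints from Go, assuming a Hydra API is already listening (127.0.0.1:7779 is an invented address); it mirrors the decode loop the tests below use:

    package main

    import (
        "encoding/json"
        "fmt"
        "log"
        "net/http"

        "github.com/libp2p/go-libp2p/core/peer"
    )

    func main() {
        res, err := http.Get("http://127.0.0.1:7779/heads")
        if err != nil {
            log.Fatal(err)
        }
        defer res.Body.Close()

        // headsHandler streams one JSON-encoded peer.AddrInfo per head.
        dec := json.NewDecoder(res.Body)
        for {
            var ai peer.AddrInfo
            if err := dec.Decode(&ai); err != nil {
                break // io.EOF once the stream is drained
            }
            fmt.Println(ai.ID, ai.Addrs)
        }
    }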
--------------------------------------------------------------------------------
/httpapi/httpapi_test.go:
--------------------------------------------------------------------------------
1 | package httpapi
2 |
3 | import (
4 | "bytes"
5 | "context"
6 | "encoding/base64"
7 | "encoding/json"
8 | "fmt"
9 | "net"
10 | "net/http"
11 | "testing"
12 |
13 | "github.com/ipfs/go-cid"
14 | dsq "github.com/ipfs/go-datastore/query"
15 | "github.com/libp2p/go-libp2p/core/crypto"
16 | "github.com/libp2p/go-libp2p/core/peer"
17 | "github.com/libp2p/hydra-booster/head"
18 | "github.com/libp2p/hydra-booster/hydra"
19 | "github.com/libp2p/hydra-booster/idgen"
20 | hydratesting "github.com/libp2p/hydra-booster/testing"
21 | )
22 |
23 | func TestHTTPAPIHeads(t *testing.T) {
24 | ctx, cancel := context.WithCancel(hydratesting.NewContext())
25 | defer cancel()
26 |
27 | hds, err := head.SpawnTestHeads(ctx, 2)
28 | if err != nil {
29 | t.Fatal(err)
30 | }
31 |
32 | listener, err := net.Listen("tcp", ":0")
33 | if err != nil {
34 | t.Fatal(err)
35 | }
36 |
37 | go http.Serve(listener, NewRouter(&hydra.Hydra{Heads: hds}))
38 | defer listener.Close()
39 |
40 | url := fmt.Sprintf("http://%s/heads", listener.Addr().String())
41 | res, err := http.Get(url)
42 | if err != nil {
43 | t.Fatal(err)
44 | }
45 | if res.StatusCode < 200 || res.StatusCode > 299 {
46 | t.Fatal(fmt.Errorf("got non-2XX status code %d: %s", res.StatusCode, url))
47 | }
48 |
49 | dec := json.NewDecoder(res.Body)
50 | ais := []peer.AddrInfo{}
51 |
52 | for {
53 | var ai peer.AddrInfo
54 | if err := dec.Decode(&ai); err != nil {
55 | break
56 | }
57 | ais = append(ais, ai)
58 | }
59 |
60 | for _, ai := range ais {
61 | found := false
62 | for _, hd := range hds {
63 | if ai.ID == hd.Host.ID() {
64 | found = true
65 | break
66 | }
67 | }
68 | if !found {
69 | t.Fatal(fmt.Errorf("%s not found in spawned node peer IDs", ai.ID))
70 | }
71 | }
72 | }
73 |
74 | func TestHTTPAPIRecordsListWithoutRecords(t *testing.T) {
75 | ctx, cancel := context.WithCancel(hydratesting.NewContext())
76 | defer cancel()
77 |
78 | hds, err := head.SpawnTestHeads(ctx, 1)
79 | if err != nil {
80 | t.Fatal(err)
81 | }
82 |
83 | listener, err := net.Listen("tcp", ":0")
84 | if err != nil {
85 | t.Fatal(err)
86 | }
87 |
88 | go http.Serve(listener, NewRouter(&hydra.Hydra{Heads: hds, SharedDatastore: hds[0].Datastore}))
89 | defer listener.Close()
90 |
91 | url := fmt.Sprintf("http://%s/records/list", listener.Addr().String())
92 | res, err := http.Get(url)
93 | if err != nil {
94 | t.Fatal(err)
95 | }
96 | if res.StatusCode < 200 || res.StatusCode > 299 {
97 | t.Fatal(fmt.Errorf("got non-2XX status code %d: %s", res.StatusCode, url))
98 | }
99 |
100 | dec := json.NewDecoder(res.Body)
101 | entries := []dsq.Entry{}
102 |
103 | for {
104 | var e dsq.Entry
105 | if err := dec.Decode(&e); err != nil {
106 | break
107 | }
108 | entries = append(entries, e)
109 | }
110 |
111 | if len(entries) > 0 {
112 | t.Fatal(fmt.Errorf("Expected to have 0 records stored, found %d", len(entries)))
113 | }
114 | }
115 |
116 | func TestHTTPAPIRecordsFetch(t *testing.T) {
117 | ctx, cancel := context.WithCancel(hydratesting.NewContext())
118 | defer cancel()
119 |
120 | hds, err := head.SpawnTestHeads(ctx, 1)
121 | if err != nil {
122 | t.Fatal(err)
123 | }
124 |
125 | listener, err := net.Listen("tcp", ":0")
126 | if err != nil {
127 | t.Fatal(err)
128 | }
129 |
130 | go http.Serve(listener, NewRouter(&hydra.Hydra{Heads: hds, SharedDatastore: hds[0].Datastore}))
131 | defer listener.Close()
132 |
133 | cidStr := "QmVBEq6nnXQR2Ueb6etMFMUVhGM5vu34Y2KfHW5FVdGFok"
134 | cid, err := cid.Decode(cidStr)
135 | if err != nil {
136 | t.Fatal(err)
137 | }
138 |
139 | // Add the provider as itself for the test
140 | // In an ideal testing scenario, we would spawn multiple nodes and see that they can indeed
141 | // fetch from each other
142 | hds[0].AddProvider(ctx, cid, hds[0].Host.ID())
143 |
144 | // Valid CID
145 | url := fmt.Sprintf("http://%s/records/fetch/%s", listener.Addr().String(), cidStr)
146 | res, err := http.Get(url)
147 | if err != nil {
148 | t.Fatal(err)
149 | }
150 |
151 | if res.StatusCode < 200 || res.StatusCode > 299 {
152 | t.Fatal(fmt.Errorf("got non-2XX status code %d: %s", res.StatusCode, url))
153 | }
154 |
155 | dec := json.NewDecoder(res.Body)
156 | entries := []peer.AddrInfo{}
157 |
158 | for {
159 | var e peer.AddrInfo
160 | if err := dec.Decode(&e); err != nil {
161 | break
162 | }
163 | entries = append(entries, e)
164 | }
165 |
166 | // We can't ensure exactly how many we will get, as we are testing this against the live network
167 | if len(entries) < 1 {
168 | t.Fatal(fmt.Errorf("Expected to find 1 or more records, found %d", len(entries)))
169 | }
170 |
171 | // Valid with queryString
172 | url = fmt.Sprintf("http://%s/records/fetch/%s?nProviders=2", listener.Addr().String(), cidStr)
173 | res, err = http.Get(url)
174 | if err != nil {
175 | t.Fatal(err)
176 | }
177 | if res.StatusCode < 200 || res.StatusCode > 299 {
178 | t.Fatal(fmt.Errorf("got non-2XX status code %d: %s", res.StatusCode, url))
179 | }
180 |
181 | dec = json.NewDecoder(res.Body)
182 | entries = []peer.AddrInfo{}
183 |
184 | for {
185 | var e peer.AddrInfo
186 | if err := dec.Decode(&e); err != nil {
187 | break
188 | }
189 | entries = append(entries, e)
190 | }
191 |
192 | // We can't ensure exactly how many we will get, as we are testing this against the live network
193 | if len(entries) < 1 {
194 | t.Fatal(fmt.Errorf("Expected to find 1 or more records, found %d", len(entries)))
195 | }
196 | }
197 |
198 | func TestHTTPAPIRecordsFetchErrorStates(t *testing.T) {
199 | ctx, cancel := context.WithCancel(hydratesting.NewContext())
200 | defer cancel()
201 |
202 | hds, err := head.SpawnTestHeads(ctx, 1)
203 | if err != nil {
204 | t.Fatal(err)
205 | }
206 |
207 | listener, err := net.Listen("tcp", ":0")
208 | if err != nil {
209 | t.Fatal(err)
210 | }
211 |
212 | go http.Serve(listener, NewRouter(&hydra.Hydra{Heads: hds, SharedDatastore: hds[0].Datastore}))
213 | defer listener.Close()
214 |
215 | // Missing CID
216 | url := fmt.Sprintf("http://%s/records/fetch", listener.Addr().String())
217 | res, err := http.Get(url)
218 | if err != nil {
219 | t.Fatal(err)
220 | }
221 | if res.StatusCode != 404 {
222 | t.Fatal(fmt.Errorf("Should have got a 404, got %d: %s", res.StatusCode, url))
223 | }
224 |
225 | // Malformed CID
226 | url = fmt.Sprintf("http://%s/records/fetch/notacid", listener.Addr().String())
227 | res, err = http.Get(url)
228 | if err != nil {
229 | t.Fatal(err)
230 | }
231 | if res.StatusCode != 400 {
232 | t.Fatal(fmt.Errorf("Should have got a 400, got %d: %s", res.StatusCode, url))
233 | }
234 |
235 | // Malformed queryString
236 | url = fmt.Sprintf("http://%s/records/fetch/notacid?nProviders=bananas", listener.Addr().String())
237 | res, err = http.Get(url)
238 | if err != nil {
239 | t.Fatal(err)
240 | }
241 | if res.StatusCode != 400 {
242 | t.Fatal(fmt.Errorf("Should have got a 400, got %d: %s", res.StatusCode, url))
243 | }
244 | }
245 |
246 | func TestHTTPAPIPStoreList(t *testing.T) {
247 | ctx, cancel := context.WithCancel(hydratesting.NewContext())
248 | defer cancel()
249 |
250 | hds, err := head.SpawnTestHeads(ctx, 1)
251 | if err != nil {
252 | t.Fatal(err)
253 | }
254 |
255 | listener, err := net.Listen("tcp", ":0")
256 | if err != nil {
257 | t.Fatal(err)
258 | }
259 |
260 | go http.Serve(listener, NewRouter(&hydra.Hydra{Heads: hds, SharedDatastore: hds[0].Datastore}))
261 | defer listener.Close()
262 |
263 | url := fmt.Sprintf("http://%s/pstore/list", listener.Addr().String())
264 | res, err := http.Get(url)
265 | if err != nil {
266 | t.Fatal(err)
267 | }
268 | if res.StatusCode < 200 || res.StatusCode > 299 {
269 | t.Fatal(fmt.Errorf("got non-2XX status code %d: %s", res.StatusCode, url))
270 | }
271 |
272 | dec := json.NewDecoder(res.Body)
273 |
274 | var peerInfos []PeerInfo
275 | for {
276 | var pi PeerInfo
277 | if err := dec.Decode(&pi); err != nil {
278 | break
279 | }
280 | peerInfos = append(peerInfos, pi)
281 | }
282 |
283 | if len(peerInfos) == 0 {
284 | t.Fatalf("Expected to have more than 0 peer records stored, found %d", len(peerInfos))
285 | }
286 | }
287 |
288 | func TestIDGeneratorAdd(t *testing.T) {
289 | listener, err := net.Listen("tcp", ":0")
290 | if err != nil {
291 | t.Fatal(err)
292 | }
293 |
294 | go http.Serve(listener, NewRouter(nil))
295 | defer listener.Close()
296 |
297 | url := fmt.Sprintf("http://%s/idgen/add", listener.Addr().String())
298 | res, err := http.Post(url, "application/json", nil)
299 | if err != nil {
300 | t.Fatal(err)
301 | }
302 | if res.StatusCode != 200 {
303 | t.Fatal(fmt.Errorf("unexpected status %d", res.StatusCode))
304 | }
305 |
306 | dec := json.NewDecoder(res.Body)
307 | var b64 string
308 | if err := dec.Decode(&b64); err != nil {
309 | t.Fatal(err)
310 | }
311 |
312 | bytes, err := base64.StdEncoding.DecodeString(b64)
313 | if err != nil {
314 | t.Fatal(err)
315 | }
316 |
317 | _, err = crypto.UnmarshalPrivateKey(bytes)
318 | if err != nil {
319 | t.Fatal(err)
320 | }
321 | }
322 |
323 | func TestIDGeneratorRemove(t *testing.T) {
324 | listener, err := net.Listen("tcp", ":0")
325 | if err != nil {
326 | t.Fatal(err)
327 | }
328 |
329 | go http.Serve(listener, NewRouter(nil))
330 | defer listener.Close()
331 |
332 | pk, err := idgen.HydraIdentityGenerator.AddBalanced()
333 | if err != nil {
334 | t.Fatal(err)
335 | }
336 |
337 | b, err := crypto.MarshalPrivateKey(pk)
338 | if err != nil {
339 | t.Fatal(err)
340 | }
341 |
342 | data, err := json.Marshal(base64.StdEncoding.EncodeToString(b))
343 | if err != nil {
344 | t.Fatal(err)
345 | }
346 |
347 | url := fmt.Sprintf("http://%s/idgen/remove", listener.Addr().String())
348 | res, err := http.Post(url, "application/json", bytes.NewReader(data))
349 | if err != nil {
350 | t.Fatal(err)
351 | }
352 | if res.StatusCode != 204 {
353 | t.Fatal(fmt.Errorf("unexpected status %d", res.StatusCode))
354 | }
355 | }
356 |
357 | func TestIDGeneratorRemoveInvalidJSON(t *testing.T) {
358 | listener, err := net.Listen("tcp", ":0")
359 | if err != nil {
360 | t.Fatal(err)
361 | }
362 |
363 | go http.Serve(listener, NewRouter(nil))
364 | defer listener.Close()
365 |
366 | url := fmt.Sprintf("http://%s/idgen/remove", listener.Addr().String())
367 | res, err := http.Post(url, "application/json", bytes.NewReader([]byte("{{")))
368 | if err != nil {
369 | t.Fatal(err)
370 | }
371 | if res.StatusCode != 400 {
372 | t.Fatal(fmt.Errorf("unexpected status %d", res.StatusCode))
373 | }
374 | }
375 |
376 | func TestIDGeneratorRemoveInvalidBase64(t *testing.T) {
377 | listener, err := net.Listen("tcp", ":0")
378 | if err != nil {
379 | t.Fatal(err)
380 | }
381 |
382 | go http.Serve(listener, NewRouter(nil))
383 | defer listener.Close()
384 |
385 | url := fmt.Sprintf("http://%s/idgen/remove", listener.Addr().String())
386 | res, err := http.Post(url, "application/json", bytes.NewReader([]byte("\"! invalid b64 !\"")))
387 | if err != nil {
388 | t.Fatal(err)
389 | }
390 | if res.StatusCode != 400 {
391 | t.Fatal(fmt.Errorf("unexpected status %d", res.StatusCode))
392 | }
393 | }
394 |
395 | func TestIDGeneratorRemoveInvalidPrivateKey(t *testing.T) {
396 | listener, err := net.Listen("tcp", ":0")
397 | if err != nil {
398 | t.Fatal(err)
399 | }
400 |
401 | go http.Serve(listener, NewRouter(nil))
402 | defer listener.Close()
403 |
404 | data, err := json.Marshal(base64.StdEncoding.EncodeToString([]byte("invalid private key")))
405 | if err != nil {
406 | t.Fatal(err)
407 | }
408 |
409 | url := fmt.Sprintf("http://%s/idgen/remove", listener.Addr().String())
410 | res, err := http.Post(url, "application/json", bytes.NewReader(data))
411 | if err != nil {
412 | t.Fatal(err)
413 | }
414 | if res.StatusCode != 400 {
415 | t.Fatal(fmt.Errorf("unexpected status %d", res.StatusCode))
416 | }
417 | }
418 |
419 | type hostPeer struct {
420 | ID peer.ID
421 | Peer struct {
422 | ID peer.ID
423 | Addr string
424 | Direction int
425 | }
426 | }
427 |
428 | func TestHTTPAPISwarmPeers(t *testing.T) {
429 | ctx, cancel := context.WithCancel(hydratesting.NewContext())
430 | defer cancel()
431 |
432 | hds, err := head.SpawnTestHeads(ctx, 2)
433 | if err != nil {
434 | t.Fatal(err)
435 | }
436 |
437 | listener, err := net.Listen("tcp", ":0")
438 | if err != nil {
439 | t.Fatal(err)
440 | }
441 |
442 | go http.Serve(listener, NewRouter(&hydra.Hydra{Heads: hds}))
443 | defer listener.Close()
444 |
445 | err = hds[0].Host.Connect(ctx, peer.AddrInfo{
446 | ID: hds[1].Host.ID(),
447 | Addrs: hds[1].Host.Addrs(),
448 | })
449 | if err != nil {
450 | t.Fatal(err)
451 | }
452 |
453 | url := fmt.Sprintf("http://%s/swarm/peers", listener.Addr().String())
454 | res, err := http.Get(url)
455 | if err != nil {
456 | t.Fatal(err)
457 | }
458 | if res.StatusCode < 200 || res.StatusCode > 299 {
459 | t.Fatal(fmt.Errorf("got non-2XX status code %d: %s", res.StatusCode, url))
460 | }
461 |
462 | dec := json.NewDecoder(res.Body)
463 | hps := []hostPeer{}
464 |
465 | for {
466 | var hp hostPeer
467 | if err := dec.Decode(&hp); err != nil {
468 | break
469 | }
470 | hps = append(hps, hp)
471 | }
472 |
473 | found := false
474 | for _, hp := range hps {
475 | if hp.Peer.ID == hds[1].Host.ID() {
476 | found = true
477 | break
478 | }
479 | }
480 |
481 | if !found {
482 | t.Fatal(fmt.Errorf("head %s not in peer list", hds[1].Host.ID()))
483 | }
484 | }
485 |
486 | func TestHTTPAPISwarmPeersHeadFilter(t *testing.T) {
487 | ctx, cancel := context.WithCancel(hydratesting.NewContext())
488 | defer cancel()
489 |
490 | hds, err := head.SpawnTestHeads(ctx, 2)
491 | if err != nil {
492 | t.Fatal(err)
493 | }
494 |
495 | listener, err := net.Listen("tcp", ":0")
496 | if err != nil {
497 | t.Fatal(err)
498 | }
499 |
500 | go http.Serve(listener, NewRouter(&hydra.Hydra{Heads: hds}))
501 | defer listener.Close()
502 |
503 | err = hds[0].Host.Connect(ctx, peer.AddrInfo{
504 | ID: hds[1].Host.ID(),
505 | Addrs: hds[1].Host.Addrs(),
506 | })
507 | if err != nil {
508 | t.Fatal(err)
509 | }
510 |
511 | url := fmt.Sprintf("http://%s/swarm/peers?head=%s", listener.Addr().String(), hds[0].Host.ID())
512 | res, err := http.Get(url)
513 | if err != nil {
514 | t.Fatal(err)
515 | }
516 | if res.StatusCode < 200 || res.StatusCode > 299 {
517 | t.Fatal(fmt.Errorf("got non-2XX status code %d: %s", res.StatusCode, url))
518 | }
519 |
520 | dec := json.NewDecoder(res.Body)
521 | hps := []hostPeer{}
522 |
523 | for {
524 | var hp hostPeer
525 | if err := dec.Decode(&hp); err != nil {
526 | break
527 | }
528 | hps = append(hps, hp)
529 | }
530 |
531 | for _, hp := range hps {
532 | if hp.ID != hds[0].Host.ID() {
533 | t.Fatal(fmt.Errorf("unexpectedly found head %s in peer list", hp.ID))
534 | }
535 | }
536 | }
537 |
--------------------------------------------------------------------------------
/hydra/hydra.go:
--------------------------------------------------------------------------------
1 | package hydra
2 |
3 | import (
4 | "context"
5 | "errors"
6 | "fmt"
7 | "net/http"
8 | "os"
9 | "strconv"
10 | "strings"
11 | "sync"
12 | "time"
13 |
14 | "github.com/aws/aws-sdk-go-v2/aws"
15 | "github.com/aws/aws-sdk-go-v2/aws/retry"
16 | "github.com/aws/aws-sdk-go-v2/config"
17 | "github.com/aws/aws-sdk-go-v2/service/dynamodb"
18 | "github.com/aws/aws-sdk-go/aws/session"
19 | ddbv1 "github.com/aws/aws-sdk-go/service/dynamodb"
20 | "github.com/axiomhq/hyperloglog"
21 | "github.com/ipfs/go-datastore"
22 | ddbds "github.com/ipfs/go-ds-dynamodb"
23 | leveldb "github.com/ipfs/go-ds-leveldb"
24 | "github.com/ipfs/go-libipfs/routing/http/client"
25 | "github.com/libp2p/go-libp2p-kad-dht/providers"
26 | "github.com/libp2p/go-libp2p/core/host"
27 | "github.com/libp2p/go-libp2p/core/network"
28 | "github.com/libp2p/go-libp2p/core/protocol"
29 | "github.com/libp2p/go-libp2p/p2p/host/peerstore/pstoreds"
30 | hyds "github.com/libp2p/hydra-booster/datastore"
31 | "github.com/libp2p/hydra-booster/head"
32 | "github.com/libp2p/hydra-booster/head/opts"
33 | "github.com/libp2p/hydra-booster/idgen"
34 | "github.com/libp2p/hydra-booster/metrics"
35 | "github.com/libp2p/hydra-booster/metricstasks"
36 | "github.com/libp2p/hydra-booster/periodictasks"
37 | hproviders "github.com/libp2p/hydra-booster/providers"
38 | "github.com/libp2p/hydra-booster/utils"
39 | "github.com/multiformats/go-multiaddr"
40 | "go.opencensus.io/stats"
41 | "go.opencensus.io/tag"
42 | )
43 |
44 | // Default intervals between periodic task runs; more CPU/memory-intensive tasks run less frequently.
45 | // TODO: expose these as command line options?
46 | const (
47 | routingTableSizeTaskInterval = 5 * time.Second
48 | uniquePeersTaskInterval = 5 * time.Second
49 | ipnsRecordsTaskInterval = 15 * time.Minute
50 | )
51 |
52 | // Hydra is a container for heads and their shared belly bits.
53 | type Hydra struct {
54 | Heads []*head.Head
55 | SharedDatastore datastore.Datastore
56 | // SharedRoutingTable *kbucket.RoutingTable
57 |
58 | hyperLock *sync.Mutex
59 | hyperlog *hyperloglog.Sketch
60 | }
61 |
62 | // Options are configuration for a new hydra.
63 | type Options struct {
64 | Name string
65 | DatastorePath string
66 | PeerstorePath string
67 | ProviderStore string
68 | DelegateTimeout time.Duration
69 | GetPort func() int
70 | NHeads int
71 | ProtocolPrefix protocol.ID
72 | BucketSize int
73 | BsCon int
74 | EnableRelay bool
75 | Stagger time.Duration
76 | IDGenerator idgen.IdentityGenerator
77 | DisableProvGC bool
78 | DisableProviders bool
79 | DisableValues bool
80 | BootstrapPeers []multiaddr.Multiaddr
81 | DisablePrefetch bool
82 | DisableProvCounts bool
83 | DisableDBCreate bool
84 | DisableResourceManager bool
85 | ResourceManagerLimitsFile string
86 | ConnMgrHighWater int
87 | ConnMgrLowWater int
88 | ConnMgrGracePeriod time.Duration
89 | }
90 |
91 | // NewHydra creates a new Hydra with the passed options.
92 | func NewHydra(ctx context.Context, options Options) (*Hydra, error) {
93 | if options.Name != "" {
94 | nctx, err := tag.New(ctx, tag.Insert(metrics.KeyName, options.Name))
95 | if err != nil {
96 | return nil, err
97 | }
98 | ctx = nctx
99 | }
100 |
101 | var ds datastore.Batching
102 | var err error
103 | if strings.HasPrefix(options.DatastorePath, "postgresql://") {
104 | fmt.Fprintf(os.Stderr, "🐘 Using PostgreSQL datastore\n")
105 | ds, err = hyds.NewPostgreSQLDatastore(ctx, options.DatastorePath, !options.DisableDBCreate)
106 | } else if strings.HasPrefix(options.DatastorePath, "dynamodb://") {
107 | optsStr := strings.TrimPrefix(options.DatastorePath, "dynamodb://")
108 | table, err := parseDDBTable(optsStr)
109 | if err != nil {
110 | return nil, err
111 | }
112 | fmt.Fprintf(os.Stderr, "Using DynamoDB datastore with table '%s'\n", table)
113 | ddbClient := ddbv1.New(session.Must(session.NewSession()))
114 | ddbDS := ddbds.New(ddbClient, table, ddbds.WithScanParallelism(5))
115 | ds = ddbDS
116 | periodictasks.RunTasks(ctx, []periodictasks.PeriodicTask{metricstasks.NewIPNSRecordsTask(ddbDS, ipnsRecordsTaskInterval)})
117 | } else {
118 | fmt.Fprintf(os.Stderr, "🥞 Using LevelDB datastore\n")
119 | ds, err = leveldb.NewDatastore(options.DatastorePath, nil)
120 | }
121 | if err != nil {
122 | return nil, fmt.Errorf("failed to create datastore: %w", err)
123 | }
124 |
125 | var hds []*head.Head
126 |
127 | if options.PeerstorePath == "" {
128 | fmt.Fprintf(os.Stderr, "💭 Using in-memory peerstore\n")
129 | } else {
130 | fmt.Fprintf(os.Stderr, "🥞 Using LevelDB peerstore (EXPERIMENTAL)\n")
131 | }
132 |
133 | if options.IDGenerator == nil {
134 | options.IDGenerator = idgen.HydraIdentityGenerator
135 | }
136 | fmt.Fprintf(os.Stderr, "🐲 Spawning %d heads: \n", options.NHeads)
137 |
138 | var hyperLock sync.Mutex
139 | hyperlog := hyperloglog.New()
140 |
141 | // limiter caps how many heads may bootstrap concurrently (bounded by BsCon).
142 | limiter := make(chan struct{}, options.BsCon)
143 |
144 | // Increase per-host connection pool since we are making lots of concurrent requests to a small number of hosts.
145 | transport := http.DefaultTransport.(*http.Transport).Clone()
146 | transport.MaxIdleConns = 500
147 | transport.MaxIdleConnsPerHost = 100
148 | limitedTransport := &client.ResponseBodyLimitedTransport{RoundTripper: transport, LimitBytes: 1 << 20}
149 |
150 | delegateHTTPClient := &http.Client{
151 | Timeout: options.DelegateTimeout,
152 | Transport: limitedTransport,
153 | }
154 |
155 | providerStoreBuilder, err := newProviderStoreBuilder(ctx, delegateHTTPClient, options)
156 | if err != nil {
157 | return nil, err
158 | }
159 |
160 | providersFinder := hproviders.NewAsyncProvidersFinder(5*time.Second, 1000, 1*time.Hour)
161 | providersFinder.Run(ctx, 1000)
162 |
163 | // Reuse the HTTP client across all the heads.
164 | for i := 0; i < options.NHeads; i++ {
165 | time.Sleep(options.Stagger)
166 | fmt.Fprintf(os.Stderr, ".")
167 |
168 | port := options.GetPort()
169 | tcpAddr, _ := multiaddr.NewMultiaddr(fmt.Sprintf("/ip4/0.0.0.0/tcp/%d", port))
170 | quicAddr, _ := multiaddr.NewMultiaddr(fmt.Sprintf("/ip4/0.0.0.0/udp/%d/quic", port))
171 | priv, err := options.IDGenerator.AddBalanced()
172 | if err != nil {
173 | return nil, fmt.Errorf("failed to generate balanced private key %w", err)
174 | }
175 | hdOpts := []opts.Option{
176 | opts.Datastore(ds),
177 | opts.ProviderStoreBuilder(providerStoreBuilder),
178 | opts.Addrs([]multiaddr.Multiaddr{tcpAddr, quicAddr}),
179 | opts.ProtocolPrefix(options.ProtocolPrefix),
180 | opts.BucketSize(options.BucketSize),
181 | opts.Limiter(limiter),
182 | opts.ID(priv),
183 | opts.BootstrapPeers(options.BootstrapPeers),
184 | opts.DelegateHTTPClient(delegateHTTPClient),
185 | opts.DisableResourceManager(options.DisableResourceManager),
186 | opts.ResourceManagerLimitsFile(options.ResourceManagerLimitsFile),
187 | opts.ConnMgrHighWater(options.ConnMgrHighWater),
188 | opts.ConnMgrLowWater(options.ConnMgrLowWater),
189 | opts.ConnMgrGracePeriod(options.ConnMgrGracePeriod),
190 | }
191 | if options.EnableRelay {
192 | hdOpts = append(hdOpts, opts.EnableRelay())
193 | }
194 | if options.DisableProviders {
195 | hdOpts = append(hdOpts, opts.DisableProviders())
196 | }
197 | if options.DisableValues {
198 | hdOpts = append(hdOpts, opts.DisableValues())
199 | }
200 | if options.DisableProvGC || i > 0 {
201 | // the first head GCs, if it's enabled
202 | hdOpts = append(hdOpts, opts.DisableProvGC())
203 | }
204 | if options.DisableProvCounts || i > 0 {
205 | // the first head counts providers, if it's enabled
206 | hdOpts = append(hdOpts, opts.DisableProvCounts())
207 | }
208 | if !options.DisablePrefetch {
209 | hdOpts = append(hdOpts, opts.ProvidersFinder(providersFinder))
210 | }
211 | if options.PeerstorePath != "" {
212 | pstoreDs, err := leveldb.NewDatastore(fmt.Sprintf("%s/head-%d", options.PeerstorePath, i), nil)
213 | if err != nil {
214 | return nil, fmt.Errorf("failed to create peerstore datastore: %w", err)
215 | }
216 | pstore, err := pstoreds.NewPeerstore(ctx, pstoreDs, pstoreds.DefaultOpts())
217 | if err != nil {
218 | return nil, fmt.Errorf("failed to create peerstore: %w", err)
219 | }
220 | hdOpts = append(hdOpts, opts.Peerstore(pstore))
221 | }
222 |
223 | hd, bsCh, err := head.NewHead(ctx, hdOpts...)
224 | if err != nil {
225 | return nil, fmt.Errorf("failed to spawn node with swarm addresses %v %v: %w", tcpAddr, quicAddr, err)
226 | }
227 |
228 | hdCtx, err := tag.New(ctx, tag.Insert(metrics.KeyPeerID, hd.Host.ID().String()))
229 | if err != nil {
230 | return nil, err
231 | }
232 |
233 | stats.Record(hdCtx, metrics.Heads.M(1))
234 |
235 | hd.Host.Network().Notify(&network.NotifyBundle{
236 | ConnectedF: func(n network.Network, v network.Conn) {
237 | hyperLock.Lock()
238 | hyperlog.Insert([]byte(v.RemotePeer()))
239 | hyperLock.Unlock()
240 | stats.Record(hdCtx, metrics.ConnectedPeers.M(1))
241 | },
242 | DisconnectedF: func(n network.Network, v network.Conn) {
243 | stats.Record(hdCtx, metrics.ConnectedPeers.M(-1))
244 | },
245 | })
246 |
247 | go handleBootstrapStatus(hdCtx, bsCh)
248 |
249 | hds = append(hds, hd)
250 | }
251 | fmt.Fprintf(os.Stderr, "\n")
252 |
253 | for _, hd := range hds {
254 | fmt.Fprintf(os.Stderr, "🆔 %v\n", hd.Host.ID())
255 | for _, addr := range hd.Host.Addrs() {
256 | fmt.Fprintf(os.Stderr, "🐝 Swarm listening on %v\n", addr)
257 | }
258 | }
259 |
260 | hydra := Hydra{
261 | Heads: hds,
262 | SharedDatastore: ds,
263 | hyperLock: &hyperLock,
264 | hyperlog: hyperlog,
265 | }
266 |
267 | tasks := []periodictasks.PeriodicTask{
268 | metricstasks.NewRoutingTableSizeTask(hydra.GetRoutingTableSize, routingTableSizeTaskInterval),
269 | metricstasks.NewUniquePeersTask(hydra.GetUniquePeersCount, uniquePeersTaskInterval),
270 | }
271 |
272 | periodictasks.RunTasks(ctx, tasks)
273 |
274 | return &hydra, nil
275 | }
276 |
277 | func newProviderStoreBuilder(ctx context.Context, httpClient *http.Client, options Options) (opts.ProviderStoreBuilderFunc, error) {
278 | if options.ProviderStore == "none" {
279 | return func(opts opts.Options, host host.Host) (providers.ProviderStore, error) {
280 | return &hproviders.NoopProviderStore{}, nil
281 | }, nil
282 | }
283 | if strings.HasPrefix(options.ProviderStore, "https://") {
284 | return func(opts opts.Options, host host.Host) (providers.ProviderStore, error) {
285 | fmt.Printf("Using HTTP provider store\n")
286 | return hproviders.NewHTTPProviderStore(httpClient, options.ProviderStore)
287 | }, nil
288 | }
289 | if strings.HasPrefix(options.ProviderStore, "dynamodb://") {
290 | // dynamodb://table=<table>,ttl=<ttl>,queryLimit=<queryLimit>
291 | ddbOpts, err := utils.ParseOptsString(strings.TrimPrefix(options.ProviderStore, "dynamodb://"))
292 | if err != nil {
293 | return nil, fmt.Errorf("parsing DynamoDB config string: %w", err)
294 | }
295 | table := ddbOpts["table"]
296 | if table == "" {
297 | return nil, errors.New("DynamoDB table must be specified")
298 | }
299 | ttlStr := ddbOpts["ttl"]
300 | if ttlStr == "" {
301 | return nil, errors.New("DynamoDB TTL must be specified")
302 | }
303 | ttl, err := time.ParseDuration(ttlStr)
304 | if err != nil {
305 | return nil, fmt.Errorf("parsing DynamoDB TTL: %w", err)
306 | }
307 |
308 | queryLimitStr := ddbOpts["queryLimit"]
309 | if queryLimitStr == "" {
310 | return nil, errors.New("DynamoDB query limit must be specified")
311 | }
312 | queryLimit64, err := strconv.ParseInt(queryLimitStr, 10, 32)
313 | if err != nil {
314 | return nil, fmt.Errorf("parsing DynamoDB query limit: %w", err)
315 | }
316 | queryLimit := int32(queryLimit64)
317 |
318 | fmt.Fprintf(os.Stderr, "🥞 Using DynamoDB providerstore with table=%s, ttl=%s, queryLimit=%d\n", table, ttl, queryLimit)
319 | awsCfg, err := config.LoadDefaultConfig(ctx,
320 | config.WithRetryer(func() aws.Retryer {
321 | return retry.NewStandard(func(so *retry.StandardOptions) { so.MaxAttempts = 1 })
322 | }))
323 | if err != nil {
324 | return nil, fmt.Errorf("loading AWS config: %w", err)
325 | }
326 | awsCfg.APIOptions = append(awsCfg.APIOptions, metrics.AddAWSSDKMiddleware)
327 |
328 | // reuse the client across all the heads
329 | ddbClient := dynamodb.NewFromConfig(awsCfg)
330 |
331 | return func(opts opts.Options, h host.Host) (providers.ProviderStore, error) {
332 | return hproviders.NewDynamoDBProviderStore(h.ID(), h.Peerstore(), ddbClient, table, ttl, queryLimit), nil
333 | }, nil
334 | }
335 | return nil, nil
336 | }
337 |
338 | func handleBootstrapStatus(ctx context.Context, ch chan head.BootstrapStatus) {
339 | for status := range ch {
340 | if status.Err != nil {
341 | fmt.Println(status.Err)
342 | }
343 | if status.Done {
344 | stats.Record(ctx, metrics.BootstrappedHeads.M(1))
345 | }
346 | }
347 | }
348 |
349 | func parseDDBTable(optsStr string) (string, error) {
350 | opts, err := utils.ParseOptsString(optsStr)
351 | if err != nil {
352 | return "", fmt.Errorf("parsing DynamoDB config string: %w", err)
353 | }
354 | table, ok := opts["table"]
355 | if !ok {
356 | return "", errors.New("must specify table in DynamoDB opts string")
357 | }
358 | return table, nil
359 | }
360 |
361 | // GetUniquePeersCount retrieves the current total for unique peers
362 | func (hy *Hydra) GetUniquePeersCount() uint64 {
363 | hy.hyperLock.Lock()
364 | defer hy.hyperLock.Unlock()
365 | return hy.hyperlog.Estimate()
366 | }
367 |
368 | func (hy *Hydra) GetRoutingTableSize() int {
369 | var rts int
370 | for i := range hy.Heads {
371 | rts += hy.Heads[i].RoutingTable().Size()
372 | }
373 | return rts
374 | }
375 |
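A compact sketch of constructing a Hydra with the option strings parsed above; the name, head count, base port, and every DynamoDB value are invented for illustration:

    package main

    import (
        "context"
        "log"

        "github.com/libp2p/hydra-booster/hydra"
        "github.com/libp2p/hydra-booster/utils"
    )

    func main() {
        hy, err := hydra.NewHydra(context.Background(), hydra.Options{
            Name:    "scary",
            NHeads:  2,
            GetPort: utils.PortSelector(3000),
            // newProviderStoreBuilder requires all of table, ttl and queryLimit.
            ProviderStore: "dynamodb://table=hydra-providers,ttl=24h,queryLimit=100",
        })
        if err != nil {
            log.Fatal(err)
        }
        log.Printf("spawned %d heads", len(hy.Heads))
    }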
--------------------------------------------------------------------------------
/hydra/hydra_test.go:
--------------------------------------------------------------------------------
1 | package hydra
2 |
3 | import (
4 | "context"
5 | "fmt"
6 | "testing"
7 | "time"
8 |
9 | "github.com/libp2p/go-libp2p/core/peer"
10 | hydratesting "github.com/libp2p/hydra-booster/testing"
11 | "github.com/libp2p/hydra-booster/utils"
12 | "github.com/multiformats/go-multiaddr"
13 | )
14 |
15 | func TestSpawnHydra(t *testing.T) {
16 | ctx, cancel := context.WithCancel(hydratesting.NewContext())
17 | defer cancel()
18 |
19 | hy, err := NewHydra(ctx, Options{
20 | Name: "Scary",
21 | NHeads: 2,
22 | GetPort: utils.PortSelector(3000),
23 | })
24 | if err != nil {
25 | t.Fatal(err)
26 | }
27 |
28 | if len(hy.Heads) != 2 {
29 | t.Fatal("expected hydra to spawn 2 heads")
30 | }
31 | }
32 |
33 | func TestGetUniquePeersCount(t *testing.T) {
34 | ctx, cancel := context.WithCancel(hydratesting.NewContext())
35 | defer cancel()
36 |
37 | hy, err := NewHydra(ctx, Options{
38 | NHeads: 2,
39 | GetPort: utils.PortSelector(3000),
40 | })
41 | if err != nil {
42 | t.Fatal(err)
43 | }
44 |
45 | hd0Addr := hy.Heads[0].Host.Addrs()[0]
46 | hd0ID := hy.Heads[0].Host.ID()
47 | hd0p2pAddr, err := multiaddr.NewMultiaddr(fmt.Sprintf("%s/p2p/%s", hd0Addr, hd0ID))
48 | if err != nil {
49 | t.Fatal(err)
50 | }
51 | hd0AddrInfo, err := peer.AddrInfoFromP2pAddr(hd0p2pAddr)
52 | if err != nil {
53 | t.Fatal(err)
54 | }
55 |
56 | err = hy.Heads[1].Host.Connect(ctx, *hd0AddrInfo)
57 | if err != nil {
58 | t.Fatal(err)
59 | }
60 |
61 | c := hy.GetUniquePeersCount()
62 | if c <= 0 {
63 | t.Fatal("expected unique peers count to be greater than 0")
64 | }
65 | }
66 |
67 | func TestSpawnHydraWithCustomProtocolPrefix(t *testing.T) {
68 | ctx, cancel := context.WithCancel(hydratesting.NewContext())
69 | defer cancel()
70 |
71 | hy, err := NewHydra(ctx, Options{
72 | NHeads: 2,
73 | GetPort: utils.PortSelector(3000),
74 | ProtocolPrefix: "/myapp",
75 | DisableProviders: true,
76 | DisableValues: true,
77 | })
78 | if err != nil {
79 | t.Fatal(err)
80 | }
81 |
82 | if len(hy.Heads) != 2 {
83 | t.Fatal("expected hydra to spawn 2 heads")
84 | }
85 | }
86 |
87 | func TestSpawnHydraWithPeerstorePath(t *testing.T) {
88 | ctx, cancel := context.WithCancel(hydratesting.NewContext())
89 | defer cancel()
90 |
91 | hy, err := NewHydra(ctx, Options{
92 | NHeads: 2,
93 | GetPort: utils.PortSelector(3000),
94 | PeerstorePath: fmt.Sprintf("../hydra-pstore/test-%d", time.Now().UnixNano()),
95 | })
96 | if err != nil {
97 | t.Fatal(err)
98 | }
99 |
100 | if len(hy.Heads) != 2 {
101 | t.Fatal("expected hydra to spawn 2 heads")
102 | }
103 | }
104 |
--------------------------------------------------------------------------------
/idgen/cleaning.go:
--------------------------------------------------------------------------------
1 | package idgen
2 |
3 | import (
4 | "sync"
5 |
6 | "github.com/hashicorp/go-multierror"
7 | "github.com/libp2p/go-libp2p/core/crypto"
8 | )
9 |
10 | // CleaningIDGenerator is an identity generator that provides an extra method to
11 | // remove all previously generated identities without passing any arguments.
12 | type CleaningIDGenerator struct {
13 | idgen IdentityGenerator
14 | keys []crypto.PrivKey
15 | locker sync.Mutex
16 | }
17 |
18 | // NewCleaningIDGenerator creates a new delegated identity
19 | // generator that provides an extra method to remove all previously generated
20 | // identities without passing any arguments.
21 | func NewCleaningIDGenerator(idgen IdentityGenerator) *CleaningIDGenerator {
22 | return &CleaningIDGenerator{idgen: idgen}
23 | }
24 |
25 | // AddBalanced stores the result of calling AddBalanced on the underlying
26 | // identity generator and then returns it.
27 | func (c *CleaningIDGenerator) AddBalanced() (crypto.PrivKey, error) {
28 | pk, err := c.idgen.AddBalanced()
29 | if err != nil {
30 | return nil, err
31 | }
32 | c.locker.Lock()
33 | defer c.locker.Unlock()
34 | c.keys = append(c.keys, pk)
35 | return pk, nil
36 | }
37 |
38 | // Remove calls Remove on the underlying identity generator and also removes the
39 | // passed key from its memory of keys generated.
40 | func (c *CleaningIDGenerator) Remove(privKey crypto.PrivKey) error {
41 | err := c.idgen.Remove(privKey)
42 | if err != nil {
43 | return err
44 | }
45 | c.locker.Lock()
46 | defer c.locker.Unlock()
47 | var keys []crypto.PrivKey
48 | for _, pk := range c.keys {
49 | if !pk.Equals(privKey) {
50 | keys = append(keys, pk)
51 | }
52 | }
53 | c.keys = keys
54 | return nil
55 | }
56 |
57 | // Clean removes ALL previously generated keys by calling Remove on the
58 | // underlying identity generator for each key in its memory.
59 | func (c *CleaningIDGenerator) Clean() error {
60 | var errs error
61 | c.locker.Lock()
62 | defer c.locker.Unlock()
63 | for _, pk := range c.keys {
64 | err := c.idgen.Remove(pk)
65 | if err != nil {
66 | errs = multierror.Append(errs, err)
67 | }
68 | }
69 | c.keys = []crypto.PrivKey{}
70 | return errs
71 | }
72 |
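A small sketch of the intended lifecycle, assuming the shared HydraIdentityGenerator from idgen.go; the count of five identities is arbitrary:

    package main

    import (
        "log"

        "github.com/libp2p/hydra-booster/idgen"
    )

    func main() {
        // Wrap the shared generator so every identity handed out here can be
        // forgotten again in a single call.
        cleaner := idgen.NewCleaningIDGenerator(idgen.HydraIdentityGenerator)

        for i := 0; i < 5; i++ {
            if _, err := cleaner.AddBalanced(); err != nil {
                log.Fatal(err)
            }
        }

        // Remove all five identities from the underlying generator at once.
        if err := cleaner.Clean(); err != nil {
            log.Fatal(err)
        }
    }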
--------------------------------------------------------------------------------
/idgen/cleaning_test.go:
--------------------------------------------------------------------------------
1 | package idgen
2 |
3 | import (
4 | "testing"
5 | )
6 |
7 | func TestCleaningIDGenerator(t *testing.T) {
8 | bidg := NewBalancedIdentityGenerator()
9 |
10 | count := bidg.Count()
11 | if count != 0 {
12 | t.Fatal("unexpected count")
13 | }
14 |
15 | didg := NewCleaningIDGenerator(bidg)
16 | _, err := didg.AddBalanced()
17 | if err != nil {
18 | t.Fatal(err)
19 | }
20 |
21 | pk, err := didg.AddBalanced()
22 | if err != nil {
23 | t.Fatal(err)
24 | }
25 |
26 | count = bidg.Count()
27 | if count != 2 {
28 | t.Fatal("unexpected count")
29 | }
30 |
31 | err = didg.Remove(pk)
32 | if err != nil {
33 | t.Fatal(err)
34 | }
35 |
36 | count = bidg.Count()
37 | if count != 1 {
38 | t.Fatal("unexpected count")
39 | }
40 |
41 | err = didg.Clean()
42 | if err != nil {
43 | t.Fatal(err)
44 | }
45 |
46 | count = bidg.Count()
47 | if count != 0 {
48 | t.Fatal("unexpected count")
49 | }
50 | }
51 |
--------------------------------------------------------------------------------
/idgen/delegated.go:
--------------------------------------------------------------------------------
1 | package idgen
2 |
3 | import (
4 | "bytes"
5 | "encoding/base64"
6 | "encoding/json"
7 | "fmt"
8 | "net/http"
9 |
10 | "github.com/libp2p/go-libp2p/core/crypto"
11 | )
12 |
13 | // DelegatedIDGenerator is an identity generator whose work is delegated to
14 | // another worker.
15 | type DelegatedIDGenerator struct {
16 | addr string
17 | }
18 |
19 | // NewDelegatedIDGenerator creates a new delegated identity generator whose
20 | // work is delegated to another worker. The delegate must be reachable on the
21 | // passed HTTP address and respond to HTTP POST messages sent to the following
22 | // endpoints:
23 | // `/idgen/add` - returns a JSON string, a base64 encoded private key.
24 | // `/idgen/remove` - accepts a JSON string, a base64 encoded private key.
25 | func NewDelegatedIDGenerator(addr string) *DelegatedIDGenerator {
26 | return &DelegatedIDGenerator{addr: addr}
27 | }
28 |
29 | // AddBalanced generates a balanced random identity by sending an HTTP POST
30 | // request to `/idgen/add`.
31 | func (g *DelegatedIDGenerator) AddBalanced() (crypto.PrivKey, error) {
32 | res, err := http.Post(fmt.Sprintf("%s/idgen/add", g.addr), "application/json", nil)
33 | if err != nil {
34 | return nil, err
35 | }
36 | defer res.Body.Close()
37 |
38 | if res.StatusCode != 200 {
39 | return nil, fmt.Errorf("unexpected HTTP status %d", res.StatusCode)
40 | }
41 |
42 | dec := json.NewDecoder(res.Body)
43 | var b64 string
44 | if err := dec.Decode(&b64); err != nil {
45 | return nil, err
46 | }
47 |
48 | bytes, err := base64.StdEncoding.DecodeString(b64)
49 | if err != nil {
50 | return nil, err
51 | }
52 |
53 | pk, err := crypto.UnmarshalPrivateKey(bytes)
54 | if err != nil {
55 | return nil, err
56 | }
57 |
58 | return pk, nil
59 | }
60 |
61 | // Remove removes a previously generated identity by sending an HTTP POST request
62 | // to `/idgen/remove`.
63 | func (g *DelegatedIDGenerator) Remove(privKey crypto.PrivKey) error {
64 | b, err := crypto.MarshalPrivateKey(privKey)
65 | if err != nil {
66 | return err
67 | }
68 |
69 | data, err := json.Marshal(base64.StdEncoding.EncodeToString(b))
70 | if err != nil {
71 | return err
72 | }
73 |
74 | res, err := http.Post(fmt.Sprintf("%s/idgen/remove", g.addr), "application/json", bytes.NewReader(data))
75 | if err != nil {
76 | return err
77 | }
78 | defer res.Body.Close()
79 |
80 | if res.StatusCode != 204 {
81 | return fmt.Errorf("unexpected HTTP status %d", res.StatusCode)
82 | }
83 |
84 | return nil
85 | }
86 |
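A sketch of pairing this client with the /idgen endpoints served by httpapi; the address is invented, and any Hydra running httpapi.ListenAndServe would do as the delegate:

    package main

    import (
        "log"

        "github.com/libp2p/hydra-booster/idgen"
    )

    func main() {
        g := idgen.NewDelegatedIDGenerator("http://127.0.0.1:7779")

        pk, err := g.AddBalanced() // POST /idgen/add on the delegate
        if err != nil {
            log.Fatal(err)
        }

        if err := g.Remove(pk); err != nil { // POST /idgen/remove
            log.Fatal(err)
        }
    }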
--------------------------------------------------------------------------------
/idgen/delegated_test.go:
--------------------------------------------------------------------------------
1 | package idgen
2 |
3 | import (
4 | "encoding/base64"
5 | "encoding/json"
6 | "net"
7 | "net/http"
8 | "testing"
9 |
10 | "github.com/libp2p/go-libp2p/core/crypto"
11 | )
12 |
13 | func TestDelegatedAddBalanced(t *testing.T) {
14 | listener, err := net.Listen("tcp", ":0")
15 | if err != nil {
16 | t.Fatal(err)
17 | }
18 |
19 | bidg := NewBalancedIdentityGenerator()
20 |
21 | mux := http.NewServeMux()
22 | mux.HandleFunc("/idgen/add", func(w http.ResponseWriter, r *http.Request) {
23 | pk, _ := bidg.AddBalanced()
24 | b, _ := crypto.MarshalPrivateKey(pk)
25 | json.NewEncoder(w).Encode(base64.StdEncoding.EncodeToString(b))
26 | })
27 |
28 | go http.Serve(listener, mux)
29 | defer listener.Close()
30 |
31 | count := bidg.Count()
32 | if count != 0 {
33 | t.Fatal("unexpected count")
34 | }
35 |
36 | didg := NewDelegatedIDGenerator("http://" + listener.Addr().String())
37 | _, err = didg.AddBalanced()
38 | if err != nil {
39 | t.Fatal(err)
40 | }
41 |
42 | count = bidg.Count()
43 | if count != 1 {
44 | t.Fatal("unexpected count")
45 | }
46 | }
47 |
48 | func TestDelegatedRemove(t *testing.T) {
49 | listener, err := net.Listen("tcp", ":0")
50 | if err != nil {
51 | t.Fatal(err)
52 | }
53 |
54 | bidg := NewBalancedIdentityGenerator()
55 |
56 | mux := http.NewServeMux()
57 | mux.HandleFunc("/idgen/remove", func(w http.ResponseWriter, r *http.Request) {
58 | dec := json.NewDecoder(r.Body)
59 | var b64 string
60 | dec.Decode(&b64)
61 | bytes, _ := base64.StdEncoding.DecodeString(b64)
62 | pk, _ := crypto.UnmarshalPrivateKey(bytes)
63 | err = bidg.Remove(pk)
64 | if err != nil {
65 | w.WriteHeader(http.StatusInternalServerError)
66 | return
67 | }
68 | w.WriteHeader(http.StatusNoContent)
69 | })
70 |
71 | go http.Serve(listener, mux)
72 | defer listener.Close()
73 |
74 | pk, err := bidg.AddBalanced()
75 | if err != nil {
76 | t.Fatal(err)
77 | }
78 |
79 | count := bidg.Count()
80 | if count != 1 {
81 | t.Fatal("unexpected count")
82 | }
83 |
84 | didg := NewDelegatedIDGenerator("http://" + listener.Addr().String())
85 | err = didg.Remove(pk)
86 | if err != nil {
87 | t.Fatal(err)
88 | }
89 |
90 | count = bidg.Count()
91 | if count != 0 {
92 | t.Fatal("unexpected count")
93 | }
94 | }
95 |
--------------------------------------------------------------------------------
/idgen/idgen.go:
--------------------------------------------------------------------------------
1 | package idgen
2 |
3 | import (
4 | "crypto/rand"
5 | "crypto/sha256"
6 | "encoding/binary"
7 | "fmt"
8 | "math/bits"
9 | "sync"
10 | "sync/atomic"
11 |
12 | kbucket "github.com/libp2p/go-libp2p-kbucket"
13 | "github.com/libp2p/go-libp2p/core/crypto"
14 | "github.com/libp2p/go-libp2p/core/peer"
15 | "golang.org/x/crypto/hkdf"
16 | )
17 |
18 | // HydraIdentityGenerator is a shared balanced ID generator.
19 | var HydraIdentityGenerator = NewBalancedIdentityGenerator()
20 |
21 | // IdentityGenerator describes a facility that can generate IPFS private keys.
22 | type IdentityGenerator interface {
23 | AddBalanced() (crypto.PrivKey, error)
24 | Remove(privKey crypto.PrivKey) error
25 | }
26 |
27 | // BalancedIdentityGenerator is a facility for generating IPFS identities (i.e. IPFS private keys),
28 | // whose corresponding DHT keys are highly balanced, compared to just generating random keys.
29 | // Balancing is accomplished using "the power of two choices" paradigm:
30 | // https://www.eecs.harvard.edu/~michaelm/postscripts/mythesis.pdf
31 | //
32 | // New identities are generated by calling AddBalanced. BalancedIdentityGenerator remembers
33 | // generated identities, in order to ensure balance for future identities.
34 | // Generated identities can be removed using Remove.
35 | //
36 | // BalancedIdentityGenerator maintains the invariant that all identities, presently in its memory,
37 | // form an almost-perfectly balanced set.
38 | type BalancedIdentityGenerator struct {
39 | sync.Mutex
40 | xorTrie *XorTrie
41 | count int
42 | idgenCount uint32
43 | seed []byte
44 | }
45 |
46 | func RandomSeed() (blk []byte) {
47 | blk = make([]byte, 32)
48 | rand.Read(blk)
49 | return blk
50 | }
51 |
52 | // NewBalancedIdentityGenerator creates a new balanced identity generator.
53 | func NewBalancedIdentityGenerator() *BalancedIdentityGenerator {
54 | seed := RandomSeed()
55 | return NewBalancedIdentityGeneratorFromSeed(seed, 0)
56 | }
57 |
58 | func NewBalancedIdentityGeneratorFromSeed(seed []byte, idOffset int) *BalancedIdentityGenerator {
59 | idGenerator := &BalancedIdentityGenerator{
60 | xorTrie: NewXorTrie(),
61 | seed: seed,
62 | }
63 | for i := 0; i < idOffset; i++ {
64 | idGenerator.AddBalanced()
65 | }
66 | return idGenerator
67 | }
68 |
69 | // AddUnbalanced is used for testing purposes. It generates a purely random identity,
70 | // which is not balanced with respect to the existing identities in the generator.
71 | // The generated identity is stored in the generator's memory.
72 | func (bg *BalancedIdentityGenerator) AddUnbalanced() (crypto.PrivKey, error) {
73 | bg.Lock()
74 | defer bg.Unlock()
75 | p0, t0, _, err0 := bg.genUniqueID()
76 | if err0 != nil {
77 | return nil, fmt.Errorf("generating unbalanced ID candidate, %w", err0)
78 | }
79 | bg.xorTrie.Insert(t0)
80 | bg.count++
81 | return p0, nil
82 | }
83 |
84 | // AddBalanced generates a random identity, which
85 | // is balanced with respect to the existing identities in the generator.
86 | // The generated identity is stored in the generator's memory.
87 | func (bg *BalancedIdentityGenerator) AddBalanced() (crypto.PrivKey, error) {
88 | bg.Lock()
89 | defer bg.Unlock()
90 | p0, t0, d0, err0 := bg.genUniqueID()
91 | if err0 != nil {
92 | return nil, fmt.Errorf("generating first balanced ID candidate, %w", err0)
93 | }
94 | p1, t1, d1, err1 := bg.genUniqueID()
95 | if err1 != nil {
96 | return nil, fmt.Errorf("generating second balanced ID candidate, %w", err1)
97 | }
98 | if d0 < d1 {
99 | bg.xorTrie.Insert(t0)
100 | bg.count++
101 | return p0, nil
102 | } else {
103 | bg.xorTrie.Insert(t1)
104 | bg.count++
105 | return p1, nil
106 | }
107 | }
108 |
109 | func (bg *BalancedIdentityGenerator) genUniqueID() (privKey crypto.PrivKey, trieKey TrieKey, depth int, err error) {
110 | for {
111 | if privKey, trieKey, err = bg.genID(); err != nil {
112 | return nil, nil, 0, err
113 | }
114 | if depth, ok := bg.xorTrie.Insert(trieKey); ok {
115 | bg.xorTrie.Remove(trieKey)
116 | return privKey, trieKey, depth, nil
117 | }
118 | }
119 | }
120 |
121 | // Remove removes a previously generated identity from the generator's memory.
122 | func (bg *BalancedIdentityGenerator) Remove(privKey crypto.PrivKey) error {
123 | bg.Lock()
124 | defer bg.Unlock()
125 | if trieKey, err := privKeyToTrieKey(privKey); err != nil {
126 | return err
127 | } else {
128 | if _, ok := bg.xorTrie.Remove(trieKey); ok {
129 | bg.count--
130 | }
131 | return nil
132 | }
133 | }
134 |
135 | func (bg *BalancedIdentityGenerator) Count() int {
136 | bg.Lock()
137 | defer bg.Unlock()
138 | return bg.count
139 | }
140 |
141 | func (bg *BalancedIdentityGenerator) Depth() int {
142 | bg.Lock()
143 | defer bg.Unlock()
144 | return bg.xorTrie.Depth()
145 | }
146 |
147 | func (bg *BalancedIdentityGenerator) genID() (crypto.PrivKey, TrieKey, error) {
148 | hash := sha256.New
149 | info := []byte("hydra keys")
150 | seed := bg.seed
151 | salt := atomic.AddUint32(&bg.idgenCount, 1)
152 | salt_bytes := make([]byte, 4)
153 | binary.LittleEndian.PutUint32(salt_bytes, salt)
154 | privKey, _, err := crypto.GenerateKeyPairWithReader(crypto.Ed25519, 0, hkdf.New(hash, seed, salt_bytes, info))
155 | if err != nil {
156 | return nil, nil, fmt.Errorf("generating private key for trie, %w", err)
157 | }
158 | trieKey, err := privKeyToTrieKey(privKey)
159 | if err != nil {
160 | return nil, nil, fmt.Errorf("converting private key to a trie key, %w", err)
161 | }
162 | return privKey, trieKey, nil
163 | }
164 |
165 | // PrivKey -> PeerID -> KadID -> TrieKey
166 | func privKeyToTrieKey(privKey crypto.PrivKey) (TrieKey, error) {
167 | peerID, err := peer.IDFromPrivateKey(privKey)
168 | if err != nil {
169 | return nil, err
170 | }
171 | kadID := kbucket.ConvertPeerID(peerID)
172 | trieKey := TrieKey(reversePerByteBits(kadID))
173 | return trieKey, nil
174 | }
175 |
176 | // reversePerByteBits reverses the bit-endianness of each byte in a slice.
177 | func reversePerByteBits(blob []byte) []byte {
178 | for i := range blob {
179 | blob[i] = bits.Reverse8(blob[i])
180 | }
181 | return blob
182 | }
183 |
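A brief sketch of turning a generated key into a libp2p peer identity, using only APIs this file already imports:

    package main

    import (
        "fmt"
        "log"

        "github.com/libp2p/go-libp2p/core/peer"
        "github.com/libp2p/hydra-booster/idgen"
    )

    func main() {
        pk, err := idgen.HydraIdentityGenerator.AddBalanced()
        if err != nil {
            log.Fatal(err)
        }
        pid, err := peer.IDFromPrivateKey(pk)
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println("balanced peer ID:", pid)
    }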
--------------------------------------------------------------------------------
/idgen/idgen_test.go:
--------------------------------------------------------------------------------
1 | package idgen
2 |
3 | import (
4 | "testing"
5 |
6 | "github.com/libp2p/go-libp2p/core/crypto"
7 | )
8 |
9 | func TestBalancedGeneration(t *testing.T) {
10 | const N = 10000
11 |
12 | genBalanced := NewBalancedIdentityGenerator()
13 | for i := 0; i < N; i++ {
14 | if _, err := genBalanced.AddBalanced(); err != nil {
15 | t.Errorf("adding balanced ID, %s", err)
16 | }
17 | }
18 |
19 | genUnbalanced := NewBalancedIdentityGenerator()
20 | for i := 0; i < N; i++ {
21 | if _, err := genUnbalanced.AddUnbalanced(); err != nil {
22 | t.Errorf("adding unbalanced ID, %s", err)
23 | }
24 | }
25 |
26 | if dBal, dUnbal := genBalanced.Depth(), genUnbalanced.Depth(); dBal > dUnbal {
27 | t.Errorf("balanced depth %d is bigger than unbalanced depth %d\n", dBal, dUnbal)
28 | }
29 | }
30 |
31 | func TestGenFromSeed(t *testing.T) {
32 | seed := RandomSeed()
33 | bg1 := NewBalancedIdentityGeneratorFromSeed(seed, 0)
34 | bg2 := NewBalancedIdentityGeneratorFromSeed(seed, 0)
35 | const N = 10000
36 | for i := 0; i < N; i++ {
37 | bg1_id, err := bg1.AddBalanced()
38 | if err != nil {
39 | t.Error("bg1_id creation error")
40 | }
41 | bg2_id, err := bg2.AddBalanced()
42 | if err != nil {
43 | t.Error("bg2_id creation error")
44 | }
45 | if !crypto.KeyEqual(bg1_id, bg2_id) {
46 | t.Error("IDs not same with same seed")
47 | }
48 | }
49 | // To generate N IDs, we should have tried 2*N candidates
50 | if bg1.idgenCount != 2*N {
51 | t.Errorf("bg1 should have %d items but it has %d", 2*N, bg1.idgenCount)
52 | }
53 | if bg2.idgenCount != 2*N {
54 | t.Errorf("bg2 should have %d items but it has %d", 2*N, bg2.idgenCount)
55 | }
56 | }
57 |
58 | func TestWithOffSet(t *testing.T) {
59 | seed := RandomSeed()
60 | bg1 := NewBalancedIdentityGeneratorFromSeed(seed, 0)
61 | bg2 := NewBalancedIdentityGeneratorFromSeed(seed, 100)
62 |
63 | // After generating the first 100 IDs from bg1
64 | for i := 0; i < 100; i++ {
65 | bg1.AddBalanced()
66 | }
67 | // It should start to be the same as bg2
68 | for i := 0; i < 100; i++ {
69 | bg1_id, err := bg1.AddBalanced()
70 | if err != nil {
71 | t.Error("bg1_id creation error")
72 | }
73 | bg2_id, err := bg2.AddBalanced()
74 | if err != nil {
75 | t.Error("bg2_id creation error")
76 | }
77 | if !crypto.KeyEqual(bg1_id, bg2_id) {
78 | t.Error("IDs not same with same seed")
79 | }
80 | }
81 | }
82 |
--------------------------------------------------------------------------------
/idgen/xortrie.go:
--------------------------------------------------------------------------------
1 | package idgen
2 |
3 | import "bytes"
4 |
5 | // TrieKey is a vector of bits backed by a Go byte slice in big-endian byte order and least-significant-bit-first order within each byte (see BitAt).
6 | type TrieKey []byte
7 |
8 | func (bs TrieKey) BitAt(offset int) byte {
9 | if bs[offset/8]&(1<<(offset%8)) == 0 {
10 | return 0
11 | } else {
12 | return 1
13 | }
14 | }
15 |
16 | func (bs TrieKey) BitLen() int {
17 | return 8 * len(bs)
18 | }
19 |
20 | func TrieKeyEqual(x, y TrieKey) bool {
21 | return bytes.Equal(x, y)
22 | }
23 |
24 | // XorTrie is a trie for equal-length bit vectors, which stores values only in the leaves.
25 | type XorTrie struct {
26 | branch [2]*XorTrie
27 | key TrieKey
28 | }
29 |
30 | func NewXorTrie() *XorTrie {
31 | return &XorTrie{}
32 | }
33 |
34 | func (trie *XorTrie) Depth() int {
35 | return trie.depth(0)
36 | }
37 |
38 | func (trie *XorTrie) depth(depth int) int {
39 | if trie.branch[0] == nil && trie.branch[1] == nil {
40 | return depth
41 | } else {
42 | return max(trie.branch[0].depth(depth+1), trie.branch[1].depth(depth+1))
43 | }
44 | }
45 |
46 | func max(x, y int) int {
47 | if x > y {
48 | return x
49 | }
50 | return y
51 | }
52 |
53 | func (trie *XorTrie) Insert(q TrieKey) (insertedDepth int, insertedOK bool) {
54 | return trie.insert(0, q)
55 | }
56 |
57 | func (trie *XorTrie) insert(depth int, q TrieKey) (insertedDepth int, insertedOK bool) {
58 | if qb := trie.branch[q.BitAt(depth)]; qb != nil {
59 | return qb.insert(depth+1, q)
60 | } else {
61 | if trie.key == nil {
62 | trie.key = q
63 | return depth, true
64 | } else {
65 | if TrieKeyEqual(trie.key, q) {
66 | return depth, false
67 | } else {
68 | p := trie.key
69 | trie.key = nil
70 | // both branches are nil
71 | trie.branch[0], trie.branch[1] = &XorTrie{}, &XorTrie{}
72 | trie.branch[p.BitAt(depth)].insert(depth+1, p)
73 | return trie.branch[q.BitAt(depth)].insert(depth+1, q)
74 | }
75 | }
76 | }
77 | }
78 |
79 | func (trie *XorTrie) Remove(q TrieKey) (removedDepth int, removed bool) {
80 | return trie.remove(0, q)
81 | }
82 |
83 | func (trie *XorTrie) remove(depth int, q TrieKey) (reachedDepth int, removed bool) {
84 | if qb := trie.branch[q.BitAt(depth)]; qb != nil {
85 | if d, ok := qb.remove(depth+1, q); ok {
86 | trie.shrink()
87 | return d, true
88 | } else {
89 | return d, false
90 | }
91 | } else {
92 | if trie.key != nil && TrieKeyEqual(q, trie.key) {
93 | trie.key = nil
94 | return depth, true
95 | } else {
96 | return depth, false
97 | }
98 | }
99 | }
100 |
101 | func (trie *XorTrie) isEmptyLeaf() bool {
102 | return trie.key == nil && trie.branch[0] == nil && trie.branch[1] == nil
103 | }
104 |
105 | func (trie *XorTrie) isNonEmptyLeaf() bool {
106 | return trie.key != nil && trie.branch[0] == nil && trie.branch[1] == nil
107 | }
108 |
109 | func (trie *XorTrie) shrink() {
110 | b0, b1 := trie.branch[0], trie.branch[1]
111 | switch {
112 | case b0.isEmptyLeaf() && b1.isEmptyLeaf():
113 | trie.branch[0], trie.branch[1] = nil, nil
114 | case b0.isEmptyLeaf() && b1.isNonEmptyLeaf():
115 | trie.key = b1.key
116 | trie.branch[0], trie.branch[1] = nil, nil
117 | case b0.isNonEmptyLeaf() && b1.isEmptyLeaf():
118 | trie.key = b0.key
119 | trie.branch[0], trie.branch[1] = nil, nil
120 | }
121 | }
122 |
--------------------------------------------------------------------------------
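
A minimal usage sketch for the trie above (a hedged example in the same package; the depths follow from the LSB-first bit order exercised by xortrie_test.go below):

```go
package idgen

import "fmt"

// ExampleXorTrie walks the lifecycle the ID generator relies on:
// insert fixed-width keys, read the maximum depth, remove keys.
func ExampleXorTrie() {
	trie := NewXorTrie()
	trie.Insert(TrieKey([]byte{0x0})) // first key fills the root leaf at depth 0
	depth, ok := trie.Insert(TrieKey([]byte{0x1}))
	fmt.Println(depth, ok) // 0x0 and 0x1 first differ at bit 0, so the root splits once
	fmt.Println(trie.Depth())
	trie.Remove(TrieKey([]byte{0x1})) // shrink() folds the trie back to a single leaf
	fmt.Println(trie.Depth())
	// Output:
	// 1 true
	// 1
	// 0
}
```
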
/idgen/xortrie_test.go:
--------------------------------------------------------------------------------
1 | package idgen
2 |
3 | import "testing"
4 |
5 | func TestInsertRemove(t *testing.T) {
6 | r := NewXorTrie()
7 | testSeq(r, t)
8 | testSeq(r, t)
9 | }
10 |
11 | func testSeq(r *XorTrie, t *testing.T) {
12 | for _, s := range testInsertSeq {
13 | depth, _ := r.Insert(TrieKey(s.key))
14 | if depth != s.insertedDepth {
15 | t.Errorf("inserting expected %d, got %d", s.insertedDepth, depth)
16 | }
17 | }
18 | for _, s := range testRemoveSeq {
19 | depth, _ := r.Remove(TrieKey(s.key))
20 | if depth != s.reachedDepth {
21 | t.Errorf("removing expected %d, got %d", s.reachedDepth, depth)
22 | }
23 | }
24 | }
25 |
26 | var testInsertSeq = []struct {
27 | key []byte
28 | insertedDepth int
29 | }{
30 | {key: []byte{0x0}, insertedDepth: 0},
31 | {key: []byte{0x1}, insertedDepth: 1},
32 | {key: []byte{0x8}, insertedDepth: 4},
33 | {key: []byte{0x3}, insertedDepth: 2},
34 | {key: []byte{0x4}, insertedDepth: 3},
35 | }
36 |
37 | var testRemoveSeq = []struct {
38 | key []byte
39 | reachedDepth int
40 | }{
41 | {key: []byte{0x0}, reachedDepth: 4},
42 | {key: []byte{0x8}, reachedDepth: 3},
43 | {key: []byte{0x4}, reachedDepth: 1},
44 | {key: []byte{0x1}, reachedDepth: 2},
45 | {key: []byte{0x3}, reachedDepth: 0},
46 | }
47 |
--------------------------------------------------------------------------------
/k8s/README.md:
--------------------------------------------------------------------------------
1 | # Example Kubernetes Configuration
2 |
3 | These are the configuration files used to deploy the Hydra Booster nodes that operate on the IPFS network.
4 |
5 | ## Deploying to DigitalOcean
6 |
7 | First create a cluster with some machines. A 50-head Hydra requires about `12Gi` of RAM.
8 |
9 | Next install [`doctl`](https://github.com/digitalocean/doctl) and [`kubectl`](https://kubernetes.io/docs/tasks/tools/install-kubectl/) and run the following commands to deploy Hydras:
10 |
11 | ```sh
12 | # Get k8s config and set it as the current context
13 | doctl kubernetes cluster kubeconfig save
14 | # Create the namespace that hydras run in
15 | kubectl create -f k8s/namespace.yaml
16 | kubectl apply -f k8s/alasybil.yaml
17 | kubectl apply -f k8s/bubbles.yaml
18 | kubectl apply -f k8s/chumpy.yaml
19 | kubectl apply -f k8s/domino.yaml
20 | kubectl apply -f k8s/euclid.yaml
21 | kubectl apply -f k8s/flake.yaml
22 | kubectl apply -f k8s/grendel.yaml
23 | kubectl apply -f k8s/hojo.yaml
24 | kubectl apply -f k8s/ibycus.yaml
25 | kubectl apply -f k8s/jetta.yaml
26 | ```
27 |
28 | ## Updating a deployment
29 |
30 | The config uses the latest `libp2p/hydra-booster:master` image, so if you've tagged and pushed a new version, all you need to do is scale each deployment down and back up:
31 |
32 | ```sh
33 | # Scale down all deployments
34 | kubectl scale deployment/alasybil-deployment --replicas=0 -n hydra-boosters
35 | kubectl scale deployment/bubbles-deployment --replicas=0 -n hydra-boosters
36 | kubectl scale deployment/chumpy-deployment --replicas=0 -n hydra-boosters
37 | kubectl scale deployment/domino-deployment --replicas=0 -n hydra-boosters
38 | kubectl scale deployment/euclid-deployment --replicas=0 -n hydra-boosters
39 | kubectl scale deployment/flake-deployment --replicas=0 -n hydra-boosters
40 | kubectl scale deployment/grendel-deployment --replicas=0 -n hydra-boosters
41 | kubectl scale deployment/hojo-deployment --replicas=0 -n hydra-boosters
42 | kubectl scale deployment/ibycus-deployment --replicas=0 -n hydra-boosters
43 | kubectl scale deployment/jetta-deployment --replicas=0 -n hydra-boosters
44 |
45 | # Scale up all deployments
46 | kubectl scale deployment/alasybil-deployment --replicas=1 -n hydra-boosters
47 | kubectl scale deployment/bubbles-deployment --replicas=1 -n hydra-boosters
48 | kubectl scale deployment/chumpy-deployment --replicas=1 -n hydra-boosters
49 | kubectl scale deployment/domino-deployment --replicas=1 -n hydra-boosters
50 | kubectl scale deployment/euclid-deployment --replicas=1 -n hydra-boosters
51 | kubectl scale deployment/flake-deployment --replicas=1 -n hydra-boosters
52 | kubectl scale deployment/grendel-deployment --replicas=1 -n hydra-boosters
53 | kubectl scale deployment/hojo-deployment --replicas=1 -n hydra-boosters
54 | kubectl scale deployment/ibycus-deployment --replicas=1 -n hydra-boosters
55 | kubectl scale deployment/jetta-deployment --replicas=1 -n hydra-boosters
56 | ```
57 |
58 | ## Deploying a branch
59 |
60 | 1. Publish a new tagged image to Docker Hub, e.g. we use `libp2p/hydra-booster:next` for smoke testing upcoming releases. Add the tag to `docker commit` and `docker push` when [publishing](https://github.com/libp2p/hydra-booster#publish-a-new-image).
61 | 2. Update the `image:` property in the [deployment spec](https://github.com/libp2p/hydra-booster/blob/30b2924b519aeee8f3ff6c3e87e1215ea65e81ad/k8s/alasybil.yaml#L38) for the hydra(s) you want to use the image.
62 | 3. Apply the updated config to the hydra(s) using `kubectl apply -f k8s/HYDRA_NAME.yaml` (see the example below).
63 |
--------------------------------------------------------------------------------
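
For example, smoke testing a branch through the `next` tag might look like this (image tag and hydra name are illustrative; the authoritative publish steps are in the main README linked above):

```sh
# 1. Publish the branch image under the `next` tag
docker build -t libp2p/hydra-booster:next .
docker push libp2p/hydra-booster:next

# 2. Edit the `image:` property in k8s/alasybil.yaml to libp2p/hydra-booster:next

# 3. Apply the updated config to that hydra
kubectl apply -f k8s/alasybil.yaml
```
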
/k8s/namespace.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Namespace
3 | metadata:
4 | name: hydra-boosters
5 | labels:
6 | name: hydra-boosters
7 |
--------------------------------------------------------------------------------
/main.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "context"
5 | "encoding/base64"
6 | "flag"
7 | "fmt"
8 | "log"
9 | "math/rand"
10 | "os"
11 | "os/signal"
12 | "strconv"
13 | "strings"
14 | "syscall"
15 | "time"
16 |
17 | dht "github.com/libp2p/go-libp2p-kad-dht"
18 | "github.com/libp2p/go-libp2p/core/crypto"
19 | "github.com/libp2p/go-libp2p/core/protocol"
20 | "github.com/libp2p/hydra-booster/httpapi"
21 | "github.com/libp2p/hydra-booster/hydra"
22 | "github.com/libp2p/hydra-booster/idgen"
23 | "github.com/libp2p/hydra-booster/metrics"
24 | hyui "github.com/libp2p/hydra-booster/ui"
25 | uiopts "github.com/libp2p/hydra-booster/ui/opts"
26 | "github.com/libp2p/hydra-booster/utils"
27 | "github.com/multiformats/go-multiaddr"
28 | )
29 |
30 | const (
31 | defaultBucketSize = 20
32 | defaultMetricsAddr = "127.0.0.1:9758"
33 | defaultHTTPAPIAddr = "127.0.0.1:7779"
34 | defaultConnMgrHighWater = 1800
35 | defaultConnMgrLowWater = 1200
36 | defaultConnMgrGracePeriod = "60s"
37 | )
38 |
39 | func main() {
40 | start := time.Now()
41 | nheads := flag.Int("nheads", -1, "Specify the number of Hydra heads to create.")
42 | randomSeed := flag.String("random-seed", "", "Seed to use to generate IDs (useful if you want to have persistent IDs). Should be Base64 encoded and 256 bits")
43 | idOffset := flag.Int("id-offset", -1, "What offset in the sequence of keys generated from random-seed to start from")
44 | dbpath := flag.String("db", "", "Datastore directory (for LevelDB store) or postgresql:// connection URI (for PostgreSQL store) or 'dynamodb://table='")
45 | pstorePath := flag.String("pstore", "", "Peerstore directory for LevelDB store (defaults to in-memory store)")
46 | providerStore := flag.String("provider-store", "", "A non-default provider store to use, either \"none\" or \"dynamodb://table=,ttl=,queryLimit=\"")
47 | httpAPIAddr := flag.String("httpapi-addr", defaultHTTPAPIAddr, "Specify an IP and port to run the HTTP API server on")
48 | delegateTimeout := flag.Int("delegate-timeout", 0, "Timeout for delegated routing in milliseconds")
49 | inmem := flag.Bool("mem", false, "Use an in-memory database. This overrides the -db option")
50 | metricsAddr := flag.String("metrics-addr", defaultMetricsAddr, "Specify an IP and port to run Prometheus metrics and pprof HTTP server on")
51 | enableRelay := flag.Bool("enable-relay", false, "Enable libp2p circuit relaying for this node (default false).")
52 | portBegin := flag.Int("port-begin", -1, "If set, begin port allocation here")
53 | protocolPrefix := flag.String("protocol-prefix", string(dht.DefaultPrefix), "Specify the DHT protocol prefix (default \"/ipfs\")")
54 | bucketSize := flag.Int("bucket-size", defaultBucketSize, "Specify the bucket size, note that for some protocols this must be a specific value i.e. for \"/ipfs\" it MUST be 20")
55 | bootstrapConcurrency := flag.Int("bootstrap-conc", 32, "How many concurrent bootstraps to run")
56 | bootstrapPeers := flag.String("bootstrap-peers", "", "A CSV list of peer addresses to bootstrap from.")
57 | stagger := flag.Duration("stagger", 0*time.Second, "Duration to stagger nodes starts by")
58 | uiTheme := flag.String("ui-theme", "logey", "UI theme, \"logey\", \"gooey\" or \"none\" (default \"logey\")")
59 | name := flag.String("name", "", "A name for the Hydra (for use in metrics)")
60 | idgenAddr := flag.String("idgen-addr", "", "Address of an idgen HTTP API endpoint to use for generating private keys for heads")
61 | disableProvGC := flag.Bool("disable-prov-gc", false, "Disable provider record garbage collection (default false).")
62 | disableProviders := flag.Bool("disable-providers", false, "Disable storing and retrieving provider records, note that for some protocols, like \"/ipfs\", it MUST be false (default false).")
63 | disableValues := flag.Bool("disable-values", false, "Disable storing and retrieving value records, note that for some protocols, like \"/ipfs\", it MUST be false (default false).")
64 | disablePrefetch := flag.Bool("disable-prefetch", false, "Disables pre-fetching of discovered provider records (default false).")
65 | disableProvCounts := flag.Bool("disable-prov-counts", false, "Disable counting provider records for metrics reporting (default false).")
66 | disableDBCreate := flag.Bool("disable-db-create", false, "Don't create table and index in the target database (default false).")
67 | disableResourceManager := flag.Bool("disable-rcmgr", false, "Disable libp2p Resource Manager by configuring it with infinite limits (default false).")
68 | resourceManagerLimits := flag.String("rcmgr-limits", "", "Resource Manager limits JSON config (default none).")
69 | connMgrHighWater := flag.Int("connmgr-high-water", defaultConnMgrHighWater, "High water limit for the connection manager.")
70 | connMgrLowWater := flag.Int("connmgr-low-water", defaultConnMgrLowWater, "Low water limit for the connection manager.")
71 | connMgrGracePeriod := flag.String("connmgr-grace-period", defaultConnMgrGracePeriod, "Grace period for connections as a Go duration string such as \"60s\".")
72 |
73 | flag.Parse()
74 |
75 | fmt.Fprintf(os.Stderr, "🐉 Hydra Booster starting up...\n")
76 |
77 | if *inmem {
78 | *dbpath = ""
79 | } else if *dbpath == "" {
80 | *dbpath = os.Getenv("HYDRA_DB")
81 | if *dbpath == "" {
82 | *dbpath = "hydra-belly"
83 | }
84 | }
85 | if *nheads == -1 {
86 | *nheads = mustGetEnvInt("HYDRA_NHEADS", 1)
87 | }
88 | if *randomSeed == "" {
89 | *randomSeed = os.Getenv("HYDRA_RANDOM_SEED")
90 | }
91 | if *idOffset == -1 {
92 | *idOffset = mustGetEnvInt("HYDRA_ID_OFFSET", 0)
93 | }
94 | if *portBegin == -1 {
95 | *portBegin = mustGetEnvInt("HYDRA_PORT_BEGIN", 0)
96 | }
97 | if *name == "" {
98 | *name = os.Getenv("HYDRA_NAME")
99 | }
100 | if *idgenAddr == "" {
101 | *idgenAddr = os.Getenv("HYDRA_IDGEN_ADDR")
102 | }
103 | if !*disableProvGC {
104 | *disableProvGC = mustGetEnvBool("HYDRA_DISABLE_PROV_GC", false)
105 | }
106 | if *bootstrapPeers == "" {
107 | *bootstrapPeers = os.Getenv("HYDRA_BOOTSTRAP_PEERS")
108 | }
109 | if !*disablePrefetch {
110 | *disablePrefetch = mustGetEnvBool("HYDRA_DISABLE_PREFETCH", false)
111 | }
112 | if !*disableDBCreate {
113 | *disableDBCreate = mustGetEnvBool("HYDRA_DISABLE_DBCREATE", false)
114 | }
115 | if !*disableProvCounts {
116 | *disableProvCounts = mustGetEnvBool("HYDRA_DISABLE_PROV_COUNTS", false)
117 | }
118 | if *pstorePath == "" {
119 | *pstorePath = os.Getenv("HYDRA_PSTORE")
120 | }
121 | if *providerStore == "" {
122 | *providerStore = os.Getenv("HYDRA_PROVIDER_STORE")
123 | }
124 | if *delegateTimeout == 0 {
125 | *delegateTimeout = mustGetEnvInt("HYDRA_DELEGATED_ROUTING_TIMEOUT", 1000)
126 | }
127 | if !*disableResourceManager {
128 | *disableResourceManager = mustGetEnvBool("DISABLE_RCMGR", false)
129 | }
130 | if *resourceManagerLimits == "" {
131 | *resourceManagerLimits = os.Getenv("RCMGR_LIMITS")
132 | }
133 |
134 | if *connMgrHighWater == defaultConnMgrHighWater {
135 | *connMgrHighWater = mustGetEnvInt("HYDRA_CONNMGR_HIGH_WATER", defaultConnMgrHighWater)
136 | }
137 |
138 | if *connMgrLowWater == defaultConnMgrLowWater {
139 | *connMgrLowWater = mustGetEnvInt("HYDRA_CONNMGR_LOW_WATER", defaultConnMgrLowWater)
140 | }
141 |
142 | if *connMgrGracePeriod == defaultConnMgrGracePeriod {
143 | envVal := os.Getenv("HYDRA_CONNMGR_GRACE_PERIOD")
144 | if envVal != "" {
145 | *connMgrGracePeriod = envVal
146 | }
147 | }
148 | connMgrGracePeriodDuration, err := time.ParseDuration(*connMgrGracePeriod)
149 | if err != nil {
150 | log.Fatalf("parsing grace period duration: %s", err)
151 | }
152 |
153 | // Allow short keys. Otherwise, we'll refuse connections from the bootstrappers and break the network.
154 | // TODO: Remove this when we shut those bootstrappers down.
155 | crypto.MinRsaKeyBits = 1024
156 |
157 | // Seed the random number generator used by Hydra heads to select a bootstrap peer
158 | rand.Seed(time.Now().UTC().UnixNano())
159 |
160 | ctx, cancel := context.WithCancel(context.Background())
161 | defer cancel()
162 |
163 | var idGenerator idgen.IdentityGenerator
164 | if *randomSeed != "" && *idgenAddr != "" {
165 | log.Fatalln("error: Should not set both idgen-addr and random-seed")
166 | }
167 | if *randomSeed != "" {
168 | seed, err := base64.StdEncoding.DecodeString(*randomSeed)
169 | if err != nil {
170 | log.Fatalln("error: Could not base64 decode seed")
171 | }
172 | if len(seed) != 32 {
173 | log.Fatalln("error: Seed should be 256 bits, Base64 encoded")
174 | }
175 | idGenerator = idgen.NewBalancedIdentityGeneratorFromSeed(seed, *idOffset)
176 | }
177 | if *idgenAddr != "" {
178 | dg := idgen.NewCleaningIDGenerator(idgen.NewDelegatedIDGenerator(*idgenAddr))
179 | defer func() {
180 | err := dg.Clean()
181 | if err != nil {
182 | fmt.Println(err)
183 | }
184 | }()
185 | idGenerator = dg
186 | }
187 |
188 | opts := hydra.Options{
189 | Name: *name,
190 | DatastorePath: *dbpath,
191 | PeerstorePath: *pstorePath,
192 | ProviderStore: *providerStore,
193 | DelegateTimeout: time.Millisecond * time.Duration(*delegateTimeout),
194 | EnableRelay: *enableRelay,
195 | ProtocolPrefix: protocol.ID(*protocolPrefix),
196 | BucketSize: *bucketSize,
197 | GetPort: utils.PortSelector(*portBegin),
198 | NHeads: *nheads,
199 | BsCon: *bootstrapConcurrency,
200 | Stagger: *stagger,
201 | IDGenerator: idGenerator,
202 | DisableProvGC: *disableProvGC,
203 | DisableProviders: *disableProviders,
204 | DisableValues: *disableValues,
205 | BootstrapPeers: mustConvertToMultiaddr(*bootstrapPeers),
206 | DisablePrefetch: *disablePrefetch,
207 | DisableProvCounts: *disableProvCounts,
208 | DisableDBCreate: *disableDBCreate,
209 | DisableResourceManager: *disableResourceManager,
210 | ResourceManagerLimitsFile: *resourceManagerLimits,
211 |
212 | ConnMgrHighWater: *connMgrHighWater,
213 | ConnMgrLowWater: *connMgrLowWater,
214 | ConnMgrGracePeriod: connMgrGracePeriodDuration,
215 | }
216 |
217 | go func() {
218 | err := metrics.ListenAndServe(*metricsAddr)
219 | if err != nil {
220 | log.Fatalln(err)
221 | }
222 | }()
223 | fmt.Fprintf(os.Stderr, "📊 Prometheus metrics and pprof server listening on http://%v\n", *metricsAddr)
224 |
225 | hy, err := hydra.NewHydra(ctx, opts)
226 | if err != nil {
227 | log.Fatalln(err)
228 | }
229 |
230 | var ui *hyui.UI
231 | if *uiTheme != "none" {
232 | var theme hyui.Theme
233 | if *uiTheme == "gooey" {
234 | theme = hyui.Gooey
235 | }
236 |
237 | ui, err = hyui.NewUI(theme, uiopts.Start(start), uiopts.MetricsURL(fmt.Sprintf("http://%v/metrics", *metricsAddr)))
238 | if err != nil {
239 | log.Fatalln(err)
240 | }
241 |
242 | go func() {
243 | err = ui.Render(ctx)
244 | if err != nil {
245 | log.Fatalln(err)
246 | }
247 | }()
248 | }
249 |
250 | go func() {
251 | err := httpapi.ListenAndServe(hy, *httpAPIAddr)
252 | if err != nil {
253 | log.Fatalln(err)
254 | }
255 | }()
256 | fmt.Fprintf(os.Stderr, "🧩 HTTP API listening on http://%s\n", *httpAPIAddr)
257 |
258 | termChan := make(chan os.Signal, 1)
259 | signal.Notify(termChan, os.Interrupt, syscall.SIGTERM)
260 | <-termChan // Blocks here until either SIGINT or SIGTERM is received.
261 | fmt.Println("Received interrupt signal, shutting down...")
262 | }
263 |
264 | func mustGetEnvInt(key string, def int) int {
265 | if os.Getenv(key) == "" {
266 | return def
267 | }
268 | val, err := strconv.Atoi(os.Getenv(key))
269 | if err != nil {
270 | log.Fatalln(fmt.Errorf("invalid %s env value: %w", key, err))
271 | }
272 | return val
273 | }
274 |
275 | func mustGetEnvBool(key string, def bool) bool {
276 | if os.Getenv(key) == "" {
277 | return def
278 | }
279 | val, err := strconv.ParseBool(os.Getenv(key))
280 | if err != nil {
281 | log.Fatalln(fmt.Errorf("invalid %s env value: %w", key, err))
282 | }
283 | return val
284 | }
285 |
286 | func mustConvertToMultiaddr(csv string) []multiaddr.Multiaddr {
287 | var peers []multiaddr.Multiaddr
288 | if csv != "" {
289 | addrs := strings.Split(csv, ",")
290 | for _, addr := range addrs {
291 | ma, err := multiaddr.NewMultiaddr(addr)
292 | if err != nil {
293 | log.Fatalln(fmt.Errorf("invalid multiaddr %s: %w", addr, err))
294 | }
295 | peers = append(peers, ma)
296 | }
297 | }
298 | return peers
299 | }
300 |
--------------------------------------------------------------------------------
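
A hedged example invocation tying the flags above together (the binary name is assumed; the seed must be 32 random bytes, Base64 encoded, per the validation in main):

```sh
# Generate a 256-bit Base64 seed so head identities persist across restarts
SEED=$(openssl rand -base64 32)

# Run 4 heads with an in-memory datastore and the gooey UI
./hydra-booster -nheads=4 -mem -random-seed="$SEED" -ui-theme=gooey

# The same configuration via environment variables
HYDRA_NHEADS=4 HYDRA_RANDOM_SEED="$SEED" ./hydra-booster -mem
```
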
/metrics/aws.go:
--------------------------------------------------------------------------------
1 | package metrics
2 |
3 | import (
4 | "context"
5 | "errors"
6 | "fmt"
7 | "strconv"
8 | "time"
9 |
10 | awsmiddle "github.com/aws/aws-sdk-go-v2/aws/middleware"
11 | "github.com/aws/aws-sdk-go-v2/aws/retry"
12 | "github.com/aws/smithy-go"
13 | smithymiddle "github.com/aws/smithy-go/middleware"
14 | smithyhttp "github.com/aws/smithy-go/transport/http"
15 | "go.opencensus.io/stats"
16 | "go.opencensus.io/tag"
17 | )
18 |
19 | // AWSMetricsMiddleware is AWS SDK middleware that records client-side metrics for all AWS SDK requests.
20 | type AWSMetricsMiddleware struct{}
21 |
22 | func (m *AWSMetricsMiddleware) ID() string { return "AWSMetrics" }
23 | func (m *AWSMetricsMiddleware) HandleFinalize(ctx context.Context, in smithymiddle.FinalizeInput, next smithymiddle.FinalizeHandler) (smithymiddle.FinalizeOutput, smithymiddle.Metadata, error) {
24 | start := time.Now()
25 |
26 | out, md, err := next.HandleFinalize(ctx, in)
27 |
28 | service, operation := awsmiddle.GetServiceID(ctx), awsmiddle.GetOperationName(ctx)
29 |
30 | httpCode := 0
31 | if httpResp, ok := awsmiddle.GetRawResponse(md).(*smithyhttp.Response); ok {
32 | httpCode = httpResp.StatusCode
33 | }
34 | errCode := "none"
35 | var apiErr smithy.APIError
36 | if errors.As(err, &apiErr) {
37 | errCode = apiErr.ErrorCode()
38 | }
39 | tags := []tag.Mutator{
40 | tag.Upsert(KeyOperation, fmt.Sprintf("%s.%s", service, operation)),
41 | tag.Upsert(KeyHTTPCode, strconv.Itoa(httpCode)),
42 | tag.Upsert(KeyErrorCode, errCode),
43 | }
44 |
45 | t, ok := awsmiddle.GetResponseAt(md)
46 | if ok {
47 | stats.RecordWithTags(ctx, tags, AWSRequestDurationMillis.M(float64(t.Sub(start).Milliseconds())))
48 | }
49 |
50 | attemptResults, ok := retry.GetAttemptResults(md)
51 | if ok {
52 | retries := int64(0)
53 | for _, result := range attemptResults.Results {
54 | if result.Retried {
55 | retries++
56 | }
57 | }
58 | stats.RecordWithTags(ctx, tags, AWSRequests.M(int64(len(attemptResults.Results))))
59 | stats.RecordWithTags(ctx, tags, AWSRequestRetries.M(retries))
60 | }
61 |
62 | return out, md, err
63 | }
64 |
65 | var _ smithymiddle.FinalizeMiddleware = (*AWSMetricsMiddleware)(nil)
66 |
67 | // AddAWSSDKMiddleware installs the AWS metrics middleware onto an SDK stack.
68 | // Typically you append it to a config object's API options like this:
69 | //
70 | // awsCfg.APIOptions = append(awsCfg.APIOptions, metrics.AddAWSSDKMiddleware)
71 | func AddAWSSDKMiddleware(stack *smithymiddle.Stack) error {
72 | m := &AWSMetricsMiddleware{}
73 | return stack.Finalize.Add(m, smithymiddle.Before)
74 | }
75 |
--------------------------------------------------------------------------------
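
A sketch of wiring the middleware into an SDK client, following the doc comment above (the client choice is illustrative; providers/ddb_test.go builds a DynamoDB client the same way):

```go
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/dynamodb"
	"github.com/libp2p/hydra-booster/metrics"
)

func main() {
	awsCfg, err := config.LoadDefaultConfig(context.Background())
	if err != nil {
		log.Fatalln(err)
	}
	// Every client built from awsCfg now records request durations,
	// retries and error codes through the middleware above.
	awsCfg.APIOptions = append(awsCfg.APIOptions, metrics.AddAWSSDKMiddleware)
	_ = dynamodb.NewFromConfig(awsCfg)
}
```
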
/metrics/metrics.go:
--------------------------------------------------------------------------------
1 | package metrics
2 |
3 | import (
4 | "expvar"
5 | "fmt"
6 | "net/http"
7 | "net/http/pprof"
8 | "os"
9 |
10 | "contrib.go.opencensus.io/exporter/prometheus"
11 | "github.com/go-kit/log"
12 | "github.com/ipfs/go-libipfs/routing/http/client"
13 | "github.com/ncabatoff/process-exporter/collector"
14 | "github.com/ncabatoff/process-exporter/config"
15 | prom "github.com/prometheus/client_golang/prometheus"
16 | "github.com/prometheus/client_golang/prometheus/collectors"
17 | necoll "github.com/prometheus/node_exporter/collector"
18 | "go.opencensus.io/stats/view"
19 | "go.opencensus.io/tag"
20 | "go.opencensus.io/zpages"
21 | )
22 |
23 | // PrometheusNamespace is the unique prefix for metrics exported from the app
24 | var PrometheusNamespace = "hydrabooster"
25 |
26 | func buildProcCollector() (*collector.NamedProcessCollector, error) {
27 | rules := config.MatcherRules{{
28 | ExeRules: []string{os.Args[0]},
29 | }}
30 | cfg, err := rules.ToConfig()
31 | if err != nil {
32 | return nil, fmt.Errorf("building process collector config: %w", err)
33 | }
34 | proc1Collector, err := collector.NewProcessCollector(collector.ProcessCollectorOption{
35 | ProcFSPath: "/proc",
36 | Children: true,
37 | Threads: true,
38 | GatherSMaps: false,
39 | Namer: cfg.MatchNamers,
40 | })
41 | if err != nil {
42 | return nil, fmt.Errorf("creating process collector: %w", err)
43 | }
44 | return proc1Collector, nil
45 | }
46 |
47 | // ListenAndServe serves Prometheus metrics, pprof profiles, expvar and zpages on the given address.
48 | func ListenAndServe(address string) error {
49 | // setup Prometheus
50 | registry := prom.NewRegistry()
51 | goCollector := collectors.NewGoCollector()
52 | procCollector := collectors.NewProcessCollector(collectors.ProcessCollectorOpts{})
53 |
54 | nodeCollector, err := necoll.NewNodeCollector(log.NewNopLogger())
55 | if err != nil {
56 | return err
57 | }
58 |
59 | proc1Collector, err := buildProcCollector()
60 | if err != nil {
61 | return err
62 | }
63 |
64 | registry.MustRegister(goCollector, procCollector, nodeCollector, proc1Collector)
65 | pe, err := prometheus.NewExporter(prometheus.Options{
66 | Namespace: PrometheusNamespace,
67 | Registry: registry,
68 | })
69 | if err != nil {
70 | return fmt.Errorf("failed to create exporter: %w", err)
71 | }
72 |
73 | view.RegisterExporter(pe)
74 |
75 | views := DefaultViews
76 | for _, view := range client.OpenCensusViews {
77 | // add name tag to each view so we can distinguish hydra instances
78 | view.TagKeys = append(view.TagKeys, tag.MustNewKey("name"))
79 | views = append(views, view)
80 | }
81 |
82 | if err := view.Register(views...); err != nil {
83 | return fmt.Errorf("failed to register hydra views: %w", err)
84 | }
85 |
86 | mux := http.NewServeMux()
87 | zpages.Handle(mux, "/debug")
88 | mux.Handle("/metrics", pe)
89 | mux.Handle("/debug/vars", expvar.Handler())
90 |
91 | mux.HandleFunc("/debug/pprof/", pprof.Index)
92 | mux.HandleFunc("/debug/pprof/cmdline", pprof.Cmdline)
93 | mux.HandleFunc("/debug/pprof/profile", pprof.Profile)
94 | mux.HandleFunc("/debug/pprof/symbol", pprof.Symbol)
95 | mux.HandleFunc("/debug/pprof/trace", pprof.Trace)
96 |
97 | return http.ListenAndServe(address, mux)
98 | }
99 |
--------------------------------------------------------------------------------
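
With the server above running on the default address from main.go (`127.0.0.1:9758`), the useful endpoints are:

```sh
curl http://127.0.0.1:9758/metrics      # Prometheus metrics (OpenCensus views + collectors)
curl http://127.0.0.1:9758/debug/vars   # expvar JSON
go tool pprof http://127.0.0.1:9758/debug/pprof/profile   # CPU profile
```
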
/metrics/rcmgr.go:
--------------------------------------------------------------------------------
1 | package metrics
2 |
3 | import (
4 | "context"
5 | "errors"
6 | "strconv"
7 |
8 | "github.com/libp2p/go-libp2p/core/network"
9 | "github.com/libp2p/go-libp2p/core/peer"
10 | "github.com/libp2p/go-libp2p/core/protocol"
11 | rcmgr "github.com/libp2p/go-libp2p/p2p/host/resource-manager"
12 | "go.opencensus.io/stats"
13 | "go.opencensus.io/tag"
14 | )
15 |
16 | func CreateRcmgrMetrics(ctx context.Context) (rcmgr.MetricsReporter, error) {
17 | name, ok := tag.FromContext(ctx).Value(KeyName)
18 | if !ok {
19 | return nil, errors.New("context must contain a 'name' key")
20 | }
21 | return rcmgrMetrics{name: name}, nil
22 | }
23 |
24 | type rcmgrMetrics struct {
25 | name string
26 | }
27 |
28 | func getDirection(d network.Direction) string {
29 | switch d {
30 | default:
31 | return ""
32 | case network.DirInbound:
33 | return "inbound"
34 | case network.DirOutbound:
35 | return "outbound"
36 | }
37 | }
38 |
39 | func (r rcmgrMetrics) AllowConn(dir network.Direction, usefd bool) {
40 | stats.RecordWithTags(
41 | context.Background(),
42 | []tag.Mutator{
43 | tag.Upsert(KeyName, r.name),
44 | tag.Upsert(KeyDirection, getDirection(dir)),
45 | tag.Upsert(KeyUsesFD, strconv.FormatBool(usefd)),
46 | },
47 | RcmgrConnsAllowed.M(1),
48 | RcmgrConnsBlocked.M(0),
49 | )
50 | }
51 |
52 | func (r rcmgrMetrics) BlockConn(dir network.Direction, usefd bool) {
53 | stats.RecordWithTags(
54 | context.Background(),
55 | []tag.Mutator{
56 | tag.Upsert(KeyName, r.name),
57 | tag.Upsert(KeyDirection, getDirection(dir)),
58 | tag.Upsert(KeyUsesFD, strconv.FormatBool(usefd)),
59 | },
60 | RcmgrConnsAllowed.M(0),
61 | RcmgrConnsBlocked.M(1),
62 | )
63 | }
64 |
65 | func (r rcmgrMetrics) AllowStream(_ peer.ID, dir network.Direction) {
66 | stats.RecordWithTags(
67 | context.Background(),
68 | []tag.Mutator{
69 | tag.Upsert(KeyName, r.name),
70 | tag.Upsert(KeyDirection, getDirection(dir)),
71 | },
72 | RcmgrStreamsAllowed.M(1),
73 | RcmgrStreamsBlocked.M(0),
74 | )
75 | }
76 |
77 | func (r rcmgrMetrics) BlockStream(_ peer.ID, dir network.Direction) {
78 | stats.RecordWithTags(
79 | context.Background(),
80 | []tag.Mutator{
81 | tag.Upsert(KeyName, r.name),
82 | tag.Upsert(KeyDirection, getDirection(dir)),
83 | },
84 | RcmgrStreamsAllowed.M(0),
85 | RcmgrStreamsBlocked.M(1),
86 | )
87 | }
88 |
89 | func (r rcmgrMetrics) AllowPeer(_ peer.ID) {
90 | stats.RecordWithTags(
91 | context.Background(),
92 | []tag.Mutator{
93 | tag.Upsert(KeyName, r.name),
94 | },
95 | RcmgrPeersAllowed.M(1),
96 | RcmgrPeersBlocked.M(0),
97 | )
98 | }
99 |
100 | func (r rcmgrMetrics) BlockPeer(_ peer.ID) {
101 | stats.RecordWithTags(
102 | context.Background(),
103 | []tag.Mutator{
104 | tag.Upsert(KeyName, r.name),
105 | },
106 | RcmgrPeersAllowed.M(0),
107 | RcmgrPeersBlocked.M(1),
108 | )
109 | }
110 |
111 | func (r rcmgrMetrics) AllowProtocol(proto protocol.ID) {
112 | stats.RecordWithTags(
113 | context.Background(),
114 | []tag.Mutator{
115 | tag.Upsert(KeyName, r.name),
116 | tag.Upsert(KeyProtocol, string(proto)),
117 | },
118 | RcmgrProtocolsAllowed.M(1),
119 | RcmgrProtocolsBlocked.M(0),
120 | )
121 | }
122 |
123 | func (r rcmgrMetrics) BlockProtocol(proto protocol.ID) {
124 | stats.RecordWithTags(
125 | context.Background(),
126 | []tag.Mutator{
127 | tag.Upsert(KeyName, r.name),
128 | tag.Upsert(KeyProtocol, string(proto)),
129 | },
130 | RcmgrProtocolsAllowed.M(0),
131 | RcmgrProtocolsBlocked.M(1),
132 | )
133 | }
134 |
135 | func (r rcmgrMetrics) BlockProtocolPeer(proto protocol.ID, _ peer.ID) {
136 | stats.RecordWithTags(
137 | context.Background(),
138 | []tag.Mutator{
139 | tag.Upsert(KeyName, r.name),
140 | tag.Upsert(KeyProtocol, string(proto)),
141 | },
142 | RcmgrProtocolPeersBlocked.M(1),
143 | )
144 | }
145 |
146 | func (r rcmgrMetrics) AllowService(svc string) {
147 | stats.RecordWithTags(
148 | context.Background(),
149 | []tag.Mutator{
150 | tag.Upsert(KeyName, r.name),
151 | tag.Upsert(KeyService, svc),
152 | },
153 | RcmgrServiceAllowed.M(1),
154 | RcmgrServiceBlocked.M(0),
155 | )
156 | }
157 |
158 | func (r rcmgrMetrics) BlockService(svc string) {
159 | stats.RecordWithTags(
160 | context.Background(),
161 | []tag.Mutator{
162 | tag.Upsert(KeyName, r.name),
163 | tag.Upsert(KeyService, svc),
164 | },
165 | RcmgrServiceAllowed.M(0),
166 | RcmgrServiceBlocked.M(1),
167 | )
168 | }
169 |
170 | func (r rcmgrMetrics) BlockServicePeer(svc string, _ peer.ID) {
171 | stats.RecordWithTags(
172 | context.Background(),
173 | []tag.Mutator{
174 | tag.Upsert(KeyName, r.name),
175 | tag.Upsert(KeyService, svc),
176 | },
177 | RcmgrServicePeersBlocked.M(1),
178 | )
179 | }
180 |
181 | func (r rcmgrMetrics) AllowMemory(_ int) {
182 | stats.RecordWithTags(
183 | context.Background(),
184 | []tag.Mutator{
185 | tag.Upsert(KeyName, r.name),
186 | },
187 | RcmgrMemoryAllowed.M(1),
188 | RcmgrMemoryBlocked.M(0),
189 | )
190 | }
191 |
192 | func (r rcmgrMetrics) BlockMemory(_ int) {
193 | stats.RecordWithTags(
194 | context.Background(),
195 | []tag.Mutator{
196 | tag.Upsert(KeyName, r.name),
197 | },
198 | RcmgrMemoryAllowed.M(0),
199 | RcmgrMemoryBlocked.M(1),
200 | )
201 | }
202 |
--------------------------------------------------------------------------------
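
CreateRcmgrMetrics requires the hydra name to already be present as a tag on the context; a minimal sketch of constructing the reporter (the name is illustrative, and handing the reporter to the libp2p resource manager happens through its metrics option, whose exact shape varies by go-libp2p version):

```go
package main

import (
	"context"
	"log"

	"github.com/libp2p/hydra-booster/metrics"
	"go.opencensus.io/tag"
)

func main() {
	// CreateRcmgrMetrics reads the 'name' key from the context's tag map,
	// so it must be set before the reporter is built.
	ctx, err := tag.New(context.Background(), tag.Upsert(metrics.KeyName, "alasybil"))
	if err != nil {
		log.Fatalln(err)
	}
	reporter, err := metrics.CreateRcmgrMetrics(ctx)
	if err != nil {
		log.Fatalln(err)
	}
	_ = reporter // pass to the resource manager when constructing the libp2p host
}
```
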
/metricstasks/metricstasks.go:
--------------------------------------------------------------------------------
1 | package metricstasks
2 |
3 | import (
4 | "context"
5 | "fmt"
6 | "time"
7 |
8 | ds "github.com/ipfs/go-datastore"
9 | "github.com/ipfs/go-datastore/query"
10 | "github.com/jackc/pgx/v4/pgxpool"
11 | "github.com/libp2p/go-libp2p-kad-dht/providers"
12 | hydrads "github.com/libp2p/hydra-booster/datastore"
13 |
14 | "github.com/libp2p/hydra-booster/metrics"
15 | "github.com/libp2p/hydra-booster/periodictasks"
16 | "go.opencensus.io/stats"
17 | )
18 |
19 | func countProviderRecordsExactly(ctx context.Context, datastore ds.Datastore) error {
20 | fmt.Println("counting provider records")
21 | prs, err := datastore.Query(ctx, query.Query{Prefix: "/providers", KeysOnly: true})
22 | if err != nil {
23 | return err
24 | }
25 | defer prs.Close()
26 |
27 | // TODO: make fast https://github.com/libp2p/go-libp2p-kad-dht/issues/487
28 | var provRecords int
29 | for {
30 | select {
31 | case r, ok := <-prs.Next():
32 | if !ok {
33 | stats.Record(ctx, metrics.ProviderRecords.M(int64(provRecords)))
34 | return nil
35 | }
36 | if r.Error == nil {
37 | provRecords++
38 | }
39 | case <-ctx.Done():
40 | return nil
41 | }
42 | }
43 | }
44 |
45 | func countProviderRecordsApproximately(ctx context.Context, pgxPool *pgxpool.Pool) error {
46 | var approxCountSql = `SELECT
47 | (reltuples/relpages) * (
48 | pg_relation_size($1) /
49 | (current_setting('block_size')::integer)
50 | )
51 | FROM pg_class where relname = $2;`
52 | fmt.Println("approximating provider records")
53 | row := pgxPool.QueryRow(ctx, approxCountSql, hydrads.TableName, hydrads.TableName)
54 | var numProvRecords float64
55 | err := row.Scan(&numProvRecords)
56 | if err != nil {
57 | return err
58 | }
59 | fmt.Printf("found %v provider records\n", int64(numProvRecords))
60 | stats.Record(ctx, metrics.ProviderRecords.M(int64(numProvRecords)))
61 | return nil
62 | }
63 |
64 | type providerRecordCounter interface {
65 | CountProviderRecords(ctx context.Context) (int64, error)
66 | }
67 |
68 | func recordFromProviderRecordCounter(ctx context.Context, c providerRecordCounter) error {
69 | n, err := c.CountProviderRecords(ctx)
70 | if err != nil {
71 | return err
72 | }
73 | stats.Record(ctx, metrics.ProviderRecords.M(n))
74 | return nil
75 | }
76 |
77 | func NewProviderRecordsTask(datastore ds.Datastore, providerstore providers.ProviderStore, d time.Duration) periodictasks.PeriodicTask {
78 | var task func(ctx context.Context) error
79 | if pgBackend, ok := datastore.(hydrads.WithPgxPool); ok {
80 | task = func(ctx context.Context) error { return countProviderRecordsApproximately(ctx, pgBackend.PgxPool()) }
81 | } else if counter, ok := providerstore.(providerRecordCounter); ok {
82 | task = func(ctx context.Context) error { return recordFromProviderRecordCounter(ctx, counter) }
83 | } else {
84 | task = func(ctx context.Context) error { return countProviderRecordsExactly(ctx, datastore) }
85 | }
86 | return periodictasks.PeriodicTask{
87 | Interval: d,
88 | Run: task,
89 | }
90 | }
91 |
92 | func NewRoutingTableSizeTask(getRoutingTableSize func() int, d time.Duration) periodictasks.PeriodicTask {
93 | return periodictasks.PeriodicTask{
94 | Interval: d,
95 | Run: func(ctx context.Context) error {
96 | stats.Record(ctx, metrics.RoutingTableSize.M(int64(getRoutingTableSize())))
97 | return nil
98 | },
99 | }
100 | }
101 |
102 | func NewUniquePeersTask(getUniquePeersCount func() uint64, d time.Duration) periodictasks.PeriodicTask {
103 | return periodictasks.PeriodicTask{
104 | Interval: d,
105 | Run: func(ctx context.Context) error {
106 | stats.Record(ctx, metrics.UniquePeers.M(int64(getUniquePeersCount())))
107 | return nil
108 | },
109 | }
110 | }
111 |
112 | type entryCounter interface {
113 | EntryCount(context.Context) (uint64, error)
114 | }
115 |
116 | func NewIPNSRecordsTask(c entryCounter, d time.Duration) periodictasks.PeriodicTask {
117 | return periodictasks.PeriodicTask{
118 | Interval: d,
119 | Run: func(ctx context.Context) error {
120 | count, err := c.EntryCount(ctx)
121 | if err != nil {
122 | return err
123 | }
124 | stats.Record(ctx, metrics.IPNSRecords.M(int64(count)))
125 | return nil
126 | },
127 | }
128 | }
129 |
--------------------------------------------------------------------------------
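
The approximation above leans on PostgreSQL planner statistics: `reltuples / relpages` is the average number of rows per page as of the last ANALYZE, and `pg_relation_size / block_size` is the table's current page count, so their product estimates the row count without a sequential scan. Standalone, with an illustrative table name, the query reads:

```sql
-- Estimate the row count from planner statistics instead of COUNT(*).
SELECT (reltuples / relpages) *
       (pg_relation_size('records') / current_setting('block_size')::integer)
FROM pg_class
WHERE relname = 'records';
```
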
/metricstasks/metricstasks_test.go:
--------------------------------------------------------------------------------
1 | package metricstasks
2 |
3 | import (
4 | "context"
5 | "fmt"
6 | "math/rand"
7 | "testing"
8 | "time"
9 |
10 | "github.com/ipfs/go-datastore"
11 | "github.com/libp2p/hydra-booster/metrics"
12 | "go.opencensus.io/stats/view"
13 | )
14 |
15 | func TestNewProviderRecordsTask(t *testing.T) {
16 | ctx, cancel := context.WithCancel(context.Background())
17 | defer cancel()
18 |
19 | ds := datastore.NewMapDatastore()
20 | defer ds.Close()
21 |
22 | rand.Seed(time.Now().UTC().UnixNano())
23 | count := rand.Intn(100) + 1
24 |
25 | for i := 0; i < count; i++ {
26 | err := ds.Put(ctx, datastore.NewKey(fmt.Sprintf("/providers/%d", i)), []byte{})
27 | if err != nil {
28 | t.Fatal(err)
29 | }
30 | }
31 |
32 | pt := NewProviderRecordsTask(ds, nil, time.Second)
33 |
34 | if pt.Interval != time.Second {
35 | t.Fatal("invalid interval")
36 | }
37 |
38 | if err := view.Register(metrics.ProviderRecordsView); err != nil {
39 | t.Fatal(err)
40 | }
41 | defer view.Unregister(metrics.ProviderRecordsView)
42 |
43 | err := pt.Run(ctx)
44 | if err != nil {
45 | t.Fatal(err)
46 | }
47 |
48 | rows, err := view.RetrieveData(metrics.ProviderRecordsView.Name)
49 | if err != nil {
50 | t.Fatal(err)
51 | }
52 |
53 | if len(rows) == 0 {
54 | t.Fatal("no data was recorded")
55 | }
56 |
57 | data := rows[0].Data
58 | dis, ok := data.(*view.LastValueData)
59 | if !ok {
60 | t.Fatalf("want LastValueData, got %+v\n", data)
61 | }
62 |
63 | if int(dis.Value) != count {
64 | t.Fatal("incorrect value recorded")
65 | }
66 | }
67 |
68 | func TestNewRoutingTableSizeTask(t *testing.T) {
69 | ctx, cancel := context.WithCancel(context.Background())
70 | defer cancel()
71 |
72 | rt := NewRoutingTableSizeTask(func() int { return 1 }, time.Second)
73 |
74 | if rt.Interval != time.Second {
75 | t.Fatal("invalid interval")
76 | }
77 |
78 | if err := view.Register(metrics.RoutingTableSizeView); err != nil {
79 | t.Fatal(err)
80 | }
81 | defer view.Unregister(metrics.RoutingTableSizeView)
82 |
83 | err := rt.Run(ctx)
84 | if err != nil {
85 | t.Fatal(err)
86 | }
87 |
88 | rows, err := view.RetrieveData(metrics.RoutingTableSizeView.Name)
89 | if err != nil {
90 | t.Fatal(err)
91 | }
92 |
93 | if len(rows) == 0 {
94 | t.Fatal("no data was recorded")
95 | }
96 |
97 | data := rows[0].Data
98 | dis, ok := data.(*view.LastValueData)
99 | if !ok {
100 | t.Fatalf("want LastValueData, got %+v\n", data)
101 | }
102 |
103 | if int(dis.Value) != 1 {
104 | t.Fatal("incorrect value recorded")
105 | }
106 | }
107 |
108 | func TestNewUniquePeersTask(t *testing.T) {
109 | ctx, cancel := context.WithCancel(context.Background())
110 | defer cancel()
111 |
112 | rt := NewUniquePeersTask(func() uint64 { return 1 }, time.Second)
113 |
114 | if rt.Interval != time.Second {
115 | t.Fatal("invalid interval")
116 | }
117 |
118 | if err := view.Register(metrics.UniquePeersView); err != nil {
119 | t.Fatal(err)
120 | }
121 | defer view.Unregister(metrics.UniquePeersView)
122 |
123 | err := rt.Run(ctx)
124 | if err != nil {
125 | t.Fatal(err)
126 | }
127 |
128 | rows, err := view.RetrieveData(metrics.UniquePeersView.Name)
129 | if err != nil {
130 | t.Fatal(err)
131 | }
132 |
133 | if len(rows) == 0 {
134 | t.Fatal("no data was recorded")
135 | }
136 |
137 | data := rows[0].Data
138 | dis, ok := data.(*view.LastValueData)
139 | if !ok {
140 | t.Fatalf("want LastValueData, got %+v\n", data)
141 | }
142 |
143 | if int(dis.Value) != 1 {
144 | t.Fatal("incorrect value recorded")
145 | }
146 | }
147 |
--------------------------------------------------------------------------------
/periodictasks/runner.go:
--------------------------------------------------------------------------------
1 | package periodictasks
2 |
3 | import (
4 | "context"
5 | "fmt"
6 | "time"
7 | )
8 |
9 | // PeriodicTask describes a task that should be run periodically
10 | type PeriodicTask struct {
11 | Interval time.Duration
12 | Run func(ctx context.Context) error
13 | }
14 |
15 | // RunTasks immediately begins to periodically run the passed tasks.
16 | func RunTasks(ctx context.Context, tasks []PeriodicTask) {
17 | for _, task := range tasks {
18 | go func(t PeriodicTask) {
19 | timer := time.NewTimer(t.Interval)
20 | defer timer.Stop()
21 | for {
22 | select {
23 | case <-ctx.Done():
24 | return
25 | case <-timer.C:
26 | err := t.Run(ctx)
27 | if err != nil {
28 | fmt.Println(fmt.Errorf("failed to run periodic task: %w", err))
29 | }
30 | timer.Reset(t.Interval)
31 | }
32 | }
33 | }(task)
34 | }
35 | }
36 |
--------------------------------------------------------------------------------
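
A sketch of composing tasks and running them (the closures and intervals are illustrative; hydra wires these to real head state via the constructors in metricstasks):

```go
package main

import (
	"context"
	"time"

	"github.com/libp2p/hydra-booster/metricstasks"
	"github.com/libp2p/hydra-booster/periodictasks"
)

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	tasks := []periodictasks.PeriodicTask{
		metricstasks.NewRoutingTableSizeTask(func() int { return 42 }, 30*time.Second),
		metricstasks.NewUniquePeersTask(func() uint64 { return 7 }, 30*time.Second),
	}

	// Each task runs on its own goroutine and re-arms its timer after every run.
	periodictasks.RunTasks(ctx, tasks)

	time.Sleep(2 * time.Minute) // let a few intervals elapse
}
```
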
/periodictasks/runner_test.go:
--------------------------------------------------------------------------------
1 | package periodictasks
2 |
3 | import (
4 | "context"
5 | "math/rand"
6 | "sync"
7 | "testing"
8 | "time"
9 | )
10 |
11 | func TestRunSingleTask(t *testing.T) {
12 | ctx, cancel := context.WithCancel(context.Background())
13 | defer cancel()
14 |
15 | n := rand.Intn(50) + 1
16 |
17 | wg := sync.WaitGroup{}
18 | wg.Add(1)
19 |
20 | ts := []PeriodicTask{
21 | {
22 | Interval: time.Millisecond,
23 | Run: func(ctx context.Context) error {
24 | n--
25 | if n == 0 {
26 | wg.Done()
27 | }
28 | return nil
29 | },
30 | },
31 | }
32 |
33 | RunTasks(ctx, ts)
34 | wg.Wait()
35 | }
36 |
37 | func TestRunMultiTask(t *testing.T) {
38 | ctx, cancel := context.WithCancel(context.Background())
39 | defer cancel()
40 |
41 | n0 := rand.Intn(50) + 1
42 | n1 := rand.Intn(50) + 1
43 |
44 | wg0 := sync.WaitGroup{}
45 | wg0.Add(1)
46 |
47 | wg1 := sync.WaitGroup{}
48 | wg1.Add(1)
49 |
50 | ts := []PeriodicTask{
51 | {
52 | Interval: time.Millisecond,
53 | Run: func(ctx context.Context) error {
54 | n0--
55 | if n0 == 0 {
56 | wg0.Done()
57 | }
58 | return nil
59 | },
60 | },
61 | {
62 | Interval: time.Millisecond + 1,
63 | Run: func(ctx context.Context) error {
64 | n1--
65 | if n1 == 0 {
66 | wg1.Done()
67 | }
68 | return nil
69 | },
70 | },
71 | }
72 |
73 | RunTasks(ctx, ts)
74 |
75 | wg0.Wait()
76 | wg1.Wait()
77 | }
78 |
--------------------------------------------------------------------------------
/promconfig.yaml:
--------------------------------------------------------------------------------
1 | scrape_configs:
2 | - job_name: 'hydrabooster'
3 |
4 | scrape_interval: 10s
5 |
6 | static_configs:
7 | - targets: ['localhost:8888']
8 |
--------------------------------------------------------------------------------
/providers/caching.go:
--------------------------------------------------------------------------------
1 | package providers
2 |
3 | import (
4 | "context"
5 |
6 | logging "github.com/ipfs/go-log"
7 | "github.com/libp2p/go-libp2p-kad-dht/providers"
8 | "github.com/libp2p/go-libp2p/core/peer"
9 | "github.com/libp2p/hydra-booster/metrics"
10 | "go.opencensus.io/stats"
11 | )
12 |
13 | // CachingProviderStore checks the ReadProviderStore for providers. If no providers are returned,
14 | // then the Finder is used to find providers, which are then added to the WriteProviderStore.
15 | type CachingProviderStore struct {
16 | ReadProviderStore providers.ProviderStore
17 | WriteProviderStore providers.ProviderStore
18 | Finder ProvidersFinder
19 | Router ReadContentRouting
20 | log logging.EventLogger
21 | }
22 |
23 | func NewCachingProviderStore(getDelegate providers.ProviderStore, addDelegate providers.ProviderStore, finder ProvidersFinder, router ReadContentRouting) *CachingProviderStore {
24 | return &CachingProviderStore{
25 | ReadProviderStore: getDelegate,
26 | WriteProviderStore: addDelegate,
27 | Finder: finder,
28 | Router: router,
29 | log: logging.Logger("hydra/providers"),
30 | }
31 | }
32 |
33 | func (s *CachingProviderStore) AddProvider(ctx context.Context, key []byte, prov peer.AddrInfo) error {
34 | return s.WriteProviderStore.AddProvider(ctx, key, prov)
35 | }
36 |
37 | // GetProviders gets providers for the given key from the providerstore.
38 | // If the providerstore does not have providers for the key, then the ProvidersFinder is queried and the results are cached.
39 | func (s *CachingProviderStore) GetProviders(ctx context.Context, key []byte) ([]peer.AddrInfo, error) {
40 | addrInfos, err := s.ReadProviderStore.GetProviders(ctx, key)
41 | if err != nil {
42 | return addrInfos, err
43 | }
44 |
45 | if len(addrInfos) > 0 {
46 | return addrInfos, nil
47 | }
48 |
49 | return nil, s.Finder.Find(ctx, s.Router, key, func(ai peer.AddrInfo) {
50 | err := s.WriteProviderStore.AddProvider(ctx, key, ai)
51 | if err != nil {
52 | s.log.Errorf("failed to add provider to providerstore: %s", err)
53 | stats.Record(ctx, metrics.PrefetchFailedToCache.M(1))
54 | }
55 | })
56 | }
57 |
--------------------------------------------------------------------------------
/providers/caching_test.go:
--------------------------------------------------------------------------------
1 | package providers
2 |
3 | import (
4 | "context"
5 | "errors"
6 | "testing"
7 | "time"
8 |
9 | "github.com/libp2p/go-libp2p/core/peer"
10 | "github.com/stretchr/testify/assert"
11 | )
12 |
13 | type mockProviderStore struct {
14 | providers map[string][]peer.AddrInfo
15 | err error
16 | }
17 |
18 | func (m *mockProviderStore) AddProvider(ctx context.Context, key []byte, prov peer.AddrInfo) error {
19 | if m.err != nil {
20 | return m.err
21 | }
22 | if m.providers == nil {
23 | m.providers = map[string][]peer.AddrInfo{}
24 | }
25 | m.providers[string(key)] = append(m.providers[string(key)], prov)
26 | return nil
27 | }
28 |
29 | func (m *mockProviderStore) GetProviders(ctx context.Context, key []byte) ([]peer.AddrInfo, error) {
30 | if m.err != nil {
31 | return nil, m.err
32 | }
33 | return m.providers[string(key)], nil
34 | }
35 |
36 | type mockFinder struct {
37 | providers map[string][]peer.AddrInfo
38 | }
39 |
40 | func (m *mockFinder) Find(ctx context.Context, router ReadContentRouting, key []byte, onProvider onProviderFunc) error {
41 | for _, ai := range m.providers[string(key)] {
42 | onProvider(ai)
43 | }
44 | return nil
45 | }
46 |
47 | func TestCachingProviderStore_GetProviders(t *testing.T) {
48 | cases := []struct {
49 | name string
50 | mh string
51 |
52 | delegateErr error
53 | readProviders map[string][]peer.AddrInfo
54 | routerProviders map[string][]peer.AddrInfo
55 | finderProviders map[string][]peer.AddrInfo
56 |
57 | expProviders []peer.AddrInfo
58 | expWriteProviders map[string][]peer.AddrInfo
59 | expErr error
60 | }{
61 | {
62 | name: "returns providers when delegate has them",
63 | mh: "mh1",
64 | readProviders: map[string][]peer.AddrInfo{
65 | "mh1": {peer.AddrInfo{ID: peer.ID([]byte("peer1"))}},
66 | },
67 | expProviders: []peer.AddrInfo{
68 | {ID: peer.ID([]byte("peer1"))},
69 | },
70 | },
71 | {
72 | name: "finds and caches providers when delegate doesn't have them",
73 | mh: "mh1",
74 | readProviders: map[string][]peer.AddrInfo{},
75 | finderProviders: map[string][]peer.AddrInfo{
76 | "mh1": {peer.AddrInfo{ID: peer.ID([]byte("peer1"))}},
77 | },
78 | expWriteProviders: map[string][]peer.AddrInfo{
79 | "mh1": {peer.AddrInfo{ID: peer.ID([]byte("peer1"))}},
80 | },
81 | },
82 | {
83 | name: "returns error on delegate error",
84 | delegateErr: errors.New("boom"),
85 | expErr: errors.New("boom"),
86 | },
87 | }
88 |
89 | for _, c := range cases {
90 | t.Run(c.name, func(t *testing.T) {
91 | ctx, stop := context.WithTimeout(context.Background(), 2*time.Second)
92 | defer stop()
93 | writePS := &mockProviderStore{
94 | err: c.delegateErr,
95 | }
96 | readPS := &mockProviderStore{
97 | providers: c.readProviders,
98 | err: c.delegateErr,
99 | }
100 | finder := &mockFinder{
101 | providers: c.finderProviders,
102 | }
103 |
104 | ps := NewCachingProviderStore(readPS, writePS, finder, nil)
105 |
106 | provs, err := ps.GetProviders(ctx, []byte(c.mh))
107 | assert.Equal(t, c.expErr, err)
108 | assert.Equal(t, c.expProviders, provs)
109 | assert.Equal(t, c.expWriteProviders, writePS.providers)
110 | })
111 | }
112 | }
113 |
--------------------------------------------------------------------------------
/providers/combined.go:
--------------------------------------------------------------------------------
1 | package providers
2 |
3 | import (
4 | "context"
5 |
6 | "github.com/hashicorp/go-multierror"
7 | logging "github.com/ipfs/go-log"
8 | "github.com/libp2p/go-libp2p-kad-dht/providers"
9 | "github.com/libp2p/go-libp2p/core/peer"
10 | "github.com/multiformats/go-multiaddr"
11 | )
12 |
13 | var log = logging.Logger("hydra/providers")
14 |
15 | func CombineProviders(backend ...providers.ProviderStore) providers.ProviderStore {
16 | return &CombinedProviderStore{backends: backend}
17 | }
18 |
19 | type CombinedProviderStore struct {
20 | backends []providers.ProviderStore
21 | }
22 |
23 | func (s *CombinedProviderStore) AddProvider(ctx context.Context, key []byte, prov peer.AddrInfo) error {
24 | ch := make(chan error, len(s.backends))
25 | for _, b := range s.backends {
26 | go func(backend providers.ProviderStore) {
27 | ch <- backend.AddProvider(ctx, key, prov)
28 | }(b)
29 | }
30 | var errs *multierror.Error
31 | for range s.backends {
32 | if e := <-ch; e != nil {
33 | errs = multierror.Append(errs, e)
34 | }
35 | }
36 | if len(errs.WrappedErrors()) > 0 {
37 | log.Errorf("some providers returned errors (%v)", errs)
38 | }
39 | if len(errs.WrappedErrors()) == len(s.backends) {
40 | return errs
41 | } else {
42 | return nil
43 | }
44 | }
45 |
46 | type findProvidersAsyncResult struct {
47 | AddrInfo []peer.AddrInfo
48 | Err error
49 | }
50 |
51 | func (s *CombinedProviderStore) GetProviders(ctx context.Context, key []byte) ([]peer.AddrInfo, error) {
52 | ch := make(chan findProvidersAsyncResult, len(s.backends))
53 | for _, b := range s.backends {
54 | go func(backend providers.ProviderStore) {
55 | infos, err := backend.GetProviders(ctx, key)
56 | ch <- findProvidersAsyncResult{AddrInfo: infos, Err: err}
57 | }(b)
58 | }
59 | infos := []peer.AddrInfo{}
60 | var errs *multierror.Error
61 | for range s.backends {
62 | r := <-ch
63 | if r.Err == nil {
64 | infos = append(infos, r.AddrInfo...)
65 | } else {
66 | errs = multierror.Append(errs, r.Err)
67 | }
68 | }
69 | infos = mergeAddrInfos(infos)
70 | if len(errs.WrappedErrors()) > 0 {
71 | log.Errorf("some providers returned errors (%v)", errs)
72 | }
73 | if len(errs.WrappedErrors()) == len(s.backends) {
74 | return infos, errs
75 | } else {
76 | return infos, nil
77 | }
78 | }
79 |
80 | func mergeAddrInfos(infos []peer.AddrInfo) []peer.AddrInfo {
81 | m := map[peer.ID][]multiaddr.Multiaddr{}
82 | for _, info := range infos {
83 | m[info.ID] = mergeMultiaddrs(append(m[info.ID], info.Addrs...))
84 | }
85 | var r []peer.AddrInfo
86 | for k, v := range m {
87 | if k.Validate() == nil {
88 | r = append(r, peer.AddrInfo{ID: k, Addrs: v})
89 | }
90 | }
91 | return r
92 | }
93 |
94 | func mergeMultiaddrs(addrs []multiaddr.Multiaddr) []multiaddr.Multiaddr {
95 | m := map[string]multiaddr.Multiaddr{}
96 | for _, addr := range addrs {
97 | m[addr.String()] = addr
98 | }
99 | var r []multiaddr.Multiaddr
100 | for _, v := range m {
101 | r = append(r, v)
102 | }
103 | return r
104 | }
105 |
--------------------------------------------------------------------------------
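
A hedged sketch of the fan-out behavior: `memStore` is a stand-in implementing just the two ProviderStore methods, so writes go to every backend and reads are merged and deduplicated by peer ID:

```go
package main

import (
	"context"
	"fmt"

	"github.com/libp2p/go-libp2p/core/peer"
	"github.com/libp2p/hydra-booster/providers"
)

type memStore struct{ m map[string][]peer.AddrInfo }

func (s *memStore) AddProvider(ctx context.Context, key []byte, prov peer.AddrInfo) error {
	s.m[string(key)] = append(s.m[string(key)], prov)
	return nil
}

func (s *memStore) GetProviders(ctx context.Context, key []byte) ([]peer.AddrInfo, error) {
	return s.m[string(key)], nil
}

func main() {
	a := &memStore{m: map[string][]peer.AddrInfo{}}
	b := &memStore{m: map[string][]peer.AddrInfo{}}
	store := providers.CombineProviders(a, b)

	_ = store.AddProvider(context.Background(), []byte("mh1"), peer.AddrInfo{ID: peer.ID("peer1")})
	infos, _ := store.GetProviders(context.Background(), []byte("mh1"))
	fmt.Println(len(infos)) // 1 — the copy stored in each backend merges to one AddrInfo
}
```
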
/providers/ddb.go:
--------------------------------------------------------------------------------
1 | package providers
2 |
3 | import (
4 | "context"
5 | "errors"
6 | "fmt"
7 | "reflect"
8 | "strconv"
9 | "time"
10 |
11 | "github.com/aws/aws-sdk-go-v2/aws"
12 | "github.com/aws/aws-sdk-go-v2/service/dynamodb"
13 | "github.com/aws/aws-sdk-go-v2/service/dynamodb/types"
14 | "github.com/benbjohnson/clock"
15 | "github.com/libp2p/go-libp2p/core/peer"
16 | "github.com/libp2p/go-libp2p/core/peerstore"
17 | "github.com/libp2p/hydra-booster/metrics"
18 | "github.com/multiformats/go-multiaddr"
19 | "go.opencensus.io/stats"
20 | )
21 |
22 | type ddbClient interface {
23 | dynamodb.QueryAPIClient
24 | dynamodb.DescribeTableAPIClient
25 | PutItem(ctx context.Context, params *dynamodb.PutItemInput, optFns ...func(*dynamodb.Options)) (*dynamodb.PutItemOutput, error)
26 | }
27 |
28 | type peerStore interface {
29 | PeerInfo(peer.ID) peer.AddrInfo
30 | AddAddrs(p peer.ID, addrs []multiaddr.Multiaddr, ttl time.Duration)
31 | }
32 |
33 | type dynamoDBProviderStore struct {
34 | Self peer.ID
35 | Peerstore peerStore
36 | DDBClient ddbClient
37 | TableName string
38 | TTL time.Duration
39 | QueryLimit int32
40 | clock clock.Clock
41 | }
42 |
43 | func NewDynamoDBProviderStore(self peer.ID, peerstore peerStore, ddbClient ddbClient, tableName string, ttl time.Duration, queryLimit int32) *dynamoDBProviderStore {
44 | return &dynamoDBProviderStore{
45 | Self: self,
46 | Peerstore: peerstore,
47 | DDBClient: ddbClient,
48 | TableName: tableName,
49 | TTL: ttl,
50 | QueryLimit: queryLimit,
51 | clock: clock.New(),
52 | }
53 | }
54 |
55 | func (d *dynamoDBProviderStore) AddProvider(ctx context.Context, key []byte, prov peer.AddrInfo) error {
56 | if prov.ID != d.Self { // don't add own addrs.
57 | d.Peerstore.AddAddrs(prov.ID, prov.Addrs, peerstore.AddressTTL)
58 | }
59 |
60 | ttlEpoch := d.clock.Now().Add(d.TTL).UnixNano() / 1e9
61 | ttlEpochStr := strconv.FormatInt(ttlEpoch, 10)
62 | _, err := d.DDBClient.PutItem(ctx, &dynamodb.PutItemInput{
63 | TableName: &d.TableName,
64 | Item: map[string]types.AttributeValue{
65 | "key": &types.AttributeValueMemberB{Value: key},
66 | "prov": &types.AttributeValueMemberB{Value: []byte(prov.ID)},
67 | "ttl": &types.AttributeValueMemberN{Value: ttlEpochStr},
68 | },
69 | ConditionExpression: aws.String("attribute_not_exists(#k) AND attribute_not_exists(#t)"),
70 | ExpressionAttributeNames: map[string]string{
71 | "#k": "key",
72 | "#t": "ttl",
73 | },
74 | })
75 | var ccfe *types.ConditionalCheckFailedException
76 | if errors.As(err, &ccfe) {
77 | // the item already exists which means we tried to write >1 providers for a CID at the exact same millisecond
78 | // nothing to do, move on
79 | // (there is a metric recorded for this, since all error codes are recorded)
80 | return nil
81 | }
82 | return err
83 | }
84 |
85 | func (d *dynamoDBProviderStore) GetProviders(ctx context.Context, key []byte) ([]peer.AddrInfo, error) {
86 | providersLeft := d.QueryLimit
87 |
88 | // dedupe the providers and preserve order
89 | providersSet := map[string]bool{}
90 | providers := []peer.AddrInfo{}
91 |
92 | var startKey map[string]types.AttributeValue
93 | for {
94 | res, err := d.DDBClient.Query(ctx, &dynamodb.QueryInput{
95 | TableName: &d.TableName,
96 | KeyConditionExpression: aws.String("#k = :key"),
97 | ExpressionAttributeValues: map[string]types.AttributeValue{
98 | ":key": &types.AttributeValueMemberB{Value: key},
99 | },
100 | ExpressionAttributeNames: map[string]string{
101 | "#k": "key",
102 | },
103 | ScanIndexForward: aws.Bool(false), // return most recent entries first
104 | ExclusiveStartKey: startKey,
105 | Limit: &providersLeft,
106 | })
107 | if err != nil {
108 | return nil, err
109 | }
110 | for _, item := range res.Items {
111 | prov, ok := item["prov"]
112 | if !ok {
113 | return nil, errors.New("unexpected item without a 'prov' attribute")
114 | }
115 | provB, ok := prov.(*types.AttributeValueMemberB)
116 | if !ok {
117 |
118 | return nil, fmt.Errorf("unexpected value type of '%s' for 'prov' attribute", reflect.TypeOf(prov))
119 | }
120 | provStr := string(provB.Value)
121 | peerID := peer.ID(string(provB.Value))
122 | addrInfo := d.Peerstore.PeerInfo(peerID)
123 |
124 | if _, ok := providersSet[provStr]; !ok {
125 | providersSet[provStr] = true
126 | providers = append(providers, addrInfo)
127 | }
128 | }
129 |
130 | numItems := int32(len(res.Items))
131 | if numItems >= providersLeft || len(res.LastEvaluatedKey) == 0 {
132 | break
133 | }
134 | startKey = res.LastEvaluatedKey // advance pagination; otherwise the same page would be fetched again
135 | providersLeft -= numItems
136 | }
137 |
138 | stats.Record(ctx, metrics.ProviderRecordsPerKey.M(int64(len(providers))))
139 |
140 | if len(providers) > 0 {
141 | recordPrefetches(ctx, "local")
142 | }
143 |
144 | return providers, nil
145 | }
146 |
147 | // CountProviderRecords returns the approximate number of records in the table. This shouldn't be called more often than once every few seconds, as
148 | // DynamoDB may start throttling the requests.
149 | func (d *dynamoDBProviderStore) CountProviderRecords(ctx context.Context) (int64, error) {
150 | res, err := d.DDBClient.DescribeTable(ctx, &dynamodb.DescribeTableInput{
151 | TableName: &d.TableName,
152 | })
153 | if err != nil {
154 | return 0, err
155 | }
156 | return res.Table.ItemCount, nil
157 | }
158 |
--------------------------------------------------------------------------------
/providers/ddb_test.go:
--------------------------------------------------------------------------------
1 | package providers
2 |
3 | import (
4 | "bytes"
5 | "context"
6 | "fmt"
7 | "os/exec"
8 | "strconv"
9 | "strings"
10 | "testing"
11 | "time"
12 |
13 | "github.com/aws/aws-sdk-go-v2/aws"
14 | "github.com/aws/aws-sdk-go-v2/config"
15 | "github.com/aws/aws-sdk-go-v2/credentials"
16 | "github.com/aws/aws-sdk-go-v2/service/dynamodb"
17 | "github.com/aws/aws-sdk-go-v2/service/dynamodb/types"
18 | "github.com/benbjohnson/clock"
19 | "github.com/libp2p/go-libp2p/core/peer"
20 | "github.com/multiformats/go-multiaddr"
21 | "github.com/stretchr/testify/assert"
22 | "github.com/stretchr/testify/mock"
23 | )
24 |
25 | var tableName = "testtable"
26 |
27 | func startDDBLocal(ctx context.Context, ddbClient *dynamodb.Client) (func(), error) {
28 | cmd := exec.Command("docker", "run", "-d", "-p", "8000:8000", "amazon/dynamodb-local", "-jar", "DynamoDBLocal.jar", "-inMemory")
29 | buf := &bytes.Buffer{}
30 | cmd.Stdout = buf
31 | cmd.Stderr = buf
32 | err := cmd.Run()
33 | if err != nil {
34 | return nil, fmt.Errorf("error running DynamoDB Local (%s), output:\n%s", err.Error(), buf)
35 | }
36 |
37 | ctrID := strings.TrimSpace(buf.String())
38 |
39 | cleanupFunc := func() {
40 | cmd := exec.Command("docker", "kill", ctrID)
41 | if err := cmd.Run(); err != nil {
42 | fmt.Printf("error killing %s: %s\n", ctrID, err)
43 | }
44 | }
45 |
46 | // wait for DynamoDB to respond
47 | for {
48 | select {
49 | case <-ctx.Done():
50 | cleanupFunc()
51 | return nil, ctx.Err()
52 | default:
53 | }
54 |
55 | _, err := ddbClient.ListTables(ctx, &dynamodb.ListTablesInput{})
56 | if err == nil {
57 | break
58 | }
59 | }
60 |
61 | 	return cleanupFunc, nil
62 | }
63 |
64 | func newDDBClient() *dynamodb.Client {
65 | resolver := dynamodb.EndpointResolverFunc(func(region string, options dynamodb.EndpointResolverOptions) (aws.Endpoint, error) {
66 | return aws.Endpoint{
67 | PartitionID: "aws",
68 | URL: "http://localhost:8000",
69 | SigningRegion: region,
70 | }, nil
71 | })
72 | cfg, err := config.LoadDefaultConfig(context.Background(),
73 | config.WithRegion("us-east-1"),
74 | config.WithCredentialsProvider(credentials.NewStaticCredentialsProvider("a", "a", "a")),
75 | )
76 | if err != nil {
77 | panic(err)
78 | }
79 | return dynamodb.NewFromConfig(cfg, dynamodb.WithEndpointResolver(resolver))
80 | }
81 |
82 | func setupTables(ddbClient *dynamodb.Client) error {
83 | _, err := ddbClient.CreateTable(context.Background(), &dynamodb.CreateTableInput{
84 | AttributeDefinitions: []types.AttributeDefinition{
85 | {
86 | AttributeName: aws.String("key"),
87 | AttributeType: types.ScalarAttributeTypeB,
88 | },
89 | {
90 | AttributeName: aws.String("ttl"),
91 | AttributeType: types.ScalarAttributeTypeN,
92 | },
93 | },
94 | KeySchema: []types.KeySchemaElement{
95 | {
96 | AttributeName: aws.String("key"),
97 | KeyType: types.KeyTypeHash,
98 | },
99 | {
100 | AttributeName: aws.String("ttl"),
101 | KeyType: types.KeyTypeRange,
102 | },
103 | },
104 | TableName: &tableName,
105 | BillingMode: types.BillingModeProvisioned,
106 | ProvisionedThroughput: &types.ProvisionedThroughput{
107 | ReadCapacityUnits: aws.Int64(1000),
108 | WriteCapacityUnits: aws.Int64(1000),
109 | },
110 | })
111 | if err != nil {
112 | return err
113 | }
114 | _, err = ddbClient.UpdateTimeToLive(context.Background(), &dynamodb.UpdateTimeToLiveInput{
115 | TableName: &tableName,
116 | TimeToLiveSpecification: &types.TimeToLiveSpecification{
117 | AttributeName: aws.String("ttl"),
118 | Enabled: aws.Bool(true),
119 | },
120 | })
121 | return err
122 | }
123 |
124 | type mockPeerStore struct {
125 | addrs map[string][]multiaddr.Multiaddr
126 | }
127 |
128 | func (m *mockPeerStore) PeerInfo(peerID peer.ID) peer.AddrInfo {
129 | return peer.AddrInfo{
130 | ID: peerID,
131 | Addrs: m.addrs[string(peerID)],
132 | }
133 | }
134 | func (m *mockPeerStore) AddAddrs(p peer.ID, addrs []multiaddr.Multiaddr, ttl time.Duration) {
135 | m.addrs[string(p)] = append(m.addrs[string(p)], addrs...)
136 | }
137 |
138 | func TestProviderStore_ddb_local(t *testing.T) {
139 | ctx, stop := context.WithTimeout(context.Background(), 300*time.Second)
140 | defer stop()
141 |
142 | ddb := newDDBClient()
143 | stopDDBLocal, err := startDDBLocal(ctx, ddb)
144 | assert.NoError(t, err)
145 | t.Cleanup(stopDDBLocal)
146 |
147 | err = setupTables(ddb)
148 | assert.NoError(t, err)
149 |
150 | mockClock := clock.NewMock()
151 | peerStore := &mockPeerStore{addrs: map[string][]multiaddr.Multiaddr{}}
152 | provStore := &dynamoDBProviderStore{
153 | Self: "peer",
154 | Peerstore: peerStore,
155 | DDBClient: ddb,
156 | TableName: tableName,
157 | TTL: 100 * time.Second,
158 | QueryLimit: 10,
159 | clock: mockClock,
160 | }
161 |
162 | key := []byte("foo")
163 | ma, err := multiaddr.NewMultiaddr("/ip4/1.1.1.1")
164 | assert.NoError(t, err)
165 |
166 | // add more providers than the query limit to ensure the limit is enforced
167 | numProvs := int(provStore.QueryLimit * 2)
168 | for i := 0; i < numProvs; i++ {
169 | peerID := i
170 | prov := peer.AddrInfo{
171 | ID: peer.ID(strconv.Itoa(peerID)),
172 | Addrs: []multiaddr.Multiaddr{ma},
173 | }
174 | err = provStore.AddProvider(ctx, key, prov)
175 | if err != nil {
176 | t.Fatal(err)
177 | }
178 | mockClock.Add(1 * time.Second)
179 | }
180 |
181 | provs, err := provStore.GetProviders(ctx, key)
182 | assert.NoError(t, err)
183 | assert.EqualValues(t, provStore.QueryLimit, len(provs))
184 |
185 | for i, prov := range provs {
186 | 		// peer IDs should be decreasing, since results are sorted most-recently-added first
188 | expID := strconv.Itoa(numProvs - i - 1)
189 | assert.Equal(t, expID, string(prov.ID))
190 |
191 | assert.Len(t, prov.Addrs, 1)
192 | assert.True(t, ma.Equal(prov.Addrs[0]))
193 | }
194 |
195 | n, err := provStore.CountProviderRecords(ctx)
196 | assert.NoError(t, err)
197 | assert.EqualValues(t, numProvs, n)
198 | }
199 |
200 | type mockDDB struct{ mock.Mock }
201 |
202 | // note that variadic args don't work on these mocks but we don't use them anyway
203 | func (m *mockDDB) Query(ctx context.Context, params *dynamodb.QueryInput, optFns ...func(*dynamodb.Options)) (*dynamodb.QueryOutput, error) {
204 | args := m.Called(ctx, params, optFns)
205 | return args.Get(0).(*dynamodb.QueryOutput), args.Error(1)
206 | }
207 |
208 | func (m *mockDDB) DescribeTable(ctx context.Context, params *dynamodb.DescribeTableInput, optFns ...func(*dynamodb.Options)) (*dynamodb.DescribeTableOutput, error) {
209 | args := m.Called(ctx, params, optFns)
210 | return args.Get(0).(*dynamodb.DescribeTableOutput), args.Error(1)
211 | }
212 |
213 | func (m *mockDDB) PutItem(ctx context.Context, params *dynamodb.PutItemInput, optFns ...func(*dynamodb.Options)) (*dynamodb.PutItemOutput, error) {
214 | args := m.Called(ctx, params, optFns)
215 | return args.Get(0).(*dynamodb.PutItemOutput), args.Error(1)
216 | }
217 |
218 | func TestProviderStore_pagination(t *testing.T) {
219 | // we can't use DynamoDB Local for this
220 | // because we need to make the response page size much smaller to exercise pagination
221 | ctx, stop := context.WithTimeout(context.Background(), 1*time.Second)
222 | defer stop()
223 | ddbClient := &mockDDB{}
224 | mockClock := clock.NewMock()
225 | peerStore := &mockPeerStore{addrs: map[string][]multiaddr.Multiaddr{}}
226 | provStore := &dynamoDBProviderStore{
227 | Self: "peer",
228 | Peerstore: peerStore,
229 | DDBClient: ddbClient,
230 | TableName: tableName,
231 | TTL: 100 * time.Second,
232 | QueryLimit: 10,
233 | 		// the mocked Query responses below control the page size
234 | 		clock: mockClock,
235 | }
236 |
237 | // return 2 pages to exercise pagination logic
238 |
239 | ddbClient.
240 | On("Query", ctx, mock.Anything, mock.Anything).
241 | Return(&dynamodb.QueryOutput{
242 | Items: []map[string]types.AttributeValue{
243 | {"prov": &types.AttributeValueMemberB{Value: []byte("1")}},
244 | },
245 | LastEvaluatedKey: map[string]types.AttributeValue{
246 | "prov": &types.AttributeValueMemberB{Value: []byte("1")},
247 | },
248 | }, nil).
249 | Times(1)
250 | ddbClient.
251 | On("Query", ctx, mock.Anything, mock.Anything).
252 | Return(&dynamodb.QueryOutput{
253 | Items: []map[string]types.AttributeValue{
254 | {"prov": &types.AttributeValueMemberB{Value: []byte("2")}},
255 | },
256 | }, nil).
257 | Times(1)
258 |
259 | provs, err := provStore.GetProviders(ctx, []byte("key"))
260 | assert.NoError(t, err)
261 | assert.EqualValues(t, 2, len(provs))
262 | assert.EqualValues(t, "1", provs[0].ID)
263 | assert.EqualValues(t, "2", provs[1].ID)
264 | }
265 |
266 | func TestProviderStore_pagination_no_results(t *testing.T) {
267 | // we can't use DynamoDB Local for this
268 | // because we need to make the response page size much smaller to exercise pagination
269 | ctx, stop := context.WithTimeout(context.Background(), 1*time.Second)
270 | defer stop()
271 | ddbClient := &mockDDB{}
272 | mockClock := clock.NewMock()
273 | peerStore := &mockPeerStore{addrs: map[string][]multiaddr.Multiaddr{}}
274 | provStore := &dynamoDBProviderStore{
275 | Self: "peer",
276 | Peerstore: peerStore,
277 | DDBClient: ddbClient,
278 | TableName: tableName,
279 | TTL: 100 * time.Second,
280 | QueryLimit: 10,
281 | 		clock: mockClock,
283 | }
284 |
285 | 	// return a single empty page with no results
286 |
287 | ddbClient.
288 | On("Query", ctx, mock.Anything, mock.Anything).
289 | Return(&dynamodb.QueryOutput{}, nil).
290 | Times(1)
291 |
292 | provs, err := provStore.GetProviders(ctx, []byte("key"))
293 | assert.NoError(t, err)
294 | assert.EqualValues(t, 0, len(provs))
295 | }
296 |
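
TestProviderStore_pagination above leans on testify's expectation ordering: two On("Query", ...) expectations, each constrained with Times(1), are consumed in declaration order, so the first call yields a page carrying a LastEvaluatedKey and the second yields the final page. A stripped-down illustration of the same mechanism, using a hypothetical pager mock:

```go
package main

import (
	"fmt"

	"github.com/stretchr/testify/mock"
)

type pagerMock struct{ mock.Mock }

func (m *pagerMock) NextPage() (string, bool) {
	args := m.Called()
	return args.String(0), args.Bool(1)
}

func main() {
	p := &pagerMock{}
	// Each expectation is consumed once, in the order declared.
	p.On("NextPage").Return("page1", true).Times(1)  // more pages follow
	p.On("NextPage").Return("page2", false).Times(1) // last page

	for {
		page, more := p.NextPage()
		fmt.Println(page)
		if !more {
			break
		}
	}
}
```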
--------------------------------------------------------------------------------
/providers/finder.go:
--------------------------------------------------------------------------------
1 | package providers
2 |
3 | import (
4 | "context"
5 | "sync"
6 | "time"
7 |
8 | "github.com/benbjohnson/clock"
9 | "github.com/ipfs/go-cid"
10 | logging "github.com/ipfs/go-log"
11 | "github.com/libp2p/go-libp2p/core/peer"
12 | "github.com/libp2p/hydra-booster/metrics"
13 | "github.com/multiformats/go-multicodec"
14 | "github.com/multiformats/go-multihash"
15 | "github.com/whyrusleeping/timecache"
16 | "go.opencensus.io/stats"
17 | "go.opencensus.io/tag"
18 | )
19 |
20 | const (
21 | metricsPublishingInterval = 10 * time.Second
22 | )
23 |
24 | // ProvidersFinder finds providers for the given key using the given content router, passing each to the callback.
25 | type ProvidersFinder interface {
26 | Find(ctx context.Context, router ReadContentRouting, key []byte, onProvider onProviderFunc) error
27 | }
28 |
29 | func NewAsyncProvidersFinder(timeout time.Duration, queueSize int, negativeCacheTTL time.Duration) *asyncProvidersFinder {
30 | clock := clock.New()
31 | return &asyncProvidersFinder{
32 | log: logging.Logger("hydra/prefetch"),
33 | clock: clock,
34 | metricsTicker: clock.Ticker(metricsPublishingInterval),
35 | workQueueSize: queueSize,
36 | workQueue: make(chan findRequest, queueSize),
37 | pending: map[string]bool{},
38 | timeout: timeout,
39 | negativeCacheTTL: negativeCacheTTL,
40 | negativeCache: &idempotentTimeCache{cache: timecache.NewTimeCache(negativeCacheTTL)},
41 | onReqDone: func(r findRequest) {},
42 | onMetricsPublished: func() {},
43 | }
44 | }
46 |
47 | type ReadContentRouting interface {
48 | FindProvidersAsync(ctx context.Context, cid cid.Cid, numResults int) <-chan peer.AddrInfo
49 | }
50 |
51 | type onProviderFunc func(peer.AddrInfo)
52 |
53 | type findRequest struct {
54 | ctx context.Context
55 | router ReadContentRouting
56 | key []byte
57 | onProvider onProviderFunc
58 | }
59 |
60 | // asyncProvidersFinder finds providers asynchronously using a bounded work queue and a bounded number of workers.
61 | type asyncProvidersFinder struct {
62 | log logging.EventLogger
63 | clock clock.Clock
64 | metricsTicker *clock.Ticker
65 | workQueueSize int
66 | workQueue chan findRequest
67 | pendingMut sync.RWMutex
68 | pending map[string]bool
69 | timeout time.Duration
70 | negativeCacheTTL time.Duration
71 | negativeCache *idempotentTimeCache
72 | ctx context.Context
73 |
74 | // callbacks used for testing
75 | onReqDone func(r findRequest)
76 | onMetricsPublished func()
77 | }
78 |
79 | // Find finds the providers for a given key using the passed content router asynchronously.
80 | // It schedules work and returns immediately, invoking the callback concurrently as results are found.
81 | // If the work queue is full, this does not block--it drops the request on the floor and immediately returns.
82 | func (a *asyncProvidersFinder) Find(ctx context.Context, router ReadContentRouting, key []byte, onProvider onProviderFunc) error {
83 | a.pendingMut.Lock()
84 | defer a.pendingMut.Unlock()
85 | ks := string(key)
86 | pending := a.pending[ks]
87 | if pending {
88 | return nil
89 | }
90 | if a.negativeCache.Has(ks) {
91 | recordPrefetches(ctx, "failed-cached")
92 | return nil
93 | }
94 | select {
95 | case a.workQueue <- findRequest{ctx: ctx, router: router, key: key, onProvider: onProvider}:
96 | a.pending[ks] = true
97 | return nil
98 | default:
99 | recordPrefetches(ctx, "discarded")
100 | return nil
101 | }
102 | }
103 |
104 | // Run runs a set of goroutine workers that process Find() calls asynchronously.
105 | // The workers shut down gracefully when the context is canceled.
106 | func (a *asyncProvidersFinder) Run(ctx context.Context, numWorkers int) {
107 | a.ctx = ctx
108 | for i := 0; i < numWorkers; i++ {
109 | go func() {
110 | for {
111 | select {
112 | case <-ctx.Done():
113 | return
114 | case req := <-a.workQueue:
115 | a.handleRequest(ctx, req)
116 | }
117 | }
118 | }()
119 | }
120 | // periodic metric publishing
121 | 	go func() {
122 | 		defer a.metricsTicker.Stop()
123 | 		for {
124 | select {
125 | case <-ctx.Done():
126 | return
127 | case <-a.metricsTicker.C:
128 | a.pendingMut.RLock()
129 | pending := len(a.pending)
130 | a.pendingMut.RUnlock()
131 |
132 | stats.Record(ctx, metrics.PrefetchesPending.M(int64(pending)))
133 | stats.Record(ctx, metrics.PrefetchNegativeCacheSize.M(int64(a.negativeCache.Len())))
134 | stats.Record(ctx, metrics.PrefetchNegativeCacheTTLSeconds.M(int64(a.negativeCacheTTL.Seconds())))
135 | stats.Record(ctx, metrics.PrefetchesPendingLimit.M(int64(a.workQueueSize)))
136 |
137 | a.onMetricsPublished()
138 | }
139 | }
140 | }()
141 | }
142 |
143 | func (a *asyncProvidersFinder) handleRequest(ctx context.Context, req findRequest) {
144 | defer func() {
145 | a.onReqDone(req)
146 | a.pendingMut.Lock()
147 | delete(a.pending, string(req.key))
148 | a.pendingMut.Unlock()
149 | }()
150 |
151 | // since this is async work, we don't want to use the deadline of the request's context
152 | ctx = tag.NewContext(ctx, tag.FromContext(req.ctx))
153 |
154 | mh := multihash.Multihash(req.key)
155 | // hack: we're using a raw encoding here so that we can construct a CIDv1 to make the type system happy
156 | // the DHT doesn't actually care about the CID, it cares about the multihash
157 | // ideally FindProvidersAsync would take in a multihash, not a CID
158 | cid := cid.NewCidV1(uint64(multicodec.Raw), mh)
159 | ctx, stop := context.WithTimeout(ctx, a.timeout)
160 | defer stop()
161 | foundProviders := false
162 | startTime := a.clock.Now()
163 | for addrInfo := range req.router.FindProvidersAsync(ctx, cid, 1) {
164 | req.onProvider(addrInfo)
165 | foundProviders = true
166 | }
167 | findTime := a.clock.Since(startTime)
168 |
169 | if !foundProviders {
170 | a.negativeCache.Add(string(req.key))
171 | recordPrefetches(ctx, "failed", metrics.PrefetchDuration.M(float64(findTime.Milliseconds())))
172 | return
173 | }
174 |
175 | recordPrefetches(ctx, "succeeded", metrics.PrefetchDuration.M(float64(findTime.Milliseconds())))
176 | }
177 |
178 | func recordPrefetches(ctx context.Context, status string, extraMeasures ...stats.Measurement) {
179 | stats.RecordWithTags(
180 | ctx,
181 | []tag.Mutator{tag.Upsert(metrics.KeyStatus, status)},
182 | append([]stats.Measurement{metrics.Prefetches.M(1)}, extraMeasures...)...,
183 | )
184 | }
185 |
186 | // idempotentTimeCache wraps a timecache and adds thread safety and idempotency.
187 | type idempotentTimeCache struct {
188 | mut sync.RWMutex
189 | cache *timecache.TimeCache
190 | }
191 |
192 | // Add adds an element to the cache.
193 | // If the element is already in the cache, it is left untouched (the time is not updated).
194 | func (c *idempotentTimeCache) Add(s string) {
195 | c.mut.Lock()
196 | defer c.mut.Unlock()
197 | if !c.cache.Has(s) {
198 | c.cache.Add(s)
199 | }
200 | }
201 |
202 | func (c *idempotentTimeCache) Len() int {
203 | c.mut.RLock()
204 | defer c.mut.RUnlock()
205 | return len(c.cache.M)
206 | }
207 |
208 | func (c *idempotentTimeCache) Has(s string) bool {
209 | c.mut.RLock()
210 | defer c.mut.RUnlock()
211 | return c.cache.Has(s)
212 | }
213 |
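
Taken together: a caller constructs the finder, starts its workers with Run, then issues fire-and-forget Find calls; duplicate keys, recently-failed keys, and queue overflow are all absorbed without blocking. A sketch of that wiring, which would sit inside this package (the finder type is unexported) and assumes fmt and time are imported; examplePrefetch is a hypothetical helper, not part of the source:

```go
// examplePrefetch shows typical usage of the async finder.
func examplePrefetch(ctx context.Context, router ReadContentRouting, key []byte) {
	finder := NewAsyncProvidersFinder(30*time.Second, 1000, time.Hour)
	finder.Run(ctx, 8) // 8 workers; they exit when ctx is canceled

	// Find returns immediately; the callback runs later on a worker goroutine.
	_ = finder.Find(ctx, router, key, func(ai peer.AddrInfo) {
		fmt.Println("found provider:", ai.ID)
	})
}
```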
--------------------------------------------------------------------------------
/providers/finder_test.go:
--------------------------------------------------------------------------------
1 | package providers
2 |
3 | import (
4 | "context"
5 | "reflect"
6 | "sync"
7 | "testing"
8 | "time"
9 |
10 | "github.com/benbjohnson/clock"
11 | "github.com/ipfs/go-cid"
12 | "github.com/libp2p/go-libp2p/core/peer"
13 | "github.com/libp2p/hydra-booster/metrics"
14 | "github.com/stretchr/testify/assert"
15 | "go.opencensus.io/stats/view"
16 | "go.opencensus.io/tag"
17 | )
18 |
19 | type mockRouter struct {
20 | addrInfos map[string][]peer.AddrInfo
21 | }
22 |
23 | func (r *mockRouter) FindProvidersAsync(ctx context.Context, cid cid.Cid, results int) <-chan peer.AddrInfo {
24 | ch := make(chan peer.AddrInfo)
25 | go func() {
26 | ais := r.addrInfos[string(cid.Hash())]
27 | if len(ais) != 0 {
28 | for _, ai := range ais {
29 | ch <- ai
30 | }
31 | }
32 | close(ch)
33 | }()
34 | return ch
35 | }
36 |
37 | func TestAsyncProvidersFinder_Find(t *testing.T) {
38 | ttl := 20 * time.Second
39 | queueSize := 10
40 | cases := []struct {
41 | name string
42 | key string
43 | routerAddrInfos map[string][]peer.AddrInfo // key: multihash bytes
44 |
45 | expAIs []peer.AddrInfo
46 | expCached []string
47 |
48 | expMetricRows map[string][]view.Row
49 | }{
50 | {
51 | name: "single matching addrinfo",
52 | key: "foo",
53 | routerAddrInfos: map[string][]peer.AddrInfo{
54 | "foo": {{ID: peer.ID("peer1")}},
55 | "bar": {{ID: peer.ID("peer2")}},
56 | },
57 | expAIs: []peer.AddrInfo{{ID: peer.ID("peer1")}},
58 | expMetricRows: map[string][]view.Row{
59 | metrics.Prefetches.Name(): {{
60 | Data: &view.SumData{Value: 1},
61 | Tags: []tag.Tag{
62 | {Key: metrics.KeyStatus, Value: "succeeded"},
63 | },
64 | }},
65 | metrics.PrefetchesPending.Name(): {{
66 | Data: &view.LastValueData{Value: 0},
67 | }},
68 | metrics.PrefetchNegativeCacheSize.Name(): {{
69 | Data: &view.LastValueData{Value: 0},
70 | }},
71 | metrics.PrefetchNegativeCacheTTLSeconds.Name(): {{
72 | Data: &view.LastValueData{Value: float64(ttl.Seconds())},
73 | }},
74 | metrics.PrefetchesPendingLimit.Name(): {{
75 | Data: &view.LastValueData{Value: float64(queueSize)},
76 | }},
77 | },
78 | },
79 | {
80 | name: "failed lookups should be cached",
81 | key: "foo",
82 | expCached: []string{"foo"},
83 | expMetricRows: map[string][]view.Row{
84 | metrics.Prefetches.Name(): {{
85 | Data: &view.SumData{Value: 1},
86 | Tags: []tag.Tag{
87 | {Key: metrics.KeyStatus, Value: "failed"},
88 | },
89 | }},
90 | metrics.PrefetchNegativeCacheSize.Name(): {{
91 | Data: &view.LastValueData{Value: 1},
92 | }},
93 | metrics.PrefetchesPending.Name(): {{
94 | Data: &view.LastValueData{Value: 0},
95 | }},
96 | },
97 | },
98 | }
99 |
100 | for _, c := range cases {
101 | t.Run(c.name, func(t *testing.T) {
102 | ctx, stop := context.WithTimeout(context.Background(), 5*time.Second)
103 | defer stop()
104 |
105 | views := []*view.View{
106 | metrics.PrefetchesView,
107 | metrics.PrefetchesPendingView,
108 | metrics.PrefetchNegativeCacheSizeView,
109 | metrics.PrefetchNegativeCacheTTLSecondsView,
110 | metrics.PrefetchesPendingLimitView,
111 | }
112 | view.Register(views...)
113 | defer view.Unregister(views...)
114 |
115 | finder := NewAsyncProvidersFinder(10*time.Second, queueSize, ttl)
116 |
117 | // set a mock clock so we can control the timing of things
118 | clock := clock.NewMock()
119 | finder.clock = clock
120 | finder.metricsTicker = clock.Ticker(metricsPublishingInterval)
121 |
122 | router := &mockRouter{addrInfos: c.routerAddrInfos}
123 |
124 | // wait group so we know when all Find() reqs have been processed
125 | reqWG := &sync.WaitGroup{}
126 | reqWG.Add(1)
127 | finder.onReqDone = func(r findRequest) { reqWG.Done() }
128 |
129 | ais := []peer.AddrInfo{}
130 | // wait group so we know when all AIs have been processed
131 | aiWG := &sync.WaitGroup{}
132 | aiWG.Add(len(c.expAIs))
133 |
134 | // wait group so we know when metric publishing has occurred
135 | metricsWG := &sync.WaitGroup{}
136 | metricsWG.Add(1)
137 | finder.onMetricsPublished = func() { metricsWG.Done() }
138 |
139 | finder.Run(ctx, 10)
140 | err := finder.Find(ctx, router, []byte(c.key), func(ai peer.AddrInfo) {
141 | ais = append(ais, ai)
142 | aiWG.Done()
143 | })
144 | wait(t, ctx, "addrinfos", aiWG)
145 | wait(t, ctx, "requests", reqWG)
146 |
147 | assert.NoError(t, err)
148 | assert.Equal(t, len(c.expAIs), len(ais))
149 |
150 | for i, ai := range c.expAIs {
151 | assert.EqualValues(t, ai.ID, ais[i].ID)
152 | }
153 |
154 | for _, k := range c.expCached {
155 | assert.True(t, finder.negativeCache.Has(k))
156 | }
157 |
158 | // trigger metric publishing and verify
159 | clock.Add(metricsPublishingInterval + time.Second)
160 | wait(t, ctx, "metrics", metricsWG)
161 |
162 | for name, expRows := range c.expMetricRows {
163 | rows, err := view.RetrieveData(name)
164 | assert.NoError(t, err)
165 | assert.True(t, subsetRowVals(expRows, rows))
166 | }
167 | })
168 |
169 | }
170 | }
171 |
172 | // wait waits on a waitgroup with a timeout
173 | func wait(t *testing.T, ctx context.Context, name string, wg *sync.WaitGroup) {
174 | ch := make(chan struct{})
175 | go func() {
176 | wg.Wait()
177 | close(ch)
178 | }()
179 |
180 | select {
181 | case <-ch:
182 | return
183 | case <-ctx.Done():
184 | t.Fatalf("timeout waiting for %s", name)
185 | }
186 | }
187 |
188 | // rowsEqual returns true if two OpenCensus view rows are equal, excluding timestamps
189 | func rowsEqual(row1 view.Row, row2 view.Row) bool {
190 | if !reflect.DeepEqual(row1.Tags, row2.Tags) {
191 | return false
192 | }
193 |
194 | switch row1Data := row1.Data.(type) {
195 | case *view.CountData:
196 | if row2Data, ok := row2.Data.(*view.CountData); ok {
197 | return row1Data.Value == row2Data.Value
198 | }
199 | case *view.SumData:
200 | if row2Data, ok := row2.Data.(*view.SumData); ok {
201 | return row1Data.Value == row2Data.Value
202 | }
203 | case *view.LastValueData:
204 | if row2Data, ok := row2.Data.(*view.LastValueData); ok {
205 | return row1Data.Value == row2Data.Value
206 | }
207 | }
208 | return false
209 | }
210 |
211 | func containsRowVal(row view.Row, rows []*view.Row) bool {
212 | for _, r := range rows {
213 | if rowsEqual(*r, row) {
214 | return true
215 | }
216 | }
217 | return false
218 | }
219 |
220 | func subsetRowVals(subset []view.Row, rows []*view.Row) bool {
221 | for _, expRow := range subset {
222 | if !containsRowVal(expRow, rows) {
223 | return false
224 | }
225 | }
226 | return true
227 | }
228 |
--------------------------------------------------------------------------------
/providers/httpapi.go:
--------------------------------------------------------------------------------
1 | package providers
2 |
3 | import (
4 | "context"
5 | "fmt"
6 | "net/http"
7 |
8 | "github.com/ipfs/go-cid"
9 | drc "github.com/ipfs/go-libipfs/routing/http/client"
10 | "github.com/ipfs/go-libipfs/routing/http/contentrouter"
11 | "github.com/libp2p/go-libp2p/core/peer"
12 | "github.com/multiformats/go-multihash"
13 | )
14 |
15 | type readContentRouter interface {
16 | FindProvidersAsync(context.Context, cid.Cid, int) <-chan peer.AddrInfo
17 | }
18 |
19 | func NewHTTPProviderStore(httpClient *http.Client, endpointURL string) (*httpProvider, error) {
20 | drClient, err := drc.New(endpointURL, drc.WithHTTPClient(httpClient))
21 | if err != nil {
22 | return nil, fmt.Errorf("building delegated routing HTTP client: %w", err)
23 | }
24 | cr := contentrouter.NewContentRoutingClient(drClient)
25 |
26 | return &httpProvider{
27 | cr: cr,
28 | }, nil
29 | }
30 |
31 | type httpProvider struct {
32 | cr readContentRouter
33 | }
34 |
35 | func (p *httpProvider) AddProvider(ctx context.Context, key []byte, prov peer.AddrInfo) error {
36 | return nil
37 | }
38 |
39 | func (p *httpProvider) GetProviders(ctx context.Context, key []byte) ([]peer.AddrInfo, error) {
40 | mh, err := multihash.Cast(key)
41 | if err != nil {
42 | return nil, err
43 | }
44 | c := cid.NewCidV1(cid.Raw, mh)
45 | provChan := p.cr.FindProvidersAsync(ctx, c, 100)
46 | var provs []peer.AddrInfo
47 | for {
48 | select {
49 | case <-ctx.Done():
50 | return provs, ctx.Err()
51 | case prov, ok := <-provChan:
52 | if !ok {
53 | return provs, nil
54 | }
55 | provs = append(provs, prov)
56 | }
57 | }
58 | }
59 |
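
GetProviders here adapts the byte-key ProviderStore interface onto the CID-based delegated routing client: the key is cast to a multihash, wrapped in a raw CIDv1, and the channel is drained until it closes or the context expires. A usage sketch, with a placeholder endpoint URL:

```go
package main

import (
	"context"
	"fmt"
	"net/http"
	"time"

	"github.com/libp2p/hydra-booster/providers"
	"github.com/multiformats/go-multihash"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	// "https://routing.example.com" is a hypothetical endpoint.
	store, err := providers.NewHTTPProviderStore(&http.Client{Timeout: 10 * time.Second}, "https://routing.example.com")
	if err != nil {
		panic(err)
	}

	// The key must be a valid multihash, since GetProviders casts it.
	mh, err := multihash.Sum([]byte("some content"), multihash.SHA2_256, -1)
	if err != nil {
		panic(err)
	}
	provs, err := store.GetProviders(ctx, mh)
	if err != nil {
		fmt.Println("lookup failed:", err)
		return
	}
	for _, p := range provs {
		fmt.Println(p.ID)
	}
}
```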
--------------------------------------------------------------------------------
/providers/noop.go:
--------------------------------------------------------------------------------
1 | package providers
2 |
3 | import (
4 | "context"
5 |
6 | "github.com/libp2p/go-libp2p/core/peer"
7 | )
8 |
9 | type NoopProviderStore struct{}
10 |
11 | func (s *NoopProviderStore) AddProvider(ctx context.Context, key []byte, prov peer.AddrInfo) error {
12 | return nil
13 | }
14 |
15 | func (s *NoopProviderStore) GetProviders(ctx context.Context, key []byte) ([]peer.AddrInfo, error) {
16 | return nil, nil
17 | }
18 |
--------------------------------------------------------------------------------
/providers/unsupported.go:
--------------------------------------------------------------------------------
1 | package providers
2 |
3 | import (
4 | "context"
5 |
6 | "github.com/libp2p/go-libp2p-kad-dht/providers"
7 | "github.com/libp2p/go-libp2p/core/peer"
8 | )
9 |
10 | func AddProviderNotSupported(backend providers.ProviderStore) providers.ProviderStore {
11 | return &AddProviderNotSupportedProviderStore{backend: backend}
12 | }
13 |
14 | type AddProviderNotSupportedProviderStore struct {
15 | backend providers.ProviderStore
16 | }
17 |
18 | func (s *AddProviderNotSupportedProviderStore) AddProvider(ctx context.Context, key []byte, prov peer.AddrInfo) error {
19 | return nil
20 | }
21 |
22 | func (s *AddProviderNotSupportedProviderStore) GetProviders(ctx context.Context, key []byte) ([]peer.AddrInfo, error) {
23 | return s.backend.GetProviders(ctx, key)
24 | }
25 |
--------------------------------------------------------------------------------
/run.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | set -u
3 | set -e
4 | docker build . -t hydra-booster
5 | docker run -it hydra-booster
--------------------------------------------------------------------------------
/testdata/metrics/1head.txt:
--------------------------------------------------------------------------------
1 | # HELP go_gc_duration_seconds A summary of the GC invocation durations.
2 | # TYPE go_gc_duration_seconds summary
3 | go_gc_duration_seconds{quantile="0"} 3.8205e-05
4 | go_gc_duration_seconds{quantile="0.25"} 3.8205e-05
5 | go_gc_duration_seconds{quantile="0.5"} 0.000121299
6 | go_gc_duration_seconds{quantile="0.75"} 0.000121299
7 | go_gc_duration_seconds{quantile="1"} 0.000121299
8 | go_gc_duration_seconds_sum 0.000159504
9 | go_gc_duration_seconds_count 2
10 | # HELP go_goroutines Number of goroutines that currently exist.
11 | # TYPE go_goroutines gauge
12 | go_goroutines 54
13 | # HELP go_info Information about the Go environment.
14 | # TYPE go_info gauge
15 | go_info{version="go1.14"} 1
16 | # HELP go_memstats_alloc_bytes Number of bytes allocated and still in use.
17 | # TYPE go_memstats_alloc_bytes gauge
18 | go_memstats_alloc_bytes 1.155308e+07
19 | # HELP go_memstats_alloc_bytes_total Total number of bytes allocated, even if freed.
20 | # TYPE go_memstats_alloc_bytes_total counter
21 | go_memstats_alloc_bytes_total 1.9775816e+07
22 | # HELP go_memstats_buck_hash_sys_bytes Number of bytes used by the profiling bucket hash table.
23 | # TYPE go_memstats_buck_hash_sys_bytes gauge
24 | go_memstats_buck_hash_sys_bytes 1.4487e+06
25 | # HELP go_memstats_frees_total Total number of frees.
26 | # TYPE go_memstats_frees_total counter
27 | go_memstats_frees_total 36194
28 | # HELP go_memstats_gc_cpu_fraction The fraction of this program's available CPU time used by the GC since the program started.
29 | # TYPE go_memstats_gc_cpu_fraction gauge
30 | go_memstats_gc_cpu_fraction 2.053068523166993e-05
31 | # HELP go_memstats_gc_sys_bytes Number of bytes used for garbage collection system metadata.
32 | # TYPE go_memstats_gc_sys_bytes gauge
33 | go_memstats_gc_sys_bytes 3.57812e+06
34 | # HELP go_memstats_heap_alloc_bytes Number of heap bytes allocated and still in use.
35 | # TYPE go_memstats_heap_alloc_bytes gauge
36 | go_memstats_heap_alloc_bytes 1.155308e+07
37 | # HELP go_memstats_heap_idle_bytes Number of heap bytes waiting to be used.
38 | # TYPE go_memstats_heap_idle_bytes gauge
39 | go_memstats_heap_idle_bytes 5.193728e+07
40 | # HELP go_memstats_heap_inuse_bytes Number of heap bytes that are in use.
41 | # TYPE go_memstats_heap_inuse_bytes gauge
42 | go_memstats_heap_inuse_bytes 1.3828096e+07
43 | # HELP go_memstats_heap_objects Number of allocated objects.
44 | # TYPE go_memstats_heap_objects gauge
45 | go_memstats_heap_objects 19382
46 | # HELP go_memstats_heap_released_bytes Number of heap bytes released to OS.
47 | # TYPE go_memstats_heap_released_bytes gauge
48 | go_memstats_heap_released_bytes 4.698112e+07
49 | # HELP go_memstats_heap_sys_bytes Number of heap bytes obtained from system.
50 | # TYPE go_memstats_heap_sys_bytes gauge
51 | go_memstats_heap_sys_bytes 6.5765376e+07
52 | # HELP go_memstats_last_gc_time_seconds Number of seconds since 1970 of last garbage collection.
53 | # TYPE go_memstats_last_gc_time_seconds gauge
54 | go_memstats_last_gc_time_seconds 1.5840999187260811e+09
55 | # HELP go_memstats_lookups_total Total number of pointer lookups.
56 | # TYPE go_memstats_lookups_total counter
57 | go_memstats_lookups_total 0
58 | # HELP go_memstats_mallocs_total Total number of mallocs.
59 | # TYPE go_memstats_mallocs_total counter
60 | go_memstats_mallocs_total 55576
61 | # HELP go_memstats_mcache_inuse_bytes Number of bytes in use by mcache structures.
62 | # TYPE go_memstats_mcache_inuse_bytes gauge
63 | go_memstats_mcache_inuse_bytes 13888
64 | # HELP go_memstats_mcache_sys_bytes Number of bytes used for mcache structures obtained from system.
65 | # TYPE go_memstats_mcache_sys_bytes gauge
66 | go_memstats_mcache_sys_bytes 16384
67 | # HELP go_memstats_mspan_inuse_bytes Number of bytes in use by mspan structures.
68 | # TYPE go_memstats_mspan_inuse_bytes gauge
69 | go_memstats_mspan_inuse_bytes 127296
70 | # HELP go_memstats_mspan_sys_bytes Number of bytes used for mspan structures obtained from system.
71 | # TYPE go_memstats_mspan_sys_bytes gauge
72 | go_memstats_mspan_sys_bytes 147456
73 | # HELP go_memstats_next_gc_bytes Number of heap bytes when next garbage collection will take place.
74 | # TYPE go_memstats_next_gc_bytes gauge
75 | go_memstats_next_gc_bytes 1.7880512e+07
76 | # HELP go_memstats_other_sys_bytes Number of bytes used for other system allocations.
77 | # TYPE go_memstats_other_sys_bytes gauge
78 | go_memstats_other_sys_bytes 2.231548e+06
79 | # HELP go_memstats_stack_inuse_bytes Number of bytes in use by the stack allocator.
80 | # TYPE go_memstats_stack_inuse_bytes gauge
81 | go_memstats_stack_inuse_bytes 1.343488e+06
82 | # HELP go_memstats_stack_sys_bytes Number of bytes obtained from system for stack allocator.
83 | # TYPE go_memstats_stack_sys_bytes gauge
84 | go_memstats_stack_sys_bytes 1.343488e+06
85 | # HELP go_memstats_sys_bytes Number of bytes obtained from system.
86 | # TYPE go_memstats_sys_bytes gauge
87 | go_memstats_sys_bytes 7.4531072e+07
88 | # HELP go_threads Number of OS threads created.
89 | # TYPE go_threads gauge
90 | go_threads 15
91 | # HELP hydrabooster_bootstrapped_heads Bootstrapped heads
92 | # TYPE hydrabooster_bootstrapped_heads counter
93 | hydrabooster_bootstrapped_heads{peer_id="12D3KooWETMx8cDb7JtmpUjPrhXv27mRi7rLmENoK5JT2FYogZvo"} 1
94 | # HELP hydrabooster_connected_peers Peers connected to all heads
95 | # TYPE hydrabooster_connected_peers counter
96 | hydrabooster_connected_peers{peer_id="12D3KooWETMx8cDb7JtmpUjPrhXv27mRi7rLmENoK5JT2FYogZvo"} 4
97 | # HELP hydrabooster_provider_records Number of provider records in the datastore shared by all heads
98 | # TYPE hydrabooster_provider_records gauge
99 | hydrabooster_provider_records 0
100 | # HELP hydrabooster_routing_table_size Number of peers in the routing table
101 | # TYPE hydrabooster_routing_table_size gauge
102 | hydrabooster_routing_table_size 4
103 | # HELP hydrabooster_heads Heads launched by Hydra
104 | # TYPE hydrabooster_heads counter
105 | hydrabooster_heads{peer_id="12D3KooWETMx8cDb7JtmpUjPrhXv27mRi7rLmENoK5JT2FYogZvo"} 1
106 | # HELP hydrabooster_unique_peers_total Total unique peers seen across all heads
107 | # TYPE hydrabooster_unique_peers_total gauge
108 | hydrabooster_unique_peers_total 5
109 |
--------------------------------------------------------------------------------
/testdata/metrics/2heads.txt:
--------------------------------------------------------------------------------
1 | # HELP go_gc_duration_seconds A summary of the GC invocation durations.
2 | # TYPE go_gc_duration_seconds summary
3 | go_gc_duration_seconds{quantile="0"} 4.3403e-05
4 | go_gc_duration_seconds{quantile="0.25"} 4.3403e-05
5 | go_gc_duration_seconds{quantile="0.5"} 6.0062e-05
6 | go_gc_duration_seconds{quantile="0.75"} 7.6953e-05
7 | go_gc_duration_seconds{quantile="1"} 7.6953e-05
8 | go_gc_duration_seconds_sum 0.000180418
9 | go_gc_duration_seconds_count 3
10 | # HELP go_goroutines Number of goroutines that currently exist.
11 | # TYPE go_goroutines gauge
12 | go_goroutines 218
13 | # HELP go_info Information about the Go environment.
14 | # TYPE go_info gauge
15 | go_info{version="go1.14"} 1
16 | # HELP go_memstats_alloc_bytes Number of bytes allocated and still in use.
17 | # TYPE go_memstats_alloc_bytes gauge
18 | go_memstats_alloc_bytes 7.893688e+06
19 | # HELP go_memstats_alloc_bytes_total Total number of bytes allocated, even if freed.
20 | # TYPE go_memstats_alloc_bytes_total counter
21 | go_memstats_alloc_bytes_total 1.5666776e+07
22 | # HELP go_memstats_buck_hash_sys_bytes Number of bytes used by the profiling bucket hash table.
23 | # TYPE go_memstats_buck_hash_sys_bytes gauge
24 | go_memstats_buck_hash_sys_bytes 1.448556e+06
25 | # HELP go_memstats_frees_total Total number of frees.
26 | # TYPE go_memstats_frees_total counter
27 | go_memstats_frees_total 36392
28 | # HELP go_memstats_gc_cpu_fraction The fraction of this program's available CPU time used by the GC since the program started.
29 | # TYPE go_memstats_gc_cpu_fraction gauge
30 | go_memstats_gc_cpu_fraction 7.918612830869877e-05
31 | # HELP go_memstats_gc_sys_bytes Number of bytes used for garbage collection system metadata.
32 | # TYPE go_memstats_gc_sys_bytes gauge
33 | go_memstats_gc_sys_bytes 3.582216e+06
34 | # HELP go_memstats_heap_alloc_bytes Number of heap bytes allocated and still in use.
35 | # TYPE go_memstats_heap_alloc_bytes gauge
36 | go_memstats_heap_alloc_bytes 7.893688e+06
37 | # HELP go_memstats_heap_idle_bytes Number of heap bytes waiting to be used.
38 | # TYPE go_memstats_heap_idle_bytes gauge
39 | go_memstats_heap_idle_bytes 5.4083584e+07
40 | # HELP go_memstats_heap_inuse_bytes Number of heap bytes that are in use.
41 | # TYPE go_memstats_heap_inuse_bytes gauge
42 | go_memstats_heap_inuse_bytes 1.0960896e+07
43 | # HELP go_memstats_heap_objects Number of allocated objects.
44 | # TYPE go_memstats_heap_objects gauge
45 | go_memstats_heap_objects 17362
46 | # HELP go_memstats_heap_released_bytes Number of heap bytes released to OS.
47 | # TYPE go_memstats_heap_released_bytes gauge
48 | go_memstats_heap_released_bytes 5.132288e+07
49 | # HELP go_memstats_heap_sys_bytes Number of heap bytes obtained from system.
50 | # TYPE go_memstats_heap_sys_bytes gauge
51 | go_memstats_heap_sys_bytes 6.504448e+07
52 | # HELP go_memstats_last_gc_time_seconds Number of seconds since 1970 of last garbage collection.
53 | # TYPE go_memstats_last_gc_time_seconds gauge
54 | go_memstats_last_gc_time_seconds 1.584101844547604e+09
55 | # HELP go_memstats_lookups_total Total number of pointer lookups.
56 | # TYPE go_memstats_lookups_total counter
57 | go_memstats_lookups_total 0
58 | # HELP go_memstats_mallocs_total Total number of mallocs.
59 | # TYPE go_memstats_mallocs_total counter
60 | go_memstats_mallocs_total 53754
61 | # HELP go_memstats_mcache_inuse_bytes Number of bytes in use by mcache structures.
62 | # TYPE go_memstats_mcache_inuse_bytes gauge
63 | go_memstats_mcache_inuse_bytes 13888
64 | # HELP go_memstats_mcache_sys_bytes Number of bytes used for mcache structures obtained from system.
65 | # TYPE go_memstats_mcache_sys_bytes gauge
66 | go_memstats_mcache_sys_bytes 16384
67 | # HELP go_memstats_mspan_inuse_bytes Number of bytes in use by mspan structures.
68 | # TYPE go_memstats_mspan_inuse_bytes gauge
69 | go_memstats_mspan_inuse_bytes 150280
70 | # HELP go_memstats_mspan_sys_bytes Number of bytes used for mspan structures obtained from system.
71 | # TYPE go_memstats_mspan_sys_bytes gauge
72 | go_memstats_mspan_sys_bytes 163840
73 | # HELP go_memstats_next_gc_bytes Number of heap bytes when next garbage collection will take place.
74 | # TYPE go_memstats_next_gc_bytes gauge
75 | go_memstats_next_gc_bytes 1.5476896e+07
76 | # HELP go_memstats_other_sys_bytes Number of bytes used for other system allocations.
77 | # TYPE go_memstats_other_sys_bytes gauge
78 | go_memstats_other_sys_bytes 2.211212e+06
79 | # HELP go_memstats_stack_inuse_bytes Number of bytes in use by the stack allocator.
80 | # TYPE go_memstats_stack_inuse_bytes gauge
81 | go_memstats_stack_inuse_bytes 2.064384e+06
82 | # HELP go_memstats_stack_sys_bytes Number of bytes obtained from system for stack allocator.
83 | # TYPE go_memstats_stack_sys_bytes gauge
84 | go_memstats_stack_sys_bytes 2.064384e+06
85 | # HELP go_memstats_sys_bytes Number of bytes obtained from system.
86 | # TYPE go_memstats_sys_bytes gauge
87 | go_memstats_sys_bytes 7.4531072e+07
88 | # HELP go_threads Number of OS threads created.
89 | # TYPE go_threads gauge
90 | go_threads 15
91 | # HELP hydrabooster_bootstrapped_heads Bootstrapped heads
92 | # TYPE hydrabooster_bootstrapped_heads counter
93 | hydrabooster_bootstrapped_heads{peer_id="12D3KooWLmQdENVpfyNfjREQKGxuqJNzZKA8SrYpZ3AKxH74qP4d"} 1
94 | hydrabooster_bootstrapped_heads{peer_id="12D3KooWNe6g9t2gnFD13ian5xs7EWPvtSAAC6GvK3qNqF7eXc4C"} 1
95 | # HELP hydrabooster_connected_peers Peers connected to all heads
96 | # TYPE hydrabooster_connected_peers counter
97 | hydrabooster_connected_peers{peer_id="12D3KooWLmQdENVpfyNfjREQKGxuqJNzZKA8SrYpZ3AKxH74qP4d"} 4
98 | hydrabooster_connected_peers{peer_id="12D3KooWNe6g9t2gnFD13ian5xs7EWPvtSAAC6GvK3qNqF7eXc4C"} 7
99 | # HELP hydrabooster_provider_records Number of provider records in the datastore shared by all heads
100 | # TYPE hydrabooster_provider_records gauge
101 | hydrabooster_provider_records 0
102 | # HELP hydrabooster_routing_table_size Number of peers in the routing table
103 | # TYPE hydrabooster_routing_table_size gauge
104 | hydrabooster_routing_table_size 9
105 | # HELP hydrabooster_heads Heads launched by Hydra
106 | # TYPE hydrabooster_heads counter
107 | hydrabooster_heads{peer_id="12D3KooWLmQdENVpfyNfjREQKGxuqJNzZKA8SrYpZ3AKxH74qP4d"} 1
108 | hydrabooster_heads{peer_id="12D3KooWNe6g9t2gnFD13ian5xs7EWPvtSAAC6GvK3qNqF7eXc4C"} 1
109 | # HELP hydrabooster_unique_peers_total Total unique peers seen across all heads
110 | # TYPE hydrabooster_unique_peers_total gauge
111 | hydrabooster_unique_peers_total 9
112 |
--------------------------------------------------------------------------------
/testing/helpers.go:
--------------------------------------------------------------------------------
1 | package testing
2 |
3 | import (
4 | "context"
5 |
6 | "go.opencensus.io/tag"
7 | )
8 |
9 | // ChanWriter is a writer that writes to a channel
10 | type ChanWriter struct {
11 | C chan []byte
12 | }
13 |
14 | // NewChanWriter creates a new channel writer
15 | func NewChanWriter() *ChanWriter {
16 | return &ChanWriter{make(chan []byte)}
17 | }
18 |
19 | // Write writes to the channel
20 | func (w *ChanWriter) Write(p []byte) (int, error) {
21 | d := make([]byte, len(p))
22 | copy(d, p)
23 | w.C <- d
24 | return len(p), nil
25 | }
26 |
27 | func NewContext() context.Context {
28 | ctx := context.Background()
29 | ctx, err := tag.New(ctx, tag.Upsert(tag.MustNewKey("name"), "test"))
30 | if err != nil {
31 | panic(err)
32 | }
33 | return ctx
34 | }
35 |
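
ChanWriter turns every Write into a message on a channel, which is what lets the UI tests block until the output they are waiting for actually appears. The idea in miniature:

```go
package main

import (
	"fmt"
	"strings"

	hytesting "github.com/libp2p/hydra-booster/testing"
)

func main() {
	cw := hytesting.NewChanWriter()

	// Writer side: each Write sends one copied chunk on cw.C.
	go fmt.Fprintln(cw, "hello world")

	// Reader side: block until the expected output shows up.
	for chunk := range cw.C {
		if strings.Contains(string(chunk), "hello") {
			fmt.Println("saw expected output")
			break
		}
	}
}
```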
--------------------------------------------------------------------------------
/ui/gooey.go:
--------------------------------------------------------------------------------
1 | package ui
2 |
3 | import (
4 | "fmt"
5 | "io"
6 | "strings"
7 |
8 | net "github.com/libp2p/go-libp2p/core/network"
9 | ma "github.com/multiformats/go-multiaddr"
10 | )
11 |
12 | // ANSI escape sequences for clearing the current line and the whole screen.
13 | const (
14 | QClrLine = "\033[K"
15 | QReset = "\033[2J"
16 | )
17 |
18 | /*
19 | - Move the cursor up N lines:
20 |   \033[A
21 | - Move the cursor down N lines:
22 |   \033[B
23 | - Move the cursor forward N columns:
24 |   \033[C
25 | - Move the cursor backward N columns:
26 |   \033[D
27 | */
28 |
29 | // SGR codes: reset all attributes, and light blue foreground.
30 | const (
31 | Clear = 0
32 | LightBlue = 94
33 | )
34 |
35 | // Standard ANSI foreground color codes.
36 | const (
37 | Black = 30 + iota
38 | Red
39 | Green
40 | Yellow
41 | Blue
42 | Magenta
43 | Cyan
44 | LightGray
45 | )
46 |
47 | const width = 25
48 |
49 | func padPrint(writer io.Writer, line int, label, value string) {
50 | putMessage(writer, line, fmt.Sprintf("%s%s%s", label, strings.Repeat(" ", width-len(label)), value))
51 | }
52 |
53 | func putMessage(writer io.Writer, line int, mes string) {
54 | fmt.Fprintf(writer, "\033[%d;0H%s%s", line, QClrLine, mes)
55 | }
56 |
57 | // GooeyApp is a simple terminal UI that renders labeled data lines and a scrolling log in place.
58 | type GooeyApp struct {
59 | Title string
60 | DataFields []*DataLine
61 | Log *Log
62 | writer io.Writer
63 | }
64 |
65 | // Print ...
66 | func (a *GooeyApp) Print() {
67 | fmt.Fprintln(a.writer, QReset)
68 | putMessage(a.writer, 1, a.Title)
69 | for _, dl := range a.DataFields {
70 | dl.Print()
71 | }
72 | a.Log.Print()
73 | }
74 |
75 | // NewDataLine ...
76 | func (a *GooeyApp) NewDataLine(line int, label, defval string) *DataLine {
77 | dl := &DataLine{
78 | Default: defval,
79 | Label: label,
80 | Line: line,
81 | writer: a.writer,
82 | }
83 | a.DataFields = append(a.DataFields, dl)
84 |
85 | return dl
86 | }
87 |
88 | // DataLine is a single labeled value rendered on a fixed terminal row.
89 | type DataLine struct {
90 | Label string
91 | Line int
92 | Default string
93 | LastVal string
94 | writer io.Writer
95 | }
96 |
97 | // SetVal ...
98 | func (dl *DataLine) SetVal(s string) {
99 | dl.LastVal = s
100 | dl.Print()
101 | }
102 |
103 | // Print ...
104 | func (dl *DataLine) Print() {
105 | s := dl.Default
106 | if dl.LastVal != "" {
107 | s = dl.LastVal
108 | }
109 |
110 | padPrint(dl.writer, dl.Line, dl.Label, s)
111 | }
112 |
113 | // Log is a fixed-size scrolling message area rendered below the data lines.
114 | type Log struct {
115 | Size int
116 | StartLine int
117 | Messages []string
118 | beg int
119 | end int
120 | writer io.Writer
121 | }
122 |
123 | // NewLog ...
124 | func NewLog(writer io.Writer, line, size int) *Log {
125 | return &Log{
126 | Size: size,
127 | StartLine: line,
128 | Messages: make([]string, size),
129 | end: -1,
130 | writer: writer,
131 | }
132 | }
133 |
134 | // Add ...
135 | func (l *Log) Add(m string) {
136 | l.end = (l.end + 1) % l.Size
137 | if l.Messages[l.end] != "" {
138 | l.beg++
139 | }
140 | l.Messages[l.end] = m
141 | }
142 |
143 | // Print ...
144 | func (l *Log) Print() {
145 | for i := 0; i < l.Size; i++ {
146 | putMessage(l.writer, l.StartLine+i, l.Messages[(l.beg+i)%l.Size])
147 | }
148 | }
149 |
150 | // LogNotifee forwards libp2p network connect/disconnect events to the log.
151 | type LogNotifee struct {
152 | addMes chan<- string
153 | }
154 |
155 | // Listen ...
156 | func (ln *LogNotifee) Listen(net.Network, ma.Multiaddr) {}
157 |
158 | // ListenClose ...
159 | func (ln *LogNotifee) ListenClose(net.Network, ma.Multiaddr) {}
160 |
161 | // Connected ...
162 | func (ln *LogNotifee) Connected(_ net.Network, c net.Conn) {
163 | ln.addMes <- fmt.Sprintf("New connection from %s", c.RemotePeer().Pretty())
164 | }
165 |
166 | // Disconnected ...
167 | func (ln *LogNotifee) Disconnected(_ net.Network, c net.Conn) {
168 | ln.addMes <- fmt.Sprintf("Lost connection to %s", c.RemotePeer().Pretty())
169 | }
170 |
171 | // OpenedStream ...
172 | func (ln *LogNotifee) OpenedStream(net.Network, net.Stream) {}
173 |
174 | // ClosedStream ...
175 | func (ln *LogNotifee) ClosedStream(net.Network, net.Stream) {}
176 |
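
GooeyApp is a small ANSI-escape dashboard: each DataLine owns one terminal row and SetVal repaints just that row. A sketch of driving it directly; since the writer field is unexported this would live inside the ui package, and it assumes fmt, os, and time are imported. exampleGooey is a hypothetical demo, not part of the source:

```go
// exampleGooey demonstrates the dashboard primitives.
func exampleGooey() {
	app := &GooeyApp{Title: "Demo", Log: NewLog(os.Stdout, 5, 3), writer: os.Stdout}
	conns := app.NewDataLine(3, "Connections", "0")
	app.Print() // clear the screen and draw the title, data lines, and log

	for i := 1; i <= 3; i++ {
		time.Sleep(time.Second)
		conns.SetVal(fmt.Sprintf("%d peers", i)) // redraws only its own row
	}

	app.Log.Add("done")
	app.Log.Print()
}
```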
--------------------------------------------------------------------------------
/ui/opts/options.go:
--------------------------------------------------------------------------------
1 | package opts
2 |
3 | import (
4 | "fmt"
5 | "io"
6 | "os"
7 | "time"
8 | )
9 |
10 | // Options are the UI options
11 | type Options struct {
12 | MetricsURL string
13 | Start time.Time
14 | Writer io.Writer
15 | RefreshPeriod time.Duration
16 | }
17 |
18 | // Option is the UI option type.
19 | type Option func(*Options) error
20 |
21 | // Apply applies the given options to this Option.
22 | func (o *Options) Apply(opts ...Option) error {
23 | for i, opt := range opts {
24 | if err := opt(o); err != nil {
25 | return fmt.Errorf("UI option %d failed: %s", i, err)
26 | }
27 | }
28 | return nil
29 | }
30 |
31 | // Defaults are the default UI options. This option will be automatically
32 | // prepended to any options you pass to the NewUI constructor.
33 | var Defaults = func(o *Options) error {
34 | o.MetricsURL = "http://127.0.0.1:8888/metrics"
35 | o.Start = time.Now()
36 | o.Writer = os.Stderr
37 | o.RefreshPeriod = time.Second * 5
38 | return nil
39 | }
40 |
41 | // MetricsURL configures the URL of the Prometheus /metrics endpoint.
42 | // Defaults to http://127.0.0.1:8888/metrics.
43 | func MetricsURL(url string) Option {
44 | return func(o *Options) error {
45 | o.MetricsURL = url
46 | return nil
47 | }
48 | }
49 |
50 | // Start configures the start time the UI calculates the uptime value from.
51 | // Defaults to time.Now().
52 | func Start(t time.Time) Option {
53 | return func(o *Options) error {
54 | o.Start = t
55 | return nil
56 | }
57 | }
58 |
59 | // Writer configures where the output should be written to.
60 | // The default value is os.Stderr.
61 | func Writer(w io.Writer) Option {
62 | return func(o *Options) error {
63 | o.Writer = w
64 | return nil
65 | }
66 | }
67 |
68 | // RefreshPeriod configures the period between UI refreshes.
69 | // Defaults to 5s.
70 | func RefreshPeriod(rp time.Duration) Option {
71 | return func(o *Options) error {
72 | o.RefreshPeriod = rp
73 | return nil
74 | }
75 | }
76 |
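
These are standard functional options: NewUI prepends Defaults, so caller-supplied options override only the fields they touch. Applying them directly looks like this:

```go
package main

import (
	"fmt"
	"time"

	uiopts "github.com/libp2p/hydra-booster/ui/opts"
)

func main() {
	o := uiopts.Options{}
	err := o.Apply(
		uiopts.Defaults,                     // fill in every default first
		uiopts.RefreshPeriod(2*time.Second), // then override a single field
	)
	if err != nil {
		panic(err)
	}
	fmt.Println(o.MetricsURL, o.RefreshPeriod) // http://127.0.0.1:8888/metrics 2s
}
```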
--------------------------------------------------------------------------------
/ui/ui.go:
--------------------------------------------------------------------------------
1 | package ui
2 |
3 | import (
4 | "context"
5 | "fmt"
6 | "time"
7 |
8 | pmc "github.com/alanshaw/prom-metrics-client"
9 | "github.com/dustin/go-humanize"
10 | "github.com/libp2p/hydra-booster/metrics"
11 | uiopts "github.com/libp2p/hydra-booster/ui/opts"
12 | "go.opencensus.io/stats"
13 | )
14 |
15 | // Theme is the style of UI to render
16 | type Theme int
17 |
18 | const (
19 | // Logey is a UI theme that simply logs data periodically to stdout
20 | Logey Theme = iota
21 | // Gooey is a UI theme that refreshes values in place
22 | Gooey
23 | )
24 |
25 | // UI is a simple command line interface to the Prometheus /metrics endpoint
26 | type UI struct {
27 | theme Theme
28 | options uiopts.Options
29 | }
30 |
31 | // NewUI constructs a new "UI" for the Prometheus /metrics endpoint
32 | func NewUI(theme Theme, opts ...uiopts.Option) (*UI, error) {
33 | options := uiopts.Options{}
34 | 	if err := options.Apply(append([]uiopts.Option{uiopts.Defaults}, opts...)...); err != nil {
35 | 		return nil, err
36 | 	}
37 | 	return &UI{theme: theme, options: options}, nil
38 | }
37 |
38 | // Render displays and updates a "UI" for the Prometheus /metrics endpoint
39 | func (ui *UI) Render(ctx context.Context) error {
40 | client := pmc.PromMetricsClient{URL: ui.options.MetricsURL}
41 | mC := make(chan []*pmc.Metric)
42 |
43 | go func() {
44 | timer := time.NewTimer(0)
45 | defer timer.Stop()
46 |
47 | for {
48 | select {
49 | case <-timer.C:
50 | ms, err := client.GetMetrics()
51 | if err != nil {
52 | fmt.Println(err)
53 | } else {
54 | mC <- ms
55 | }
56 | timer.Reset(ui.options.RefreshPeriod)
57 | case <-ctx.Done():
58 | return
59 | }
60 | }
61 | }()
62 |
63 | switch ui.theme {
64 | case Logey:
65 | for {
66 | select {
67 | case ms := <-mC:
68 | fmt.Fprintf(
69 | ui.options.Writer,
70 | "[NumHeads: %v, Uptime: %s, MemoryUsage: %s, PeersConnected: %v, TotalUniquePeersSeen: %v, BootstrapsDone: %v, ProviderRecords: %v, RoutingTableSize: %v]\n",
71 | sumSamples(findByName(ms, nsName(metrics.Heads))),
72 | time.Second*time.Duration(int(time.Since(ui.options.Start).Seconds())),
73 | humanize.Bytes(uint64(sumSamples(findByName(ms, "go_memstats_alloc_bytes")))),
74 | sumSamples(findByName(ms, nsName(metrics.ConnectedPeers))),
75 | sumSamples(findByName(ms, nsName(metrics.UniquePeers))),
76 | sumSamples(findByName(ms, nsName(metrics.BootstrappedHeads))),
77 | sumSamples(findByName(ms, nsName(metrics.ProviderRecords))),
78 | sumSamples(findByName(ms, nsName(metrics.RoutingTableSize))),
79 | )
80 | case <-ctx.Done():
81 | return nil
82 | }
83 | }
84 | case Gooey:
85 | ga := &GooeyApp{Title: "Hydra Booster", Log: NewLog(ui.options.Writer, 15, 15), writer: ui.options.Writer}
86 | ehds := ga.NewDataLine(3, "Head ID(s)", "")
87 | econs := ga.NewDataLine(4, "Connections", "0")
88 | uniqprs := ga.NewDataLine(5, "Unique Peers Seen", "0")
89 | emem := ga.NewDataLine(6, "Memory Allocated", "0MB")
90 | eprov := ga.NewDataLine(7, "Stored Provider Records", "0")
91 | erts := ga.NewDataLine(8, "Routing Table Size", "0")
92 | etime := ga.NewDataLine(9, "Uptime", "0h 0m 0s")
93 | ga.Print()
94 |
95 | seconds := time.NewTicker(time.Second)
96 | defer seconds.Stop()
97 |
98 | for {
99 | select {
100 | // case m := <-messages:
101 | // ga.Log.Add(m)
102 | // ga.Log.Print()
103 | case ms := <-mC:
104 | ehds.SetVal(fmt.Sprintf("%v", labelValues(findByName(ms, nsName(metrics.Heads)), "peer_id")))
105 | emem.SetVal(humanize.Bytes(uint64(sumSamples(findByName(ms, "go_memstats_alloc_bytes")))))
106 | econs.SetVal(fmt.Sprintf("%v peers", sumSamples(findByName(ms, nsName(metrics.ConnectedPeers)))))
107 | uniqprs.SetVal(fmt.Sprint(sumSamples(findByName(ms, nsName(metrics.UniquePeers)))))
108 | eprov.SetVal(fmt.Sprint(sumSamples(findByName(ms, nsName(metrics.ProviderRecords)))))
109 | erts.SetVal(fmt.Sprint(sumSamples(findByName(ms, nsName(metrics.RoutingTableSize)))))
110 | case <-seconds.C:
111 | t := time.Since(ui.options.Start)
112 | h := int(t.Hours())
113 | m := int(t.Minutes()) % 60
114 | s := int(t.Seconds()) % 60
115 | etime.SetVal(fmt.Sprintf("%dh %dm %ds", h, m, s))
116 | case <-ctx.Done():
117 | return nil
118 | }
119 | ga.Print()
120 | }
121 | }
122 |
123 | return nil
124 | }
125 |
126 | func nsName(m stats.Measure) string {
127 | return fmt.Sprintf("%s_%s", metrics.PrometheusNamespace, m.Name())
128 | }
129 |
130 | func findByName(ms []*pmc.Metric, metricName string) *pmc.Metric {
131 | for _, m := range ms {
132 | if m.Name == metricName {
133 | return m
134 | }
135 | }
136 | return nil
137 | }
138 |
139 | func labelValues(m *pmc.Metric, labelKey string) []string {
140 | var vals []string
141 | if m != nil {
142 | for _, s := range m.Samples {
143 | vals = append(vals, s.Labels[labelKey])
144 | }
145 | }
146 | return vals
147 | }
148 |
149 | func sumSamples(m *pmc.Metric) float64 {
150 | var val float64
151 | if m != nil {
152 | for _, s := range m.Samples {
153 | val += s.Value
154 | }
155 | }
156 | return val
157 | }
158 |
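
Wiring the UI up is two calls: construct it with a theme and options, then let Render block until the context is canceled. A minimal sketch using the default metrics URL:

```go
package main

import (
	"context"
	"time"

	"github.com/libp2p/hydra-booster/ui"
	uiopts "github.com/libp2p/hydra-booster/ui/opts"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	u, err := ui.NewUI(ui.Logey, uiopts.RefreshPeriod(5*time.Second))
	if err != nil {
		panic(err)
	}
	// Blocks until ctx is canceled, printing one status line per refresh.
	if err := u.Render(ctx); err != nil {
		panic(err)
	}
}
```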
--------------------------------------------------------------------------------
/ui/ui_test.go:
--------------------------------------------------------------------------------
1 | package ui
2 |
3 | import (
4 | "bytes"
5 | "context"
6 | "fmt"
7 | "net"
8 | "net/http"
9 | "strings"
10 | "testing"
11 | "time"
12 |
13 | hytesting "github.com/libp2p/hydra-booster/testing"
14 | "github.com/libp2p/hydra-booster/ui/opts"
15 | )
16 |
17 | func newMockMetricsServeMux(t *testing.T, name string) (net.Listener, *http.ServeMux) {
18 | listener, err := net.Listen("tcp", ":0")
19 | if err != nil {
20 | t.Fatal(err)
21 | }
22 |
23 | mux := http.NewServeMux()
24 | mux.HandleFunc("/metrics", func(w http.ResponseWriter, req *http.Request) {
25 | http.ServeFile(w, req, name)
26 | })
27 |
28 | return listener, mux
29 | }
30 |
31 | func TestGooeyUI(t *testing.T) {
32 | ctx, cancel := context.WithCancel(context.Background())
33 | defer cancel()
34 |
35 | listener, mux := newMockMetricsServeMux(t, "../testdata/metrics/1head.txt")
36 | go http.Serve(listener, mux)
37 | defer listener.Close()
38 |
39 | cw := hytesting.NewChanWriter()
40 |
41 | ui, err := NewUI(Gooey, opts.Writer(cw), opts.MetricsURL(fmt.Sprintf("http://%v/metrics", listener.Addr().String())))
42 | if err != nil {
43 | t.Fatal(err)
44 | }
45 |
46 | go ui.Render(ctx)
47 |
48 | var out bytes.Buffer
49 | for c := range cw.C {
50 | out.Write(c)
51 | if !strings.Contains(out.String(), "12D3KooWETMx8cDb7JtmpUjPrhXv27mRi7rLmENoK5JT2FYogZvo") {
52 | continue
53 | }
54 | // ensure uptime got updated
55 | if !strings.Contains(out.String(), "0h 0m 1s") {
56 | continue
57 | }
58 | break
59 | }
60 | }
61 |
62 | func TestCancelByContext(t *testing.T) {
63 | ctx, cancel := context.WithCancel(context.Background())
64 |
65 | listener, mux := newMockMetricsServeMux(t, "../testdata/metrics/1head.txt")
66 | go http.Serve(listener, mux)
67 | defer listener.Close()
68 |
69 | var b bytes.Buffer
70 |
71 | ui, err := NewUI(Gooey, opts.Writer(&b), opts.MetricsURL(fmt.Sprintf("http://%v/metrics", listener.Addr().String())))
72 | if err != nil {
73 | t.Fatal(err)
74 | }
75 |
76 | go func() {
77 | time.Sleep(time.Second)
78 | cancel()
79 | }()
80 |
81 | err = ui.Render(ctx)
82 | if err != nil {
83 | t.Fatal("unexpected err", err)
84 | }
85 | }
86 |
87 | func TestLogeyUI(t *testing.T) {
88 | ctx, cancel := context.WithCancel(context.Background())
89 | defer cancel()
90 |
91 | listener, mux := newMockMetricsServeMux(t, "../testdata/metrics/2heads.txt")
92 | go http.Serve(listener, mux)
93 | defer listener.Close()
94 |
95 | cw := hytesting.NewChanWriter()
96 |
97 | ui, err := NewUI(Logey, opts.Writer(cw), opts.MetricsURL(fmt.Sprintf("http://%v/metrics", listener.Addr().String())))
98 | if err != nil {
99 | t.Fatal(err)
100 | }
101 |
102 | go ui.Render(ctx)
103 |
104 | // give it time to render once!
105 | time.Sleep(time.Millisecond * 100)
106 |
107 | expects := []string{
108 | "NumHeads: 2",
109 | "BootstrapsDone: 2",
110 | "PeersConnected: 11",
111 | "TotalUniquePeersSeen: 9",
112 | }
113 |
114 | for c := range cw.C {
115 | found := true
116 | for _, str := range expects {
117 | if !strings.Contains(string(c), str) {
118 | found = false
119 | break
120 | }
121 | }
122 |
123 | if found {
124 | break
125 | }
126 | }
127 | }
128 |
129 | func TestRefreshPeriod(t *testing.T) {
130 | ctx, cancel := context.WithCancel(context.Background())
131 | defer cancel()
132 |
133 | listener, mux := newMockMetricsServeMux(t, "../testdata/metrics/1head.txt")
134 | go http.Serve(listener, mux)
135 | defer listener.Close()
136 |
137 | cw := hytesting.NewChanWriter()
138 |
139 | ui, err := NewUI(
140 | Logey,
141 | opts.Writer(cw),
142 | opts.MetricsURL(fmt.Sprintf("http://%v/metrics", listener.Addr().String())),
143 | opts.RefreshPeriod(time.Second),
144 | )
145 | if err != nil {
146 | t.Fatal(err)
147 | }
148 |
149 | go ui.Render(ctx)
150 |
151 | var lines int
152 | for c := range cw.C {
153 | if strings.Index(string(c), "[") == 0 {
154 | lines++
155 | }
156 | if lines >= 2 {
157 | break
158 | }
159 | }
160 | }
161 |
--------------------------------------------------------------------------------
/utils/opts.go:
--------------------------------------------------------------------------------
1 | package utils
2 |
3 | import (
4 | "errors"
5 | "strings"
6 | )
7 |
8 | func ParseOptsString(s string) (map[string]string, error) {
9 | m := map[string]string{}
10 | opts := strings.Split(s, ",")
11 | for _, opt := range opts {
12 | entry := strings.Split(opt, "=")
13 | if len(entry) != 2 {
14 | return nil, errors.New("option config must be key=value pairs")
15 | }
16 | m[entry[0]] = entry[1]
17 | }
18 | return m, nil
19 | }
20 |
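
ParseOptsString splits a flat key=value,key=value flag string into a map; values containing '=' or ',' are rejected by the strict pair check. For example:

```go
package main

import (
	"fmt"

	"github.com/libp2p/hydra-booster/utils"
)

func main() {
	m, err := utils.ParseOptsString("table=providers,ttl=24h")
	if err != nil {
		panic(err)
	}
	fmt.Println(m["table"], m["ttl"]) // providers 24h
}
```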
--------------------------------------------------------------------------------
/utils/port-selector.go:
--------------------------------------------------------------------------------
1 | package utils
2 |
3 | // PortSelector returns a func that yields a new port every time
4 | func PortSelector(beg int) func() int {
5 | port := beg
6 | return func() int {
7 | if port == 0 {
8 | return 0
9 | }
10 |
11 | out := port
12 | port++
13 | return out
14 | }
15 | }
16 |
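
PortSelector returns a stateful closure: each call yields the next port in sequence, and a starting port of 0 always yields 0, which tells the listener to let the OS pick. For example:

```go
package main

import (
	"fmt"

	"github.com/libp2p/hydra-booster/utils"
)

func main() {
	next := utils.PortSelector(3000)
	fmt.Println(next(), next(), next()) // 3000 3001 3002

	anyPort := utils.PortSelector(0)
	fmt.Println(anyPort(), anyPort()) // 0 0
}
```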
--------------------------------------------------------------------------------
/utils/port-selector_test.go:
--------------------------------------------------------------------------------
1 | package utils
2 |
3 | import (
4 | "testing"
5 | )
6 |
7 | func TestPortSelector(t *testing.T) {
8 | begin := 5
9 | getPort := PortSelector(begin)
10 | for i := begin; i < begin*1000; i++ {
11 | port := getPort()
12 | if port != i {
13 | 			t.Fatalf("expected next port to be %v but got %v", i, port)
14 | }
15 | }
16 | }
17 |
18 | func TestPortSelectorBeginZero(t *testing.T) {
19 | begin := 0
20 | getPort := PortSelector(begin)
21 | for i := begin; i < 1000; i++ {
22 | port := getPort()
23 | if port != 0 {
24 | t.Fatalf("expected next port to be 0 but got %v", port)
25 | }
26 | }
27 | }
28 |
--------------------------------------------------------------------------------
/version.json:
--------------------------------------------------------------------------------
1 | {
2 | "version": "v0.7.4"
3 | }
4 |
--------------------------------------------------------------------------------
/version/version.go:
--------------------------------------------------------------------------------
1 | package version
2 |
3 | const (
4 | // Version number of the Hydra Booster node, it should be kept in sync with the current release tag.
5 | Version = "0.7.4"
6 | // UserAgent is the string passed by the identify protocol to other nodes in the network.
7 | UserAgent = "hydra-booster/" + Version
8 | )
9 |
--------------------------------------------------------------------------------