├── .github └── workflows │ └── test.yaml ├── .gitignore ├── .golangci.yml ├── .vscode ├── launch.json └── settings.json ├── COPYING ├── COPYING.LESSER ├── Dockerfile ├── LICENSE ├── README.md ├── cmd └── crawler │ ├── api.go │ ├── crawlercmd.go │ ├── flags.go │ ├── main.go │ ├── setup.go │ └── utils.go ├── docker-compose.yml ├── docs └── api.md ├── flake.lock ├── flake.nix ├── frontend ├── .env ├── Dockerfile ├── clients.json ├── deploy.bat ├── nginx.conf ├── package-lock.json ├── package.json ├── public │ ├── android-chrome-192x192.png │ ├── android-chrome-512x512.png │ ├── apple-touch-icon.png │ ├── browserconfig.xml │ ├── favicon-16x16.png │ ├── favicon-32x32.png │ ├── favicon.ico │ ├── index.html │ ├── mstile-144x144.png │ ├── mstile-150x150.png │ ├── mstile-310x150.png │ ├── mstile-310x310.png │ ├── mstile-70x70.png │ ├── robots.txt │ ├── safari-pinned-tab.svg │ └── site.webmanifest ├── src │ ├── atoms │ │ ├── Card.tsx │ │ ├── CustomResponsiveContainer.tsx │ │ ├── Logo.tsx │ │ ├── TablePlus.tsx │ │ └── TooltipCard.tsx │ ├── config.ts │ ├── data │ │ ├── DataMassager.ts │ │ ├── DataProcessor.test.ts │ │ ├── DataProcessor.ts │ │ ├── FilterRunner.test.ts │ │ ├── FilterRunner.ts │ │ ├── FilterUtils.ts │ │ └── SortedMap.ts │ ├── index.tsx │ ├── organisms │ │ ├── ColorModeSwitcher.tsx │ │ ├── Filtering.tsx │ │ ├── Footer.tsx │ │ ├── Header.tsx │ │ └── Loader.tsx │ ├── pages │ │ └── Home.tsx │ ├── react-app-env.d.ts │ ├── setupProxy.js │ ├── setupTests.ts │ ├── templates │ │ ├── Layout.tsx │ │ └── Routing.tsx │ └── theme.ts └── tsconfig.json ├── go.mod ├── go.sum └── pkg ├── api └── api.go ├── apidb └── database.go ├── common ├── client_info.go └── nodes.go ├── crawler ├── connection.go ├── crawl.go ├── enr.go ├── handshake.go └── helper.go ├── crawlerdb ├── crawlerdb.go └── db.go └── vparser ├── vparser.go └── vparser_test.go /.github/workflows/test.yaml: -------------------------------------------------------------------------------- 1 | name: node-crawler workflow 2 | 3 | on: 4 | push: 5 | branches: 6 | - main 7 | tags: 8 | - '**' 9 | pull_request: 10 | branches: 11 | - main 12 | workflow_dispatch: 13 | 14 | jobs: 15 | lint: 16 | runs-on: ubuntu-latest 17 | steps: 18 | - uses: actions/checkout@v4 19 | - name: Set up Go 20 | uses: actions/setup-go@v5 21 | with: 22 | go-version: 1.24.2 23 | - name: Download golangci-lint 24 | run: wget -O- -nv https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s latest 25 | - name: Lint 26 | run: ./bin/golangci-lint run --config .golangci.yml 27 | - name: Vet 28 | run: go vet ./... 29 | 30 | test: 31 | runs-on: ubuntu-latest 32 | needs: lint 33 | steps: 34 | - uses: actions/checkout@v4 35 | - name: Set up Go 36 | uses: actions/setup-go@v5 37 | with: 38 | go-version: 1.24.2 39 | - name: Test 40 | run: go test -v ./... 
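# For reference, the same checks can be run locally (a sketch only; it assumes
# golangci-lint was installed into ./bin by the install script, as in the
# lint job above):
#
#   ./bin/golangci-lint run --config .golangci.yml
#   go vet ./...
#   go test -v ./...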
41 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # dependencies 2 | node_modules 3 | 4 | # testing 5 | /coverage 6 | 7 | # production 8 | build 9 | 10 | # misc 11 | .DS_Store 12 | .env.local 13 | .env.development.local 14 | .env.test.local 15 | .env.production.local 16 | 17 | npm-debug.log* 18 | 19 | nodetable 20 | nodes 21 | node-crawler-backend.exe 22 | node-crawler-backend 23 | /data/ 24 | 25 | # nix 26 | result 27 | -------------------------------------------------------------------------------- /.golangci.yml: -------------------------------------------------------------------------------- 1 | version: "2" 2 | run: 3 | tests: true 4 | linters: 5 | default: none 6 | enable: 7 | - bidichk 8 | - durationcheck 9 | - goconst 10 | - govet 11 | - ineffassign 12 | - misspell 13 | - staticcheck 14 | - unconvert 15 | - whitespace 16 | settings: 17 | goconst: 18 | min-len: 3 19 | min-occurrences: 6 20 | exclusions: 21 | generated: lax 22 | presets: 23 | - comments 24 | - common-false-positives 25 | - legacy 26 | - std-error-handling 27 | paths: 28 | - third_party$ 29 | - builtin$ 30 | - examples$ 31 | formatters: 32 | enable: 33 | - goimports 34 | settings: 35 | gofmt: 36 | simplify: true 37 | exclusions: 38 | generated: lax 39 | paths: 40 | - third_party$ 41 | - builtin$ 42 | - examples$ 43 | -------------------------------------------------------------------------------- /.vscode/launch.json: -------------------------------------------------------------------------------- 1 | { 2 | "version": "0.2.0", 3 | "configurations": [ 4 | { 5 | "name": "Launch Frontend", 6 | "type": "pwa-msedge", 7 | "request": "launch", 8 | "url": "http://localhost:3000", 9 | "webRoot": "${workspaceFolder}/frontend" 10 | }, 11 | { 12 | "name": "Launch Api", 13 | "type": "go", 14 | "request": "launch", 15 | "mode": "auto", 16 | "program": "${workspaceFolder}/api" 17 | } 18 | ] 19 | } -------------------------------------------------------------------------------- /.vscode/settings.json: -------------------------------------------------------------------------------- 1 | { 2 | "editor.tabSize": 2, 3 | "editor.insertSpaces": true 4 | } -------------------------------------------------------------------------------- /COPYING.LESSER: -------------------------------------------------------------------------------- 1 | GNU LESSER GENERAL PUBLIC LICENSE 2 | Version 3, 29 June 2007 3 | 4 | Copyright (C) 2007 Free Software Foundation, Inc. 5 | Everyone is permitted to copy and distribute verbatim copies 6 | of this license document, but changing it is not allowed. 7 | 8 | 9 | This version of the GNU Lesser General Public License incorporates 10 | the terms and conditions of version 3 of the GNU General Public 11 | License, supplemented by the additional permissions listed below. 12 | 13 | 0. Additional Definitions. 14 | 15 | As used herein, "this License" refers to version 3 of the GNU Lesser 16 | General Public License, and the "GNU GPL" refers to version 3 of the GNU 17 | General Public License. 18 | 19 | "The Library" refers to a covered work governed by this License, 20 | other than an Application or a Combined Work as defined below. 21 | 22 | An "Application" is any work that makes use of an interface provided 23 | by the Library, but which is not otherwise based on the Library. 
24 | Defining a subclass of a class defined by the Library is deemed a mode 25 | of using an interface provided by the Library. 26 | 27 | A "Combined Work" is a work produced by combining or linking an 28 | Application with the Library. The particular version of the Library 29 | with which the Combined Work was made is also called the "Linked 30 | Version". 31 | 32 | The "Minimal Corresponding Source" for a Combined Work means the 33 | Corresponding Source for the Combined Work, excluding any source code 34 | for portions of the Combined Work that, considered in isolation, are 35 | based on the Application, and not on the Linked Version. 36 | 37 | The "Corresponding Application Code" for a Combined Work means the 38 | object code and/or source code for the Application, including any data 39 | and utility programs needed for reproducing the Combined Work from the 40 | Application, but excluding the System Libraries of the Combined Work. 41 | 42 | 1. Exception to Section 3 of the GNU GPL. 43 | 44 | You may convey a covered work under sections 3 and 4 of this License 45 | without being bound by section 3 of the GNU GPL. 46 | 47 | 2. Conveying Modified Versions. 48 | 49 | If you modify a copy of the Library, and, in your modifications, a 50 | facility refers to a function or data to be supplied by an Application 51 | that uses the facility (other than as an argument passed when the 52 | facility is invoked), then you may convey a copy of the modified 53 | version: 54 | 55 | a) under this License, provided that you make a good faith effort to 56 | ensure that, in the event an Application does not supply the 57 | function or data, the facility still operates, and performs 58 | whatever part of its purpose remains meaningful, or 59 | 60 | b) under the GNU GPL, with none of the additional permissions of 61 | this License applicable to that copy. 62 | 63 | 3. Object Code Incorporating Material from Library Header Files. 64 | 65 | The object code form of an Application may incorporate material from 66 | a header file that is part of the Library. You may convey such object 67 | code under terms of your choice, provided that, if the incorporated 68 | material is not limited to numerical parameters, data structure 69 | layouts and accessors, or small macros, inline functions and templates 70 | (ten or fewer lines in length), you do both of the following: 71 | 72 | a) Give prominent notice with each copy of the object code that the 73 | Library is used in it and that the Library and its use are 74 | covered by this License. 75 | 76 | b) Accompany the object code with a copy of the GNU GPL and this license 77 | document. 78 | 79 | 4. Combined Works. 80 | 81 | You may convey a Combined Work under terms of your choice that, 82 | taken together, effectively do not restrict modification of the 83 | portions of the Library contained in the Combined Work and reverse 84 | engineering for debugging such modifications, if you also do each of 85 | the following: 86 | 87 | a) Give prominent notice with each copy of the Combined Work that 88 | the Library is used in it and that the Library and its use are 89 | covered by this License. 90 | 91 | b) Accompany the Combined Work with a copy of the GNU GPL and this license 92 | document. 93 | 94 | c) For a Combined Work that displays copyright notices during 95 | execution, include the copyright notice for the Library among 96 | these notices, as well as a reference directing the user to the 97 | copies of the GNU GPL and this license document. 
98 | 99 | d) Do one of the following: 100 | 101 | 0) Convey the Minimal Corresponding Source under the terms of this 102 | License, and the Corresponding Application Code in a form 103 | suitable for, and under terms that permit, the user to 104 | recombine or relink the Application with a modified version of 105 | the Linked Version to produce a modified Combined Work, in the 106 | manner specified by section 6 of the GNU GPL for conveying 107 | Corresponding Source. 108 | 109 | 1) Use a suitable shared library mechanism for linking with the 110 | Library. A suitable mechanism is one that (a) uses at run time 111 | a copy of the Library already present on the user's computer 112 | system, and (b) will operate properly with a modified version 113 | of the Library that is interface-compatible with the Linked 114 | Version. 115 | 116 | e) Provide Installation Information, but only if you would otherwise 117 | be required to provide such information under section 6 of the 118 | GNU GPL, and only to the extent that such information is 119 | necessary to install and execute a modified version of the 120 | Combined Work produced by recombining or relinking the 121 | Application with a modified version of the Linked Version. (If 122 | you use option 4d0, the Installation Information must accompany 123 | the Minimal Corresponding Source and Corresponding Application 124 | Code. If you use option 4d1, you must provide the Installation 125 | Information in the manner specified by section 6 of the GNU GPL 126 | for conveying Corresponding Source.) 127 | 128 | 5. Combined Libraries. 129 | 130 | You may place library facilities that are a work based on the 131 | Library side by side in a single library together with other library 132 | facilities that are not Applications and are not covered by this 133 | License, and convey such a combined library under terms of your 134 | choice, if you do both of the following: 135 | 136 | a) Accompany the combined library with a copy of the same work based 137 | on the Library, uncombined with any other library facilities, 138 | conveyed under the terms of this License. 139 | 140 | b) Give prominent notice with the combined library that part of it 141 | is a work based on the Library, and explaining where to find the 142 | accompanying uncombined form of the same work. 143 | 144 | 6. Revised Versions of the GNU Lesser General Public License. 145 | 146 | The Free Software Foundation may publish revised and/or new versions 147 | of the GNU Lesser General Public License from time to time. Such new 148 | versions will be similar in spirit to the present version, but may 149 | differ in detail to address new problems or concerns. 150 | 151 | Each version is given a distinguishing version number. If the 152 | Library as you received it specifies that a certain numbered version 153 | of the GNU Lesser General Public License "or any later version" 154 | applies to it, you have the option of following the terms and 155 | conditions either of that published version or of any later version 156 | published by the Free Software Foundation. If the Library as you 157 | received it does not specify a version number of the GNU Lesser 158 | General Public License, you may choose any version of the GNU Lesser 159 | General Public License ever published by the Free Software Foundation. 
160 | 161 |   If the Library as you received it specifies that a proxy can decide 162 | whether future versions of the GNU Lesser General Public License shall 163 | apply, that proxy's public statement of acceptance of any version is 164 | permanent authorization for you to choose that version for the 165 | Library. 166 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | # Compile api 2 | FROM golang:1.24.2-alpine AS builder 3 | WORKDIR /app 4 | 5 | COPY go.mod go.sum ./ 6 | RUN go mod download 7 | 8 | COPY ./ ./ 9 | RUN go build ./cmd/crawler 10 | 11 | # Copy compiled stuff and run it 12 | FROM alpine 13 | 14 | COPY --from=builder /app/crawler /app/crawler 15 | 16 | ENTRYPOINT ["/app/crawler"] 17 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2021 Mohamed Mansour 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Ethereum Node Crawler 2 | 3 | Crawls the network and visualizes collected data. This repository includes the backend, API, and frontend for the Ethereum network crawler. 4 | 5 | The [Backend](./pkg/crawler) is based on the [devp2p](https://github.com/ethereum/go-ethereum/tree/master/cmd/devp2p) tool. It tries to connect to discovered nodes, fetches info about them, and creates a database. The [API](./pkg/api) software reads the raw node database, filters it, caches it, and serves it as an API. The [Frontend](./frontend) is a web application which reads data from the API and visualizes it as a dashboard. 6 | 7 | Features: 8 | 9 | - Advanced filtering, allows you to add filters for a customized dashboard 10 | - Drilldown support, allows you to drill down the data to find interesting trends 11 | - Network upgrade readiness overview 12 | - Responsive mobile design 13 | 14 | ## Contribute 15 | 16 | The project is still in an early stage; contributions and testing are welcome. You can run each part of the software manually for development purposes, or deploy the whole production-ready stack with Docker. 17 | 18 | ### Frontend 19 | 20 | #### Development 21 | 22 | For local development with debugging, remoting, etc: 23 | 24 | 1.
Copy `.env` into `.env.local` and replace the variables. 25 | 1. Then run `npm install` followed by `npm start`. 26 | 1. Run the tests with `npm test` to make sure the data processing is working correctly. 27 | 28 | #### Production 29 | 30 | To deploy this web app: 31 | 32 | 1. Build the production assets with `npm install` followed by `npm run build`; the contents will be located in the `build` folder. 33 | 1. Use your favorite web server; in this example, we will be using nginx. 34 | 1. The nginx config for the website should proxy the API to the `/v1` endpoint. 35 |    Review the `frontend/nginx.conf` file for an example. 36 | 37 | ### Backend API 38 | 39 | The API uses two databases: one holds the raw data from the crawler, and the other is the API database. 40 | Data is moved from the crawler DB to the API DB regularly by this binary. 41 | Make sure to start the crawler before the API if you intend to run them together during development. 42 | 43 | #### Dependencies 44 | 45 | - golang 46 | - sqlite3 47 | 48 | #### Development 49 | 50 | ``` 51 | go run ./cmd/crawler 52 | ``` 53 | 54 | #### Production 55 | 56 | 1. Build the binary into `/usr/bin` 57 | ``` 58 | go build -o /usr/bin/node-crawler ./cmd/crawler 59 | ``` 60 | 1. Create a system user for running the application 61 | ``` 62 | useradd --system --create-home --home-dir /var/lib/node-crawler node-crawler 63 | ``` 64 | 1. Make sure the database is in `/var/lib/node-crawler/crawler.db` 65 | 1. Create a systemd service in `/etc/systemd/system/node-crawler.service`: 66 | ``` 67 | [Unit] 68 | Description = eth node crawler api 69 | Wants = network-online.target 70 | After = network-online.target 71 | 72 | [Service] 73 | User = node-crawler 74 | ExecStart = /usr/bin/node-crawler api --crawler-db /var/lib/node-crawler/crawler.db --api-db /var/lib/node-crawler/api.db 75 | Restart = on-failure 76 | RestartSec = 3 77 | TimeoutSec = 300 78 | 79 | [Install] 80 | WantedBy = multi-user.target 81 | ``` 82 | 1. Then enable it and start it. 83 | ``` 84 | systemctl enable node-crawler 85 | systemctl start node-crawler 86 | systemctl status node-crawler 87 | ``` 88 | 89 | ### Crawler 90 | 91 | #### Dependencies 92 | 93 | - golang 94 | - sqlite3 95 | 96 | ##### Country location 97 | 98 | - `GeoLite2-Country.mmdb` file from [https://dev.maxmind.com/geoip/geolite2-free-geolocation-data?lang=en](https://dev.maxmind.com/geoip/geolite2-free-geolocation-data?lang=en) 99 | - you will have to create an account to get access to this file 100 | 101 | #### Development 102 | 103 | ``` 104 | go run ./cmd/crawler 105 | ``` 106 | 107 | Run the crawler using the `crawl` command. 108 | 109 | ``` 110 | go run ./cmd/crawler crawl 111 | ``` 112 | 113 | #### Production 114 | 115 | Build the crawler and copy the binary to `/usr/bin`. 116 | 117 | ``` 118 | go build -o /usr/bin/node-crawler ./cmd/crawler 119 | ``` 120 | 121 | Create a systemd service similar to the API example above; a sketch is shown after this paragraph. In the executed command, override the default settings by pointing the crawler database to the chosen path and setting the period for writing crawled nodes. 122 | If you want to get the country that a node is in, you have to specify the location of the GeoIP database as well.
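For example, a crawler unit might look like the following. This is only a sketch: the unit structure mirrors the API service above, and the flags are the ones from the examples below; adjust the paths to your setup.

```
[Unit]
Description = eth node crawler
Wants = network-online.target
After = network-online.target

[Service]
User = node-crawler
ExecStart = /usr/bin/node-crawler crawl --timeout 10m --crawler-db /var/lib/node-crawler/crawler.db --geoipdb /var/lib/node-crawler/GeoLite2-Country.mmdb
Restart = on-failure
RestartSec = 3

[Install]
WantedBy = multi-user.target
```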
123 | 124 | ##### No GeoIP 125 | 126 | ``` 127 | node-crawler crawl --timeout 10m --crawler-db /path/to/database 128 | ``` 129 | 130 | ##### With GeoIP 131 | 132 | ``` 133 | node-crawler crawl --timeout 10m --crawler-db /path/to/database --geoipdb GeoLite2-Country.mmdb 134 | ``` 135 | 136 | ### Docker setup 137 | 138 | A production build of the preconfigured software stack can be easily deployed with Docker. To achieve this, clone this repository; the `docker-compose.yml` file is in the repository root. 139 | 140 | Make sure you have [Docker](https://github.com/docker/docker-ce/releases) and [docker-compose](https://github.com/docker/compose/releases) tools installed. 141 | 142 | The docker compose setup uses a local `./data` directory to store the database and GeoIP file. 143 | It's best to create this directory and add the GeoIP file before starting the system. 144 | You can read the `./docker-compose.yml` file for more details. 145 | 146 | ``` 147 | docker-compose up 148 | ``` 149 | 150 | ## Developing with Nix 151 | 152 | [Nix](https://nixos.org/) is a package manager and system configuration tool 153 | and language for reproducible, declarative, and reliable systems. 154 | 155 | The Nix [Flake](https://wiki.nixos.org/wiki/Flakes) in this repo contains all the 156 | dependencies needed to build the frontend and crawler. 157 | 158 | The `flake.lock` file locks the commits which the package manager uses to build 159 | the packages, essentially locking the dependencies in time, not in version. 160 | 161 | To update the lock file, use `nix flake update --commit-lock-file`. This will 162 | update the git commits in the lock file, and commit the new lock file with a 163 | nice, standard commit message which shows the change in commit hashes for each 164 | input. 165 | 166 | To activate the development environment with all the packages available, you 167 | can use the command `nix develop`. To automate this process, you can use 168 | [direnv](https://direnv.net/) with `use flake` in your `.envrc`. You can learn 169 | more about Nix and direnv [here](https://github.com/direnv/direnv/wiki/Nix). 170 | 171 | ## Deploying with NixOS 172 | 173 | [Nix](https://nixos.org/) is a package manager and system configuration tool 174 | and language for reproducible, declarative, and reliable systems. 175 | 176 | The Nix [Flake](https://wiki.nixos.org/wiki/Flakes) in this repo also contains a 177 | NixOS module for configuring and deploying the node-crawler, API, and Nginx. 178 | 179 | There is just a little bit of extra configuration which is needed to bring 180 | everything together. 181 | 182 | An example production configuration: 183 | 184 | Your NixOS `flake.nix`: 185 | 186 | ```nix 187 | { 188 |   inputs = { 189 |     nixpkgs.url = "github:NixOS/nixpkgs/nixos-unstable"; 190 |     node-crawler.url = "github:ethereum/node-crawler"; 191 |   }; 192 |   outputs = { 193 |     nixpkgs, 194 |     node-crawler, 195 |   }: 196 |   { 197 |     nixosConfigurations = { 198 |       crawlerHostName = nixpkgs.lib.nixosSystem { 199 |         specialArgs = { 200 |           inherit node-crawler; 201 |         }; 202 |         modules = [ 203 |           ./configuration.nix 204 | 205 |           node-crawler.nixosModules.nodeCrawler 206 |         ]; 207 |       }; 208 |     }; 209 |   }; 210 | } 211 | ``` 212 | 213 | Your example `configuration.nix`: 214 | 215 | ```nix 216 | { node-crawler, ... }: 217 | 218 | { 219 |   # Add the overlay from the node-crawler flake 220 |   # to get the added packages. 221 |   nixpkgs.overlays = [ 222 |     node-crawler.overlays.default 223 |   ]; 224 | 225 |   # It's a good idea to have your firewall 226 |   # enabled.
Make sure you have SSH allowed 227 |   # so you don't lock yourself out. The openssh 228 |   # service should do this by default. 229 |   networking = { 230 |     firewall = { 231 |       enable = true; 232 |       allowedTCPPorts = [ 233 |         80 234 |         443 235 |       ]; 236 |     }; 237 |   }; 238 | 239 |   services = { 240 |     nodeCrawler = { 241 |       enable = true; 242 |       hostName = "server hostname"; 243 |       nginx = { 244 |         forceSSL = true; 245 |         enableACME = true; 246 |       }; 247 |     }; 248 | 249 |     # Needed for the node crawler to get the country 250 |     # of the crawled IP address. 251 |     geoipupdate = { 252 |       enable = true; 253 |       settings = { 254 |         EditionIDs = [ 255 |           "GeoLite2-Country" 256 |         ]; 257 |         AccountID = account_id; 258 |         LicenseKey = "location of license key on server"; 259 |       }; 260 |     }; 261 |   }; 262 | 263 |   # Needed to enable ACME for automatic SSL certificate 264 |   # creation for Nginx. 265 |   security.acme = { 266 |     acceptTerms = true; 267 |     defaults.email = "admin+acme@example.com"; 268 |   }; 269 | } 270 | ``` 271 | -------------------------------------------------------------------------------- /cmd/crawler/api.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 |     "database/sql" 5 |     "fmt" 6 |     "os" 7 |     "sync" 8 |     "time" 9 | 10 |     _ "modernc.org/sqlite" 11 | 12 |     "github.com/ethereum/go-ethereum/log" 13 |     "github.com/ethereum/node-crawler/pkg/api" 14 |     "github.com/ethereum/node-crawler/pkg/apidb" 15 |     "github.com/ethereum/node-crawler/pkg/crawlerdb" 16 |     "github.com/urfave/cli/v2" 17 | ) 18 | 19 | var ( 20 |     apiCommand = &cli.Command{ 21 |         Name:   "api", 22 |         Usage:  "API server for the crawler", 23 |         Action: startAPI, 24 |         Flags: []cli.Flag{ 25 |             apiDBFlag, 26 |             apiListenAddrFlag, 27 |             autovacuumFlag, 28 |             busyTimeoutFlag, 29 |             crawlerDBFlag, 30 |             dropNodesTimeFlag, 31 |         }, 32 |     } 33 | ) 34 | 35 | func startAPI(ctx *cli.Context) error { 36 |     var ( 37 |         crawlerDBPath = ctx.String(crawlerDBFlag.Name) 38 |         apiDBPath     = ctx.String(apiDBFlag.Name) 39 |         autovacuum    = ctx.String(autovacuumFlag.Name) 40 |         busyTimeout   = ctx.Uint64(busyTimeoutFlag.Name) 41 |     ) 42 | 43 |     crawlerDB, err := openSQLiteDB( 44 |         crawlerDBPath, 45 |         autovacuum, 46 |         busyTimeout, 47 |     ) 48 |     if err != nil { 49 |         return err 50 |     } 51 | 52 |     shouldInit := false 53 |     if _, err := os.Stat(apiDBPath); os.IsNotExist(err) { 54 |         shouldInit = true 55 |     } 56 |     nodeDB, err := openSQLiteDB( 57 |         apiDBPath, 58 |         autovacuum, 59 |         busyTimeout, 60 |     ) 61 |     if err != nil { 62 |         return err 63 |     } 64 |     if shouldInit { 65 |         log.Info("DB did not exist, init") 66 |         if err := apidb.CreateDB(nodeDB); err != nil { 67 |             return err 68 |         } 69 |     } 70 | 71 |     // Start daemons 72 |     var wg sync.WaitGroup 73 |     wg.Add(3) 74 | 75 |     // Start reading daemon 76 |     go func() { 77 |         defer wg.Done() 78 |         newNodeDaemon(crawlerDB, nodeDB) 79 |     }() 80 |     // Start the drop daemon 81 |     go func() { 82 |         defer wg.Done() 83 |         dropDaemon(nodeDB, ctx.Duration(dropNodesTimeFlag.Name)) 84 |     }() 85 |     // Start the API daemon 86 |     apiAddress := ctx.String(apiListenAddrFlag.Name) 87 |     apiDaemon := api.New(apiAddress, nodeDB) 88 |     go func() { 89 |         defer wg.Done() 90 |         apiDaemon.HandleRequests() 91 |     }() 92 |     wg.Wait() 93 | 94 |     return nil 95 | } 96 | 97 | func transferNewNodes(crawlerDB, nodeDB *sql.DB) error { 98 |     crawlerDBTx, err := crawlerDB.Begin() 99 |     if err != nil { 100 |         // Sometimes errors occur trying to read the crawler database, but 101 |         // they are normally recoverable, and a lot of the time, it's 102 |         // because the database is locked by
the crawler. 103 |         return fmt.Errorf("error starting transaction to read nodes: %w", err) 104 |     } 105 |     defer crawlerDBTx.Rollback() 106 | 107 |     nodes, err := crawlerdb.ReadAndDeleteUnseenNodes(crawlerDBTx) 108 |     if err != nil { 109 |         // Similar to the crawlerDB.Begin() error above 110 |         return fmt.Errorf("error reading nodes: %w", err) 111 |     } 112 | 113 |     if len(nodes) > 0 { 114 |         err := apidb.InsertCrawledNodes(nodeDB, nodes) 115 |         if err != nil { 116 |             // This shouldn't happen because the database is not shared in this 117 |             // instance, so there shouldn't be lock errors, but anything can 118 |             // happen. We will still try again. 119 |             return fmt.Errorf("error inserting nodes: %w", err) 120 |         } 121 |         log.Info("Nodes inserted", "len", len(nodes)) 122 |     } 123 | 124 |     crawlerDBTx.Commit() 125 |     return nil 126 | } 127 | 128 | // newNodeDaemon reads new nodes from the crawler and puts them in the db. 129 | // Might trigger the invalidation of caches for the api in the future. 130 | func newNodeDaemon(crawlerDB, nodeDB *sql.DB) { 131 |     // Exponentially increase the backoff time 132 |     retryTimeout := time.Minute 133 | 134 |     for { 135 |         err := transferNewNodes(crawlerDB, nodeDB) 136 |         if err != nil { 137 |             log.Error("Failure in transferring new nodes", "err", err) 138 |             time.Sleep(retryTimeout) 139 |             retryTimeout *= 2 140 |             continue 141 |         } 142 | 143 |         retryTimeout = time.Minute 144 |         time.Sleep(time.Second) 145 |     } 146 | } 147 | 148 | func dropDaemon(db *sql.DB, dropTimeout time.Duration) { 149 |     ticker := time.NewTicker(10 * time.Minute) 150 |     defer ticker.Stop() 151 | 152 |     for { 153 |         <-ticker.C 154 |         err := apidb.DropOldNodes(db, dropTimeout) 155 |         if err != nil { 156 |             panic(err) 157 |         } 158 |     } 159 | } 160 | -------------------------------------------------------------------------------- /cmd/crawler/crawlercmd.go: -------------------------------------------------------------------------------- 1 | // Copyright 2021 The go-ethereum Authors 2 | // This file is part of go-ethereum. 3 | // 4 | // go-ethereum is free software: you can redistribute it and/or modify 5 | // it under the terms of the GNU General Public License as published by 6 | // the Free Software Foundation, either version 3 of the License, or 7 | // (at your option) any later version. 8 | // 9 | // go-ethereum is distributed in the hope that it will be useful, 10 | // but WITHOUT ANY WARRANTY; without even the implied warranty of 11 | // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 | // GNU General Public License for more details. 13 | // 14 | // You should have received a copy of the GNU General Public License 15 | // along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
16 | 17 | package main 18 | 19 | import ( 20 | "database/sql" 21 | "os" 22 | 23 | _ "modernc.org/sqlite" 24 | 25 | "github.com/oschwald/geoip2-golang" 26 | 27 | "github.com/ethereum/go-ethereum/cmd/utils" 28 | gethCommon "github.com/ethereum/go-ethereum/common" 29 | "github.com/ethereum/go-ethereum/log" 30 | "github.com/ethereum/go-ethereum/p2p/enode" 31 | "github.com/ethereum/node-crawler/pkg/common" 32 | "github.com/ethereum/node-crawler/pkg/crawler" 33 | "github.com/ethereum/node-crawler/pkg/crawlerdb" 34 | 35 | "github.com/urfave/cli/v2" 36 | ) 37 | 38 | var ( 39 | crawlerCommand = &cli.Command{ 40 | Name: "crawl", 41 | Usage: "Crawl the ethereum network", 42 | Action: crawlNodes, 43 | Flags: []cli.Flag{ 44 | autovacuumFlag, 45 | bootnodesFlag, 46 | busyTimeoutFlag, 47 | crawlerDBFlag, 48 | geoipdbFlag, 49 | listenAddrFlag, 50 | nodeFileFlag, 51 | nodeURLFlag, 52 | nodedbFlag, 53 | nodekeyFlag, 54 | timeoutFlag, 55 | workersFlag, 56 | utils.HoodiFlag, 57 | utils.NetworkIdFlag, 58 | utils.SepoliaFlag, 59 | }, 60 | } 61 | ) 62 | 63 | func crawlNodes(ctx *cli.Context) error { 64 | var inputSet common.NodeSet 65 | var geoipDB *geoip2.Reader 66 | 67 | nodesFile := ctx.String(nodeFileFlag.Name) 68 | 69 | if nodesFile != "" && gethCommon.FileExist(nodesFile) { 70 | inputSet = common.LoadNodesJSON(nodesFile) 71 | } 72 | 73 | var db *sql.DB 74 | if ctx.IsSet(crawlerDBFlag.Name) { 75 | name := ctx.String(crawlerDBFlag.Name) 76 | shouldInit := false 77 | if _, err := os.Stat(name); os.IsNotExist(err) { 78 | shouldInit = true 79 | } 80 | 81 | var err error 82 | db, err = openSQLiteDB( 83 | name, 84 | ctx.String(autovacuumFlag.Name), 85 | ctx.Uint64(busyTimeoutFlag.Name), 86 | ) 87 | if err != nil { 88 | panic(err) 89 | } 90 | log.Info("Connected to db") 91 | if shouldInit { 92 | log.Info("DB did not exist, init") 93 | if err := crawlerdb.CreateDB(db); err != nil { 94 | panic(err) 95 | } 96 | } 97 | } 98 | 99 | nodeDB, err := enode.OpenDB(ctx.String(nodedbFlag.Name)) 100 | if err != nil { 101 | panic(err) 102 | } 103 | 104 | if geoipFile := ctx.String(geoipdbFlag.Name); geoipFile != "" { 105 | geoipDB, err = geoip2.Open(geoipFile) 106 | if err != nil { 107 | return err 108 | } 109 | defer func() { _ = geoipDB.Close() }() 110 | } 111 | 112 | crawler := crawler.Crawler{ 113 | NetworkID: ctx.Uint64(utils.NetworkIdFlag.Name), 114 | NodeURL: ctx.String(nodeURLFlag.Name), 115 | ListenAddr: ctx.String(listenAddrFlag.Name), 116 | NodeKey: ctx.String(nodekeyFlag.Name), 117 | Bootnodes: ctx.StringSlice(bootnodesFlag.Name), 118 | Timeout: ctx.Duration(timeoutFlag.Name), 119 | Workers: ctx.Uint64(workersFlag.Name), 120 | Sepolia: ctx.Bool(utils.SepoliaFlag.Name), 121 | Hoodi: ctx.Bool(utils.HoodiFlag.Name), 122 | NodeDB: nodeDB, 123 | } 124 | 125 | for { 126 | updatedSet := crawler.CrawlRound(inputSet, db, geoipDB) 127 | if nodesFile != "" { 128 | updatedSet.WriteNodesJSON(nodesFile) 129 | } 130 | } 131 | } 132 | -------------------------------------------------------------------------------- /cmd/crawler/flags.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "time" 5 | 6 | "github.com/urfave/cli/v2" 7 | ) 8 | 9 | var ( 10 | apiDBFlag = &cli.StringFlag{ 11 | Name: "api-db", 12 | Usage: "API SQLite file name", 13 | Required: true, 14 | } 15 | apiListenAddrFlag = &cli.StringFlag{ 16 | Name: "addr", 17 | Usage: "Listening address", 18 | Value: "0.0.0.0:10000", 19 | } 20 | autovacuumFlag = &cli.StringFlag{ 21 | Name: "autovacuum", 22 | 
Usage: ("Sets the autovacuum value for the databases. Possible values: " + 23 |             "NONE, FULL, or INCREMENTAL. " + 24 |             "https://www.sqlite.org/pragma.html#pragma_auto_vacuum"), 25 |         Value: "INCREMENTAL", 26 |     } 27 |     bootnodesFlag = &cli.StringSliceFlag{ 28 |         Name: "bootnodes", 29 |         Usage: ("Comma separated nodes used for bootstrapping. " + 30 |             "Defaults to hard-coded values for the selected network"), 31 |     } 32 |     busyTimeoutFlag = &cli.Uint64Flag{ 33 |         Name: "busy-timeout", 34 |         Usage: ("Sets the busy_timeout value for the database in milliseconds. " + 35 |             "https://www.sqlite.org/pragma.html#pragma_busy_timeout"), 36 |         Value: 3000, 37 |     } 38 |     crawlerDBFlag = &cli.StringFlag{ 39 |         Name:     "crawler-db", 40 |         Usage:    "Crawler SQLite file name", 41 |         Required: true, 42 |     } 43 |     dropNodesTimeFlag = &cli.DurationFlag{ 44 |         Name:  "drop-time", 45 |         Usage: "Time to drop crawled nodes without any updates", 46 |         Value: 24 * time.Hour, 47 |     } 48 |     geoipdbFlag = &cli.StringFlag{ 49 |         Name:  "geoipdb", 50 |         Usage: "geoip2 database location", 51 |     } 52 |     listenAddrFlag = &cli.StringFlag{ 53 |         Name:  "addr", 54 |         Usage: "Listening address", 55 |         Value: "0.0.0.0:0", 56 |     } 57 |     nodedbFlag = &cli.StringFlag{ 58 |         Name:  "nodedb", 59 |         Usage: "Nodes database location. Defaults to in memory database", 60 |     } 61 |     nodeFileFlag = &cli.StringFlag{ 62 |         Name:  "nodefile", 63 |         Usage: "Path to a node file containing nodes to be crawled", 64 |     } 65 |     nodekeyFlag = &cli.StringFlag{ 66 |         Name:  "nodekey", 67 |         Usage: "Hex-encoded node key", 68 |     } 69 |     nodeURLFlag = &cli.StringFlag{ 70 |         Name:  "nodeURL", 71 |         Usage: "URL of the node you want to connect to", 72 |         // Value: "http://localhost:8545", 73 |     } 74 |     timeoutFlag = &cli.DurationFlag{ 75 |         Name:  "timeout", 76 |         Usage: "Timeout for the crawling in a round", 77 |         Value: 5 * time.Minute, 78 |     } 79 |     workersFlag = &cli.Uint64Flag{ 80 |         Name:  "workers", 81 |         Usage: "Number of workers to start for updating nodes", 82 |         Value: 16, 83 |     } 84 | ) 85 | -------------------------------------------------------------------------------- /cmd/crawler/main.go: -------------------------------------------------------------------------------- 1 | // Copyright 2021 The go-ethereum Authors 2 | // This file is part of the go-ethereum library. 3 | // 4 | // The go-ethereum library is free software: you can redistribute it and/or modify 5 | // it under the terms of the GNU Lesser General Public License as published by 6 | // the Free Software Foundation, either version 3 of the License, or 7 | // (at your option) any later version. 8 | // 9 | // The go-ethereum library is distributed in the hope that it will be useful, 10 | // but WITHOUT ANY WARRANTY; without even the implied warranty of 11 | // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 | // GNU Lesser General Public License for more details. 13 | // 14 | // You should have received a copy of the GNU Lesser General Public License 15 | // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. 16 | 17 | package main 18 | 19 | import ( 20 |     "fmt" 21 |     "os" 22 |     "path/filepath" 23 | 24 |     "github.com/urfave/cli/v2" 25 | ) 26 | 27 | var ( 28 |     app = &cli.App{ 29 |         Name:        filepath.Base(os.Args[0]), 30 |         Usage:       "go-ethereum crawler", 31 |         Version:     "v0.0.1", 32 |         Writer:      os.Stdout, 33 |         HideVersion: true, 34 |     } 35 | ) 36 | 37 | func init() { 38 |     app.Flags = append(app.Flags, Flags...) 39 |     app.Before = func(ctx *cli.Context) error { 40 |         return Setup(ctx) 41 |     } 42 |     // Add subcommands.
43 |     app.Commands = []*cli.Command{ 44 |         apiCommand, 45 |         crawlerCommand, 46 |     } 47 | } 48 | 49 | func main() { 50 |     err := app.Run(os.Args) 51 |     if err != nil { 52 |         fmt.Println(err) 53 |         os.Exit(-127) 54 |     } 55 | } 56 | -------------------------------------------------------------------------------- /cmd/crawler/setup.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 |     "fmt" 5 |     "io" 6 |     "net/http" 7 |     "os" 8 |     "runtime" 9 | 10 |     "github.com/ethereum/go-ethereum/log" 11 |     "github.com/ethereum/go-ethereum/metrics" 12 |     "github.com/ethereum/go-ethereum/metrics/exp" 13 |     "github.com/fjl/memsize/memsizeui" 14 |     "github.com/mattn/go-colorable" 15 |     "github.com/mattn/go-isatty" 16 |     "github.com/urfave/cli/v2" 17 | ) 18 | 19 | var Memsize memsizeui.Handler 20 | 21 | var ( 22 |     verbosityFlag = cli.IntFlag{ 23 |         Name:  "verbosity", 24 |         Usage: "Logging verbosity: 0=silent, 1=error, 2=warn, 3=info, 4=debug, 5=detail", 25 |         Value: 3, 26 |     } 27 |     vmoduleFlag = cli.StringFlag{ 28 |         Name:  "vmodule", 29 |         Usage: "Per-module verbosity: comma-separated list of <pattern>=<level> (e.g. eth/*=5,p2p=4)", 30 |         Value: "", 31 |     } 32 |     logjsonFlag = cli.BoolFlag{ 33 |         Name:  "log.json", 34 |         Usage: "Format logs with JSON", 35 |     } 36 |     pprofFlag = cli.BoolFlag{ 37 |         Name:  "pprof", 38 |         Usage: "Enable the pprof HTTP server", 39 |     } 40 |     pprofPortFlag = cli.IntFlag{ 41 |         Name:  "pprof.port", 42 |         Usage: "pprof HTTP server listening port", 43 |         Value: 6060, 44 |     } 45 |     pprofAddrFlag = cli.StringFlag{ 46 |         Name:  "pprof.addr", 47 |         Usage: "pprof HTTP server listening interface", 48 |         Value: "127.0.0.1", 49 |     } 50 |     memprofilerateFlag = cli.IntFlag{ 51 |         Name:  "pprof.memprofilerate", 52 |         Usage: "Turn on memory profiling with the given rate", 53 |         Value: runtime.MemProfileRate, 54 |     } 55 |     blockprofilerateFlag = cli.IntFlag{ 56 |         Name:  "pprof.blockprofilerate", 57 |         Usage: "Turn on block profiling with the given rate", 58 |     } 59 |     cpuprofileFlag = cli.StringFlag{ 60 |         Name:  "pprof.cpuprofile", 61 |         Usage: "Write CPU profile to the given file", 62 |     } 63 |     traceFlag = cli.StringFlag{ 64 |         Name:  "trace", 65 |         Usage: "Write execution trace to the given file", 66 |     } 67 | ) 68 | 69 | // Flags holds all command-line flags required for debugging. 70 | var Flags = []cli.Flag{ 71 |     &blockprofilerateFlag, 72 |     &cpuprofileFlag, 73 |     &logjsonFlag, 74 |     &memprofilerateFlag, 75 |     &pprofAddrFlag, 76 |     &pprofFlag, 77 |     &pprofPortFlag, 78 |     &traceFlag, 79 |     &verbosityFlag, 80 |     &vmoduleFlag, 81 | } 82 | 83 | var glogger *log.GlogHandler 84 | 85 | func init() { 86 |     terminalOutput := io.Writer(os.Stderr) 87 |     glogger = log.NewGlogHandler(log.NewTerminalHandler(terminalOutput, false)) 88 |     glogger.Verbosity(log.LvlInfo) 89 |     log.SetDefault(log.NewLogger(glogger)) 90 | } 91 | 92 | // Setup initializes profiling and logging based on the CLI flags. 93 | // It should be called as early as possible in the program.
94 | func Setup(ctx *cli.Context) error { 95 | output := io.Writer(os.Stderr) 96 | if ctx.Bool(logjsonFlag.Name) { 97 | glogger = log.NewGlogHandler(log.JSONHandler(output)) 98 | } else { 99 | usecolor := (isatty.IsTerminal(os.Stderr.Fd()) || isatty.IsCygwinTerminal(os.Stderr.Fd())) && os.Getenv("TERM") != "dumb" 100 | if usecolor { 101 | output = colorable.NewColorableStderr() 102 | } 103 | glogger = log.NewGlogHandler(log.NewTerminalHandler(output, usecolor)) 104 | } 105 | 106 | // logging 107 | verbosity := ctx.Int(verbosityFlag.Name) 108 | glogger.Verbosity(log.FromLegacyLevel(verbosity)) 109 | vmodule := ctx.String(vmoduleFlag.Name) 110 | glogger.Vmodule(vmodule) 111 | 112 | log.SetDefault(log.NewLogger(glogger)) 113 | 114 | // profiling, tracing 115 | runtime.MemProfileRate = memprofilerateFlag.Value 116 | if ctx.IsSet(memprofilerateFlag.Name) { 117 | runtime.MemProfileRate = ctx.Int(memprofilerateFlag.Name) 118 | } 119 | 120 | // pprof server 121 | if ctx.Bool(pprofFlag.Name) { 122 | listenHost := ctx.String(pprofAddrFlag.Name) 123 | 124 | port := ctx.Int(pprofPortFlag.Name) 125 | 126 | address := fmt.Sprintf("%s:%d", listenHost, port) 127 | // This context value ("metrics.addr") represents the utils.MetricsHTTPFlag.Name. 128 | // It cannot be imported because it will cause a cyclical dependency. 129 | StartPProf(address, !ctx.IsSet("metrics.addr")) 130 | } 131 | return nil 132 | } 133 | 134 | func StartPProf(address string, withMetrics bool) { 135 | // Hook go-metrics into expvar on any /debug/metrics request, load all vars 136 | // from the registry into expvar, and execute regular expvar handler. 137 | if withMetrics { 138 | exp.Exp(metrics.DefaultRegistry) 139 | } 140 | http.Handle("/memsize/", http.StripPrefix("/memsize", &Memsize)) 141 | log.Info("Starting pprof server", "addr", fmt.Sprintf("http://%s/debug/pprof", address)) 142 | go func() { 143 | if err := http.ListenAndServe(address, nil); err != nil { 144 | log.Error("Failure in running pprof server", "err", err) 145 | } 146 | }() 147 | } 148 | -------------------------------------------------------------------------------- /cmd/crawler/utils.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "database/sql" 5 | "fmt" 6 | ) 7 | 8 | func openSQLiteDB( 9 | name, 10 | autovacuum string, 11 | busyTimeout uint64, 12 | ) (*sql.DB, error) { 13 | db, err := sql.Open("sqlite", name) 14 | if err != nil { 15 | return nil, fmt.Errorf("error opening database: %w", err) 16 | } 17 | _, err = db.Exec("PRAGMA auto_vacuum = " + autovacuum) 18 | if err != nil { 19 | return nil, fmt.Errorf("error setting auto_vacuum: %w", err) 20 | } 21 | _, err = db.Exec(fmt.Sprintf("PRAGMA busy_timeout = %d", busyTimeout)) 22 | if err != nil { 23 | return nil, fmt.Errorf("error setting busy_timeout: %w", err) 24 | } 25 | 26 | return db, nil 27 | } 28 | -------------------------------------------------------------------------------- /docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '3' 2 | 3 | services: 4 | crawler: 5 | build: ./ 6 | volumes: 7 | - ./data:/data 8 | command: 9 | - crawl 10 | - --timeout 11 | - 10m 12 | - --crawler-db 13 | - /data/crawler.db 14 | # Make sure you have the GeoLite DB in the ./data directory. 15 | # Comment out the following two lines to disable the GeoLite DB. 
16 | - --geoipdb 17 | - /data/GeoLite2-Country.mmdb 18 | 19 | api: 20 | build: ./ 21 | volumes: 22 | - ./data:/data 23 | depends_on: 24 | - crawler 25 | command: "api --crawler-db /data/crawler.db --api-db /data/api.db" 26 | 27 | frontend: 28 | build: ./frontend 29 | volumes: 30 | - ./frontend/nginx.conf:/etc/nginx/conf.d/default.conf 31 | - ./data:/data 32 | depends_on: 33 | - api 34 | ports: 35 | - 80:80 36 | -------------------------------------------------------------------------------- /docs/api.md: -------------------------------------------------------------------------------- 1 | # Api Specifications 2 | Showcases what the API for querying should be. 3 | 4 | ### MVP: Raw Output (for debug and dev) 5 | 6 | This is a debug endpoint that has the raw `clientId` details. 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 26 | 27 |
MethodGET
Endpoint/api/v1/debug/clients
Response 20 |
 21 | [
 22 |   { clientId: "Geth/goerli/v1.10.4-unstable-966ee3ae-20210528/linux-amd64/go1.16.4", count: 12 }, 
 23 |   { clientId: "Geth/v1.10.4-stable/linux-x86_64/go1.16.4", count: 1 }
 24 | ]
 25 |       
28 | 29 | ### MVP: Parsed Output 30 | 31 | Retrieves the list of clients structured and strongly typed. Ordered by count. 32 | 33 | 34 | 35 | 36 | 37 | 38 | 39 | 40 | 41 | 42 | 43 | 44 | 101 | 102 |
MethodGET
Endpoint/api/v1/clients
Response 45 |
 46 | [
 47 |   {
 48 |     count: 12,
 49 |     client: {
 50 |       name: 'Geth',
 51 |       label: 'goerli',
 52 |       version: { 
 53 |         major: 1,
 54 |         minor: 10,
 55 |         patch: 4,
 56 |         tag: 'unstable',
 57 |         build: '966ee3ae',
 58 |         date: '20210528',
 59 |       },
 60 |       os: {
 61 |         vendor: 'linux',
 62 |         architecture: 'amd64'
 63 |       },
 64 |       runtime: {
 65 |         name: 'go',
 66 |         version: { 
 67 |           major: 1,
 68 |           minor: 16,
 69 |           patch: 4
 70 |         }
 71 |       }
 72 |     },
 73 |   },
 74 |   {
 75 |     count: 1,
 76 |     client: {
 77 |       name: 'Geth',
 78 |       version: { 
 79 |         major: 1,
 80 |         minor: 10,
 81 |         patch: 4,
 82 |         tag: 'stable',
 83 |       },
 84 |       os: {
 85 |         vendor: 'linux',
 86 |         architecture: 'x86_64'
 87 |       },
 88 |       runtime: {
 89 |         name: 'go',
 90 |         version: { 
 91 |           major: 1,
 92 |           minor: 16,
 93 |           patch: 4
 94 |         }
 95 |       }
 96 |     }
 97 |   }
 98 | ]
 99 |       
100 |
103 | 104 | ### Stretch: Filtered Clients 105 | 106 | Gets the list of clients filtered. Ordered by count. You can add more than one filter. 107 | 108 | 109 | 110 | 111 | 112 | 113 | 114 | 115 | 129 | 130 | 131 | 132 | 134 | 135 | 136 | 137 | 144 | 145 |
MethodGET
Query Params 116 | 117 | 118 | 119 | 120 | 121 | 122 | 123 | 124 | 125 | 126 | 127 |
ParamTypeRequired
filterFiltertrue
128 |
Endpoint/api/v1/clients?filter=TBD
Response 138 |
139 | [
140 |   { name: "Geth", count: 1000  }, 
141 |   { name: "Nethermind", count: 12 }, 
142 | ]
143 |       
146 | 147 | ### Stretch: Client Details 148 | 149 | Get the details of a specific client 150 | 151 | 152 | 153 | 154 | 155 | 156 | 157 | 158 | 159 | 160 | 161 | 162 | 212 | 213 |
MethodGET
Endpoint/api/v1/clients/geth
Response 163 |
164 | {
165 |   nodeCount: 3389,
166 |   versions: [
167 |      {
168 |        version: "1.10.3",
169 |        count: 1256
170 |      }
171 |   ],
172 |   syncStatus: {
173 |       synced: 3488,
174 |       syncing: 707
175 |   },
176 |   os: [
177 |       {
178 |         name: "Linux",
179 |         count: 3868,
180 |         architecture: [
181 |           {
182 |             name: "x86_64",
183 |             count: 1200
184 |            }
185 |         ]
186 |       },
187 |       {
188 |         name: "Darwin",
189 |         count: 43,
190 |         architecture: [
191 |           {
192 |             name: "amd64",
193 |             count: 40
194 |            }
195 |         ]
196 |       }
197 |   ],
198 |   runtime: [
199 |       {
200 |         name: "go",
201 |         version: 1.14,
202 |         count: 10
203 |       },
204 |       {
205 |         name: "go",
206 |         version: 1.15,
207 |         count: 5
208 |       }
209 |   ]
210 | }
211 |       
214 | 215 | 216 | ### Stretch: Historical Data 217 | 218 | Get the list of historical entry points given a specific filter. Note filter version is optional so we can send `filter=geth` for all versions or `filter=geth:1.10.2` for all version that includes that or greater! 219 | 220 | 221 | 222 | 223 | 224 | 225 | 226 | 227 | 246 | 247 | 248 | 249 | 251 | 252 | 253 | 254 | 262 | 263 |
MethodGET
Query Params 228 | 229 | 230 | 231 | 232 | 233 | 234 | 235 | 236 | 237 | 238 | 239 | 240 | 241 | 242 | 243 | 244 |
ParamTypeRequired
since[month|day|year|all]false (default=month)
filterFiltertrue
245 |
Endpoint/api/v1/historical?since=month&filter=TBD
Response 255 |
256 | [
257 |   { date: "06-26-2021", geth: 4000, nethermind: 1000 }, 
258 |   { date: "06-25-2021", geth: 4100, nethermind: 900 }, 
259 |   { date: "06-24-2021", geth: 4200, nethermind: 800 }, 
260 | ]
261 |       
264 | </table> 265 | 266 | ## Filter Schema design 267 | 268 | Filter allows `AND` and `OR` querying with an operator baked into each field. 269 | 270 | For example, for an `AND` query, find Geth versions greater than 1.2.45: 271 | 272 | ``` 273 | [ 274 |     [ 275 |         ["name:geth"],["version:1.2.45:gt"] 276 |     ] 277 | ] 278 | ``` 279 | Another example is an `OR` query, filtering by Geth or Nethermind: 280 | 281 | ``` 282 | [ 283 |     [ 284 |         ["name:geth"],["version:1.2.45:gt"] 285 |     ], 286 |     [ 287 |         ["name:nethermind"],["version:0.2.45:gt"] 288 |     ] 289 | ] 290 | ``` -------------------------------------------------------------------------------- /flake.lock: -------------------------------------------------------------------------------- 1 | { 2 |   "nodes": { 3 |     "devshell": { 4 |       "inputs": { 5 |         "nixpkgs": "nixpkgs", 6 |         "systems": "systems" 7 |       }, 8 |       "locked": { 9 |         "lastModified": 1695973661, 10 |         "narHash": "sha256-BP2H4c42GThPIhERtTpV1yCtwQHYHEKdRu7pjrmQAwo=", 11 |         "owner": "numtide", 12 |         "repo": "devshell", 13 |         "rev": "cd4e2fda3150dd2f689caeac07b7f47df5197c31", 14 |         "type": "github" 15 |       }, 16 |       "original": { 17 |         "owner": "numtide", 18 |         "repo": "devshell", 19 |         "type": "github" 20 |       } 21 |     }, 22 |     "flake-parts": { 23 |       "inputs": { 24 |         "nixpkgs-lib": "nixpkgs-lib" 25 |       }, 26 |       "locked": { 27 |         "lastModified": 1696343447, 28 |         "narHash": "sha256-B2xAZKLkkeRFG5XcHHSXXcP7To9Xzr59KXeZiRf4vdQ=", 29 |         "owner": "hercules-ci", 30 |         "repo": "flake-parts", 31 |         "rev": "c9afaba3dfa4085dbd2ccb38dfade5141e33d9d4", 32 |         "type": "github" 33 |       }, 34 |       "original": { 35 |         "owner": "hercules-ci", 36 |         "repo": "flake-parts", 37 |         "type": "github" 38 |       } 39 |     }, 40 |     "nixpkgs": { 41 |       "locked": { 42 |         "lastModified": 1677383253, 43 |         "narHash": "sha256-UfpzWfSxkfXHnb4boXZNaKsAcUrZT9Hw+tao1oZxd08=", 44 |         "owner": "NixOS", 45 |         "repo": "nixpkgs", 46 |         "rev": "9952d6bc395f5841262b006fbace8dd7e143b634", 47 |         "type": "github" 48 |       }, 49 |       "original": { 50 |         "owner": "NixOS", 51 |         "ref": "nixpkgs-unstable", 52 |         "repo": "nixpkgs", 53 |         "type": "github" 54 |       } 55 |     }, 56 |     "nixpkgs-lib": { 57 |       "locked": { 58 |         "dir": "lib", 59 |         "lastModified": 1696019113, 60 |         "narHash": "sha256-X3+DKYWJm93DRSdC5M6K5hLqzSya9BjibtBsuARoPco=", 61 |         "owner": "NixOS", 62 |         "repo": "nixpkgs", 63 |         "rev": "f5892ddac112a1e9b3612c39af1b72987ee5783a", 64 |         "type": "github" 65 |       }, 66 |       "original": { 67 |         "dir": "lib", 68 |         "owner": "NixOS", 69 |         "ref": "nixos-unstable", 70 |         "repo": "nixpkgs", 71 |         "type": "github" 72 |       } 73 |     }, 74 |     "nixpkgs_2": { 75 |       "locked": { 76 |         "lastModified": 1696630667, 77 |         "narHash": "sha256-kO67pYOeT/6m9BnPO+zNHWnC4eGiW87gIAJ+e8f3gwU=", 78 |         "owner": "nixos", 79 |         "repo": "nixpkgs", 80 |         "rev": "b604023e0a5549b65da3040a07d2beb29ac9fc63", 81 |         "type": "github" 82 |       }, 83 |       "original": { 84 |         "owner": "nixos", 85 |         "ref": "nixpkgs-unstable", 86 |         "repo": "nixpkgs", 87 |         "type": "github" 88 |       } 89 |     }, 90 |     "root": { 91 |       "inputs": { 92 |         "devshell": "devshell", 93 |         "flake-parts": "flake-parts", 94 |         "nixpkgs": "nixpkgs_2" 95 |       } 96 |     }, 97 |     "systems": { 98 |       "locked": { 99 |         "lastModified": 1681028828, 100 |         "narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=", 101 |         "owner": "nix-systems", 102 |         "repo": "default", 103 |         "rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e", 104 |         "type": "github" 105 |       }, 106 |       "original": { 107 |         "owner": "nix-systems", 108 |         "repo": "default", 109 |         "type": "github" 110 |       } 111 |     } 112 |   }, 113 |   "root": "root", 114 |   "version": 7 115 | } 116 |
-------------------------------------------------------------------------------- /flake.nix: -------------------------------------------------------------------------------- 1 | { 2 | description = "Ethereum network crawler, API, and frontend"; 3 | 4 | inputs = { 5 | nixpkgs.url = "github:nixos/nixpkgs/nixpkgs-unstable"; 6 | devshell.url = "github:numtide/devshell"; 7 | flake-parts.url = "github:hercules-ci/flake-parts"; 8 | }; 9 | 10 | outputs = inputs@{ self, nixpkgs, devshell, flake-parts }: 11 | flake-parts.lib.mkFlake { inherit inputs; } { 12 | imports = [ 13 | devshell.flakeModule 14 | flake-parts.flakeModules.easyOverlay 15 | ]; 16 | 17 | systems = [ 18 | "x86_64-linux" 19 | "aarch64-linux" 20 | "x86_64-darwin" 21 | "aarch64-darwin" 22 | ]; 23 | 24 | perSystem = { config, pkgs, final, ... }: { 25 | overlayAttrs = { 26 | inherit (config.packages) nodeCrawler; 27 | inherit (config.packages) nodeCrawlerFrontend; 28 | }; 29 | 30 | packages = { 31 | nodeCrawler = pkgs.buildGo121Module rec { 32 | pname = "crawler"; 33 | version = "0.0.0"; 34 | 35 | src = ./.; 36 | subPackages = [ "cmd/crawler" ]; 37 | 38 | vendorHash = "sha256-nR6YsXZvIUupDHGCgOYELDpJVbbPc1SPK9LdwnL5sAQ="; 39 | 40 | doCheck = false; 41 | 42 | CGO_ENABLED = 0; 43 | 44 | ldflags = [ 45 | "-s" 46 | "-w" 47 | "-extldflags -static" 48 | ]; 49 | }; 50 | nodeCrawlerFrontend = pkgs.buildNpmPackage rec { 51 | pname = "frontend"; 52 | version = "0.0.0"; 53 | 54 | src = ./frontend; 55 | 56 | npmDepsHash = "sha256-1nLQVoNkiA4x97UcPe8rNMXa7bYCskazpJesWVLnDHk="; 57 | 58 | installPhase = '' 59 | mkdir -p $out/share 60 | cp -r build/ $out/share/frontend 61 | ''; 62 | }; 63 | }; 64 | 65 | devshells.default = { 66 | packages = with pkgs; [ 67 | go_1_21 68 | golangci-lint 69 | nodejs 70 | sqlite 71 | ]; 72 | }; 73 | }; 74 | 75 | flake = rec { 76 | nixosModules.default = nixosModules.nodeCrawler; 77 | nixosModules.nodeCrawler = { config, lib, pkgs, ... }: 78 | with lib; 79 | let 80 | cfg = config.services.nodeCrawler; 81 | apiAddress = "${cfg.api.address}:${toString cfg.api.port}"; 82 | in 83 | { 84 | options.services.nodeCrawler = { 85 | enable = mkEnableOption (self.flake.description); 86 | 87 | hostName = mkOption { 88 | type = types.str; 89 | default = "localhost"; 90 | description = "Hostname to serve Node Crawler on."; 91 | }; 92 | 93 | nginx = mkOption { 94 | type = types.attrs; 95 | default = { }; 96 | example = literalExpression '' 97 | { 98 | forceSSL = true; 99 | enableACME = true; 100 | } 101 | ''; 102 | description = "Extra configuration for the vhost. 
Useful for adding SSL settings."; 103 |         }; 104 | 105 |         stateDir = mkOption { 106 |           type = types.path; 107 |           default = /var/lib/node_crawler; 108 |           description = "Directory where the databases will exist."; 109 |         }; 110 | 111 |         crawlerDatabaseName = mkOption { 112 |           type = types.str; 113 |           default = "crawler.db"; 114 |           description = "Name of the file within the `stateDir` for storing the data for the crawler."; 115 |         }; 116 | 117 |         apiDatabaseName = mkOption { 118 |           type = types.str; 119 |           default = "api.db"; 120 |           description = "Name of the file within the `stateDir` for storing the data for the API."; 121 |         }; 122 | 123 |         user = mkOption { 124 |           type = types.str; 125 |           default = "nodecrawler"; 126 |           description = "User account under which Node Crawler runs."; 127 |         }; 128 | 129 |         group = mkOption { 130 |           type = types.str; 131 |           default = "nodecrawler"; 132 |           description = "Group account under which Node Crawler runs."; 133 |         }; 134 | 135 |         dynamicUser = mkOption { 136 |           type = types.bool; 137 |           default = true; 138 |           description = '' 139 |             Runs the Node Crawler as a SystemD DynamicUser. 140 |             It means SystemD will allocate the user at runtime, and enables 141 |             some other security features. 142 |             If you are not sure what this means, it's safe to leave it default. 143 |           ''; 144 |         }; 145 | 146 |         api = { 147 |           enable = mkOption { 148 |             default = true; 149 |             type = types.bool; 150 |             description = "Enables the Node Crawler API server."; 151 |           }; 152 | 153 |           address = mkOption { 154 |             type = types.str; 155 |             default = "127.0.0.1"; 156 |             description = "Listen address for the API server."; 157 |           }; 158 | 159 |           port = mkOption { 160 |             type = types.port; 161 |             default = 10000; 162 |             description = "Listen port for the API server."; 163 |           }; 164 |         }; 165 | 166 |         crawler = { 167 |           enable = mkOption { 168 |             default = true; 169 |             type = types.bool; 170 |             description = "Enables the Node Crawler crawler service."; 171 |           }; 172 | 173 |           geoipdb = mkOption { 174 |             type = types.path; 175 |             default = config.services.geoipupdate.settings.DatabaseDirectory + "/GeoLite2-Country.mmdb"; 176 |             description = '' 177 |               Location of the GeoIP database. 178 | 179 |               If the default is used, the files from the `geoipupdate` service are used, 180 |               so you will need to configure that service. 181 |               Make sure to enable the `GeoLite2-Country` edition. 182 | 183 |               If you do not want to enable the `geoipupdate` service, then 184 |               the `GeoLite2-Country` file needs to be provided. 185 |             ''; 186 |           }; 187 | 188 |           network = mkOption { 189 |             type = types.str; 190 |             default = "mainnet"; 191 |             example = "sepolia"; 192 |             description = "Name of the network to crawl.
Defaults to Mainnet."; 193 | }; 194 | }; 195 | }; 196 | 197 | config = mkIf cfg.enable { 198 | systemd.services = { 199 | node-crawler-crawler = { 200 | description = "Node Cralwer, the Ethereum Node Crawler."; 201 | wantedBy = [ "multi-user.target" ]; 202 | after = [ "network.target" ]; 203 | 204 | serviceConfig = { 205 | ExecStart = 206 | let 207 | args = [ 208 | "--crawler-db=${cfg.crawlerDatabaseName}" 209 | "--geoipdb=${cfg.crawler.geoipdb}" 210 | ] 211 | ++ optional (cfg.crawler.network == "goerli") "--goerli" 212 | ++ optional (cfg.crawler.network == "sepolia") "--sepolia"; 213 | in 214 | "${pkgs.nodeCrawler}/bin/crawler crawl ${concatStringsSep " " args}"; 215 | 216 | WorkingDirectory = cfg.stateDir; 217 | StateDirectory = optional (cfg.stateDir == /var/lib/node_crawler) "node_crawler"; 218 | 219 | DynamicUser = cfg.dynamicUser; 220 | Group = cfg.group; 221 | User = cfg.user; 222 | 223 | Restart = "on-failure"; 224 | }; 225 | }; 226 | node-crawler-api = { 227 | description = "Node Cralwer API, the API for the Ethereum Node Crawler."; 228 | wantedBy = [ "multi-user.target" ]; 229 | after = [ "network.target" ] 230 | ++ optional cfg.crawler.enable "node-crawler-crawler.service"; 231 | 232 | serviceConfig = { 233 | ExecStart = 234 | let 235 | args = [ 236 | "--addr=${apiAddress}" 237 | "--crawler-db=${cfg.crawlerDatabaseName}" 238 | "--api-db=${cfg.apiDatabaseName}" 239 | ]; 240 | in 241 | "${pkgs.nodeCrawler}/bin/crawler api ${concatStringsSep " " args}"; 242 | 243 | WorkingDirectory = cfg.stateDir; 244 | StateDirectory = optional (cfg.stateDir == /var/lib/node_crawler) "node_crawler"; 245 | 246 | DynamicUser = cfg.dynamicUser; 247 | Group = cfg.group; 248 | User = cfg.user; 249 | 250 | Restart = "on-failure"; 251 | }; 252 | }; 253 | }; 254 | 255 | services.nginx = { 256 | enable = true; 257 | upstreams.nodeCrawlerApi.servers."${apiAddress}" = { }; 258 | virtualHosts."${cfg.hostName}" = mkMerge [ 259 | cfg.nginx 260 | { 261 | root = mkForce "${pkgs.nodeCrawlerFrontend}/share/frontend"; 262 | locations = { 263 | "/" = { 264 | index = "index.html"; 265 | tryFiles = "$uri $uri/ /index.html"; 266 | }; 267 | "/v1/" = { 268 | proxyPass = "http://nodeCrawlerApi/v1/"; 269 | }; 270 | }; 271 | } 272 | ]; 273 | }; 274 | }; 275 | }; 276 | }; 277 | }; 278 | } 279 | -------------------------------------------------------------------------------- /frontend/.env: -------------------------------------------------------------------------------- 1 | CRAWLER_API_URL= -------------------------------------------------------------------------------- /frontend/Dockerfile: -------------------------------------------------------------------------------- 1 | # Compile frontend 2 | FROM node:18-alpine as builder 3 | WORKDIR /app 4 | 5 | 6 | COPY ./package.json package-lock.json ./ 7 | RUN npm install 8 | 9 | COPY ./ ./ 10 | RUN echo '/v1/' >> .env && cp .env .env.local 11 | RUN npm run build 12 | 13 | # Copy compiled stuff to an alpine nginx image 14 | FROM nginx:alpine 15 | 16 | COPY --from=builder /app/build/ /usr/share/nginx/html 17 | -------------------------------------------------------------------------------- /frontend/deploy.bat: -------------------------------------------------------------------------------- 1 | npm run build && scp -r build\* azureuser@ethereum.mohamedmansour.com:/var/www/ethereum.mohamedmansour.com -------------------------------------------------------------------------------- /frontend/nginx.conf: -------------------------------------------------------------------------------- 1 | 
server {
2 | listen 80;
3 | listen [::]:80;
4 | 
5 | server_name _;
6 | root /usr/share/nginx/html;
7 | index index.html;
8 | 
9 | location / {
10 | try_files $uri $uri/ /index.html;
11 | }
12 | 
13 | location /v1/ {
14 | proxy_pass http://api:10000/v1/;
15 | proxy_set_header Host $http_host;
16 | proxy_set_header X-Real-IP $remote_addr;
17 | proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
18 | proxy_set_header X-Forwarded-Proto $scheme;
19 | }
20 | }
21 | 
-------------------------------------------------------------------------------- /frontend/package.json: --------------------------------------------------------------------------------
1 | {
2 | "name": "ethereum-node-crawler",
3 | "version": "1.0.0",
4 | "private": true,
5 | "dependencies": {
6 | "@chakra-ui/react": "^1.6.6",
7 | "@emotion/react": "^11.4.1",
8 | "@emotion/styled": "^11.3.0",
9 | "@testing-library/jest-dom": "^5.14.1",
10 | "@testing-library/react": "^12.0.0",
11 | "@testing-library/user-event": "^13.2.1",
12 | "d3-scale": "^4.0.0",
13 | "d3-scale-chromatic": "^3.0.0",
14 | "framer-motion": "^4.1.17",
15 | "react": "^17.0.2",
16 | "react-dom": "^17.0.2",
17 | "react-icons": "^4.2.0",
18 | "react-router-dom": "^5.2.0",
19 | "react-scripts": "^5.0.1",
20 | "recharts": "2.0.10",
21 | "string.prototype.matchall": "^4.0.5"
22 | },
23 | "scripts": {
24 | "start": "react-scripts start",
25 | "build": "react-scripts build",
26 | "test": "react-scripts test",
27 | "eject": "react-scripts eject"
28 | },
29 | "eslintConfig": {
30 | "extends": [
31 | "react-app",
32 | "react-app/jest"
33 | ]
34 | },
35 | "browserslist": {
36 | "production": [
37 | ">0.2%",
38 | "not dead",
39 | "not op_mini all"
40 | ],
41 | "development": [
42 | "last 1 chrome version",
43 | "last 1 firefox version",
44 | "last 1 safari version"
45 | ]
46 | },
47 | "devDependencies": {
48 | "@types/d3-scale": "^4.0.1",
49 | "@types/d3-scale-chromatic": "^3.0.0",
50 | "@types/jest": "^27.0.1",
51 | "@types/node": "^16.6.1",
52 | "@types/react": "^17.0.18",
53 | "@types/react-dom": "^17.0.9",
54 | "@types/react-router-dom": "^5.1.8",
55 | "@types/string.prototype.matchall": "^4.0.0",
56 | "node-cron": "^3.0.0",
57 | "node-fetch": "^2.6.1",
58 | "typescript": "^4.3.5"
59 | }
60 | }
61 | 
-------------------------------------------------------------------------------- /frontend/public/android-chrome-192x192.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ethereum/node-crawler/e474feeda82aa39f9949054d4daac9f9157e2cbf/frontend/public/android-chrome-192x192.png
-------------------------------------------------------------------------------- /frontend/public/android-chrome-512x512.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ethereum/node-crawler/e474feeda82aa39f9949054d4daac9f9157e2cbf/frontend/public/android-chrome-512x512.png
-------------------------------------------------------------------------------- /frontend/public/apple-touch-icon.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ethereum/node-crawler/e474feeda82aa39f9949054d4daac9f9157e2cbf/frontend/public/apple-touch-icon.png
-------------------------------------------------------------------------------- /frontend/public/browserconfig.xml: --------------------------------------------------------------------------------
1 | <?xml version="1.0" encoding="utf-8"?>
2 | <browserconfig>
3 | <msapplication>
4 | <tile>
5 | <square150x150logo src="/mstile-150x150.png"/>
6 | <TileColor>#da532c</TileColor>
7 | </tile>
8 | </msapplication>
9 | </browserconfig>
10 | 
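How the deployment pieces above fit together: the frontend image is built with CRAWLER_API_URL=/v1/, so the app issues relative API requests, and the nginx config forwards anything under /v1/ to the backend. A minimal sketch of such a request follows; the `/v1/clients` endpoint name is an assumption for illustration only (the real routes are documented in docs/api.md):

-------------------------------------------------------------------------------- [illustrative sketch, not a repository file] fetchClients.ts: --------------------------------------------------------------------------------
1 | import { ClientApiResponse } from './data/DataProcessor';
2 | 
3 | // Fetch client counts through the nginx `/v1/` proxy. The endpoint name
4 | // `/v1/clients` is hypothetical; nginx would rewrite such a call to
5 | // http://api:10000/v1/clients per the location block above.
6 | export async function fetchClients(): Promise<ClientApiResponse[]> {
7 |   const response = await fetch('/v1/clients')
8 |   if (!response.ok) {
9 |     throw new Error(`API request failed: ${response.status}`)
10 |   }
11 |   return response.json()
12 | }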
-------------------------------------------------------------------------------- /frontend/public/favicon-16x16.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ethereum/node-crawler/e474feeda82aa39f9949054d4daac9f9157e2cbf/frontend/public/favicon-16x16.png
-------------------------------------------------------------------------------- /frontend/public/favicon-32x32.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ethereum/node-crawler/e474feeda82aa39f9949054d4daac9f9157e2cbf/frontend/public/favicon-32x32.png
-------------------------------------------------------------------------------- /frontend/public/favicon.ico: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ethereum/node-crawler/e474feeda82aa39f9949054d4daac9f9157e2cbf/frontend/public/favicon.ico
-------------------------------------------------------------------------------- /frontend/public/index.html: --------------------------------------------------------------------------------
[The HTML markup of index.html was stripped from this dump and is not recoverable. What survives: the page title is "Ethereum Node Stats"; the head references the icons and web manifest in this directory, and the body contains the root element that src/index.tsx renders into.]
-------------------------------------------------------------------------------- /frontend/public/mstile-144x144.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ethereum/node-crawler/e474feeda82aa39f9949054d4daac9f9157e2cbf/frontend/public/mstile-144x144.png
-------------------------------------------------------------------------------- /frontend/public/mstile-150x150.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ethereum/node-crawler/e474feeda82aa39f9949054d4daac9f9157e2cbf/frontend/public/mstile-150x150.png
-------------------------------------------------------------------------------- /frontend/public/mstile-310x150.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ethereum/node-crawler/e474feeda82aa39f9949054d4daac9f9157e2cbf/frontend/public/mstile-310x150.png
-------------------------------------------------------------------------------- /frontend/public/mstile-310x310.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ethereum/node-crawler/e474feeda82aa39f9949054d4daac9f9157e2cbf/frontend/public/mstile-310x310.png
-------------------------------------------------------------------------------- /frontend/public/mstile-70x70.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ethereum/node-crawler/e474feeda82aa39f9949054d4daac9f9157e2cbf/frontend/public/mstile-70x70.png
-------------------------------------------------------------------------------- /frontend/public/robots.txt: --------------------------------------------------------------------------------
1 | # https://www.robotstxt.org/robotstxt.html
2 | User-agent: *
3 | Disallow:
4 | 
-------------------------------------------------------------------------------- /frontend/public/safari-pinned-tab.svg: --------------------------------------------------------------------------------
[The SVG markup was stripped from this dump and is not recoverable; the file is the monochrome pinned-tab icon referenced by the site metadata.]
-------------------------------------------------------------------------------- /frontend/public/site.webmanifest: --------------------------------------------------------------------------------
1 | {
2 | "name": "Ethereum Node Stats",
3 | "short_name": "ethereum-node-stats",
4 | "icons": [
5 | {
6 | "src": "/android-chrome-192x192.png",
7 | "sizes": "192x192",
8 | "type": "image/png"
9 | },
10 | {
11 | "src": "/android-chrome-512x512.png",
12 | "sizes": "512x512",
13 | "type": "image/png"
14 | }
15 | ],
16 | "theme_color": "#ffffff",
17 | "background_color": "#ffffff",
18 | "display": "standalone"
19 | }
20 | 
-------------------------------------------------------------------------------- /frontend/src/atoms/Card.tsx: --------------------------------------------------------------------------------
1 | import { Box, forwardRef, Heading, HTMLChakraProps, useStyleConfig } from "@chakra-ui/react";
2 | import React from "react";
3 | 
4 | interface CardProps extends HTMLChakraProps<"div"> {
5 | title: string
6 | contentHeight?: number
7 | variant?: string
8 | }
9 | 
10 | export const Card = forwardRef((props: CardProps, ref: React.ForwardedRef<any>) => {
11 | const {
12 | variant,
13 | title,
14 | contentHeight,
15 | ...rest
16 | } = props
17 | const styles = useStyleConfig("Card", { variant })
18 | 
19 | return (
20 | <Box ref={ref} __css={styles} {...rest}>
21 | <Heading size="sm">{title}</Heading>
22 | <Box h={contentHeight}>
23 | {props.children}
24 | </Box>
25 | </Box>
26 | )
27 | })
28 | 
-------------------------------------------------------------------------------- /frontend/src/atoms/CustomResponsiveContainer.tsx: --------------------------------------------------------------------------------
1 | import { ResponsiveContainer } from "recharts";
2 | 
3 | // https://github.com/recharts/recharts/issues/1767#issuecomment-598607012
4 | export function CustomResponsiveContainer(props: any) {
5 | return (
6 | <div style={{ width: "100%", height: "100%", position: "relative" }}>
7 | <div
8 | style={{
9 | width: "100%",
10 | height: "100%",
11 | position: "absolute",
12 | top: 0,
13 | left: 0,
14 | }}
15 | >
16 | <ResponsiveContainer {...props} />
17 | </div>
18 | </div>
19 | );
20 | }
21 | 
-------------------------------------------------------------------------------- /frontend/src/atoms/Logo.tsx: --------------------------------------------------------------------------------
1 | import { forwardRef, HTMLChakraProps, Icon } from "@chakra-ui/react";
2 | import React from "react";
3 | 
4 | export interface LogoProps extends HTMLChakraProps<"svg"> {
5 | }
6 | 
7 | export const Logo = forwardRef(
8 | (props: LogoProps, ref: React.ForwardedRef<any>) => {
9 | let w = props.w
10 | let h = props.h
11 | 
12 | // Keep the logo square: mirror whichever dimension is missing.
13 | if (w && !h) h = w
14 | if (h && !w) w = h
15 | if (h && w) h = w
16 | 
17 | return (
18 | <Icon ref={ref} w={w} h={h} {...props}>
19 | {/* The SVG path elements were stripped from this dump and are not
20 | recoverable here. */}
21 | </Icon>
22 | )
23 | }
24 | )
-------------------------------------------------------------------------------- /frontend/src/atoms/TablePlus.tsx: --------------------------------------------------------------------------------
1 | // Allows injecting variants so we can have custom tables such as sticky column headers.
2 | 
3 | import { Table, TableCellProps, TableColumnHeaderProps, TableProps, Td, Th, useStyleConfig } from "@chakra-ui/react"
4 | 
5 | export function TablePlus(props: TableProps) {
6 | const { children, ...rest } = props
7 | const styles = useStyleConfig("TablePlus", {}) as any
8 | return <Table sx={styles} {...rest}>{children}</Table>
9 | } 10 | 11 | interface ThPlusProps extends TableColumnHeaderProps { 12 | variant?: 'sticky' 13 | } 14 | 15 | export function ThPlus(props: ThPlusProps) { 16 | const { variant, children, ...rest } = props 17 | const styles = useStyleConfig("ThPlus", { variant }) as any 18 | return {children} 19 | } 20 | 21 | interface TdPlusProps extends TableCellProps { 22 | variant?: 'sticky' 23 | } 24 | 25 | export function TdPlus(props: TdPlusProps) { 26 | const { variant, children, ...rest } = props 27 | const styles = useStyleConfig("TdPlus", { variant }) as any 28 | return {children} 29 | } 30 | -------------------------------------------------------------------------------- /frontend/src/atoms/TooltipCard.tsx: -------------------------------------------------------------------------------- 1 | import { Box, useColorModeValue } from "@chakra-ui/react"; 2 | 3 | export interface NamedCount { 4 | name: string; 5 | count: number; 6 | total: number; 7 | currentPercentage: number; 8 | totalPercentage: number; 9 | } 10 | 11 | interface TooltipCardProps { 12 | children: React.ReactNode; 13 | } 14 | 15 | export function TooltipCard(props: TooltipCardProps) { 16 | const tooltipBackroundColor = useColorModeValue("rgba(255,255,255,1)", "rgba(0,0,0,1)") 17 | return ( 18 | 19 | {props.children} 20 | 21 | ) 22 | } -------------------------------------------------------------------------------- /frontend/src/config.ts: -------------------------------------------------------------------------------- 1 | import { scaleOrdinal } from "d3-scale"; 2 | import { schemeCategory10 } from "d3-scale-chromatic"; 3 | import { FilterGroup, generateQueryStringFromFilterGroups } from "./data/FilterUtils"; 4 | 5 | export const colors = scaleOrdinal(schemeCategory10).range(); 6 | 7 | export const knownNodesFilter: FilterGroup[] = [ 8 | [{ name: 'name', value: 'geth' }], 9 | [{ name: 'name', value: 'nethermind' }], 10 | [{ name: 'name', value: 'turbogeth' }], 11 | [{ name: 'name', value: 'turbo-geth' }], 12 | [{ name: 'name', value: 'erigon' }], 13 | [{ name: 'name', value: 'besu' }], 14 | [{ name: 'name', value: 'openethereum' }], 15 | [{ name: 'name', value: 'ethereum-js' }] 16 | ] 17 | 18 | export const knownNodesFilterString = generateQueryStringFromFilterGroups(knownNodesFilter) 19 | 20 | export const LayoutEightPadding = [4, 4, 4, 8] 21 | export const LayoutTwoColumn = ["repeat(1, 1fr)", "repeat(1, 1fr)", "repeat(1, 1fr)", "repeat(2, 1fr)"] 22 | export const LayoutTwoColSpan = [1, 1, 1, 2] 23 | -------------------------------------------------------------------------------- /frontend/src/data/DataMassager.ts: -------------------------------------------------------------------------------- 1 | interface NamedCount { 2 | name: string; 3 | count: number; 4 | } 5 | 6 | export function appendOtherGroup(list: NamedCount[] | undefined): [NamedCount[], number] { 7 | let unknownItemCount = 0 8 | 9 | if (!list) { 10 | return [[], unknownItemCount]; 11 | } 12 | 13 | const otherItem: NamedCount = { 14 | name: 'Other', 15 | count: 0 16 | }; 17 | 18 | const filteredList: NamedCount[] = [] 19 | list.forEach((item) => { 20 | if (item.name === 'tmp' || item.name === 'eth2' || item.name === '') { 21 | unknownItemCount += item.count 22 | } else if (filteredList.length > 9) { 23 | otherItem.count += item.count 24 | } else { 25 | filteredList.push(item) 26 | } 27 | }) 28 | 29 | if (otherItem.count > 0) { 30 | filteredList.push(otherItem) 31 | } 32 | 33 | return [filteredList, unknownItemCount]; 34 | } 35 | 
-------------------------------------------------------------------------------- /frontend/src/data/DataProcessor.test.ts: -------------------------------------------------------------------------------- 1 | import { ClientApiResponse, ClientsProcessor } from "./DataProcessor"; 2 | 3 | const mockPeers: ClientApiResponse[] = [ 4 | { clientId: "Geth/v1.10.3-stable-991384a7/linux-amd64/go1.16.3", count: 5 }, 5 | { clientId: "Geth/v1.10.4-stable/linux-x64/go1.16.4", count: 4 }, 6 | { clientId: "Geth/goerli/v1.10.4-unstable-966ee3ae-20210528/linux-amd64/go1.16.4", count: 3 }, 7 | { clientId: "besu/v21.7.0-RC1/darwin-x86_64/corretto-java-11", count: 2 }, 8 | { clientId: "erigon/v2021.06.5-alpha-a0694dd3/windows-x86_64/go1.16.5", count: 1 }, 9 | { clientId: "OpenEthereum/v3.2.6-stable-f9f4926-20210514/x86_64-linux-gnu/rustc1.52.1", count: 5 }, 10 | { clientId: "Geth/v1.9.1-stable/linux-amd64/go1.15.1", count: 1 }, 11 | { clientId: "Geth/v1.9.2-stable/windows-amd64/go1.15.1", count: 1 }, 12 | { clientId: "Geth/v1.8.1-stable/darwin-amd64/go1.15.1", count: 1 }, 13 | ]; 14 | 15 | function mockRaw() { 16 | const errorCallback = jest.fn() 17 | const processor = ClientsProcessor(mockPeers, errorCallback).getRaw() 18 | expect(processor).toHaveLength(mockPeers.length) 19 | expect(errorCallback).not.toBeCalled() 20 | return processor 21 | } 22 | 23 | function mockProcessor() { 24 | const errorCallback = jest.fn() 25 | const processor = ClientsProcessor(mockPeers, errorCallback) 26 | expect(errorCallback).not.toBeCalled() 27 | return processor 28 | } 29 | 30 | test("processes full schema correctly", () => { 31 | const raw = mockRaw() 32 | expect(raw[0]).toEqual({ 33 | count: 5, 34 | primaryKey: 1, 35 | name: 'geth', 36 | version: { 37 | major: 1, 38 | minor: 10, 39 | patch: 3, 40 | tag: 'stable', 41 | build: '991384a7', 42 | }, 43 | os: { 44 | vendor: 'linux', 45 | architecture: 'amd64', 46 | }, 47 | runtime: { 48 | name: 'go', 49 | version: { 50 | major: 1, 51 | minor: 16, 52 | patch: 3 53 | } 54 | } 55 | }) 56 | 57 | expect(raw[1]).toEqual({ 58 | count: 4, 59 | primaryKey: 2, 60 | name: 'geth', 61 | version: { 62 | major: 1, 63 | minor: 10, 64 | patch: 4, 65 | tag: 'stable' 66 | }, 67 | os: { 68 | vendor: 'linux', 69 | architecture: 'x64' 70 | }, 71 | runtime: { 72 | name: 'go', 73 | version: { 74 | major: 1, 75 | minor: 16, 76 | patch: 4 77 | } 78 | } 79 | }) 80 | 81 | expect(raw[2]).toEqual({ 82 | count: 3, 83 | primaryKey: 3, 84 | name: 'geth', 85 | label: 'goerli', 86 | version: { 87 | major: 1, 88 | minor: 10, 89 | patch: 4, 90 | tag: 'unstable', 91 | build: '966ee3ae', 92 | date: '20210528' 93 | }, 94 | os: { 95 | vendor: 'linux', 96 | architecture: 'amd64' 97 | }, 98 | runtime: { 99 | name: 'go', 100 | version: { 101 | major: 1, 102 | minor: 16, 103 | patch: 4 104 | } 105 | } 106 | }) 107 | }); 108 | 109 | test("processes runtimes correctly", () => { 110 | const raw = mockRaw() 111 | expect(raw.map(p => (p?.runtime))).toEqual([ 112 | { name: 'go', version: { major: 1, minor: 16, patch: 3 } }, 113 | { name: 'go', version: { major: 1, minor: 16, patch: 4 } }, 114 | { name: 'go', version: { major: 1, minor: 16, patch: 4 } }, 115 | { name: 'java', version: { major: 11 } }, 116 | { name: 'go', version: { major: 1, minor: 16, patch: 5 } }, 117 | { name: 'rustc', version: { major: 1, minor: 52, patch: 1 } }, 118 | { name: 'go', version: { major: 1, minor: 15, patch: 1 } }, 119 | { name: 'go', version: { major: 1, minor: 15, patch: 1 } }, 120 | { name: 'go', version: { major: 1, minor: 15, patch: 1 } }, 
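// NB: "corretto-java-11" parses as name "java" with only a major version,
// which is why the Java entry above has no minor or patch component.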
121 | ])
122 | });
123 | 
124 | test("processes os correctly", () => {
125 | const raw = mockRaw()
126 | expect(raw.map(p => (p?.os))).toEqual([
127 | { vendor: 'linux', architecture: 'amd64' },
128 | { vendor: 'linux', architecture: 'x64' },
129 | { vendor: 'linux', architecture: 'amd64' },
130 | { vendor: 'darwin', architecture: 'x86_64' },
131 | { vendor: 'windows', architecture: 'x86_64' },
132 | { vendor: 'linux', architecture: 'x86_64' },
133 | { vendor: 'linux', architecture: 'amd64' },
134 | { vendor: 'windows', architecture: 'amd64' },
135 | { vendor: 'darwin', architecture: 'amd64' },
136 | ])
137 | });
138 | 
139 | test("top clients", () => {
140 | const processor = mockProcessor()
141 | expect(processor.queryData().clients).toEqual([
142 | { name: 'geth', count: 15 },
143 | { name: 'openethereum', count: 5 },
144 | { name: 'besu', count: 2 },
145 | { name: 'erigon', count: 1 }
146 | ])
147 | })
148 | 
149 | test("top runtimes", () => {
150 | const processor = mockProcessor()
151 | expect(processor.queryData().languages).toEqual([
152 | { name: 'go', count: 16 },
153 | { name: 'rustc', count: 5 },
154 | { name: 'java', count: 2 }
155 | ])
156 | })
157 | 
158 | test("top operating systems", () => {
159 | const processor = mockProcessor()
160 | expect(processor.queryData().operatingSystems).toEqual([
161 | { name: 'linux', count: 18 },
162 | { name: 'darwin', count: 3 },
163 | { name: 'windows', count: 2 }
164 | ])
165 | })
166 | 
167 | test("filtering with exact conditions", () => {
168 | const processor = mockProcessor()
169 | expect(processor.queryData({}, [{ os: {vendor: 'linux'}}]).clients).toEqual([
170 | { name: 'geth', count: 13 },
171 | { name: 'openethereum', count: 5 }
172 | ])
173 | 
174 | expect(processor.queryData({}, [{ name: 'Geth'}]).clients).toEqual([
175 | { name: 'geth', count: 15 }
176 | ])
177 | 
178 | expect(processor.queryData({}, [{ name: 'geth', version: {major: 1, minor: 10, patch: 4}}]).clients).toEqual([
179 | { name: 'geth', count: 7 }
180 | ])
181 | })
182 | 
183 | test("filtering with conditionals", () => {
184 | const processor = mockProcessor()
185 | expect(processor.queryData({}, [{ name: 'Geth', version: {major: 1, minor: 'gte 10'}}]).clients).toEqual([
186 | { name: 'geth', count: 12 }
187 | ])
188 | 
189 | expect(processor.queryData({}, [{ name: 'geth', version: {major: 1, minor: 'gte 8'}}]).clients).toEqual([
190 | { name: 'geth', count: 15 }
191 | ])
192 | })
193 | 
194 | test("complex filtering with multiple filters and conditions", () => {
195 | const processor = mockProcessor()
196 | expect(processor.queryData({}, [
197 | { name: 'geth', version: {major: 1, minor: 'gte 9'}},
198 | { name: 'geth', version: {major: 1, minor: 'lt 10'}}
199 | ]).clients).toEqual([
200 | { name: 'geth', count: 2 }
201 | ])
202 | })
-------------------------------------------------------------------------------- /frontend/src/data/DataProcessor.ts: --------------------------------------------------------------------------------
1 | import matchAll from 'string.prototype.matchall'
2 | import { matchesFilter } from './FilterRunner';
3 | import { SortedMap } from './SortedMap';
4 | 
5 | export interface ClientApiResponse {
6 | clientId: string;
7 | count: number;
8 | }
9 | 
10 | export interface Option {
11 | showOperatingSystemArchitecture?: boolean
12 | showRuntimeVersion?: boolean
13 | }
14 | 
15 | export interface Filter {
16 | name?: string
17 | version?: Partial<Version>
18 | os?: Partial<OperatingSytem>
19 | runtime?: Partial<Runtime>
20 | }
21 | 
22 | export interface Runtime {
23 | name?: string;
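// e.g. "go" or "rustc"; the version may be partial (Java strings like
// "corretto-java-11" yield only a major number).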
24 | version?: Version;
25 | }
26 | 
27 | export interface Version {
28 | major: number | string;
29 | minor?: number | string;
30 | patch?: number | string;
31 | tag?: string;
32 | build?: string;
33 | date?: string;
34 | }
35 | 
36 | export interface OperatingSytem {
37 | vendor: string;
38 | architecture: string;
39 | }
40 | 
41 | export interface ClientDetail {
42 | label?: string;
43 | version?: Version;
44 | os?: OperatingSytem;
45 | runtime?: Runtime;
46 | }
47 | 
48 | export interface Client extends ClientDetail {
49 | primaryKey: number;
50 | name: string;
51 | count: number;
52 | }
53 | 
54 | export interface ClientResponse {
55 | clients: NameCountResponse[]
56 | versions: NameCountResponse[]
57 | operatingSystems: NameCountResponse[]
58 | languages: NameCountResponse[]
59 | }
60 | 
61 | export interface ClientDatabase {
62 | obj: {[key: number]: Client}
63 | queryData(option?: Option, filters?: Filter[]): ClientResponse
64 | getRaw(): Client[]
65 | }
66 | 
67 | export interface NameCountResponse {
68 | name: string
69 | count: number
70 | }
71 | 
72 | export interface LoadingResponse {
73 | status: 'loading'
74 | }
75 | 
76 | interface ParseParam {
77 | raw: string
78 | primaryKey: number
79 | errorCallback: (entity: string, data: string, primaryKey: number) => void
80 | }
81 | 
82 | const osMapping: { [key: string]: string } = {
83 | linux: "vendor",
84 | windows: "vendor",
85 | darwin: "vendor",
86 | x86_64: "architecture",
87 | x64: "architecture",
88 | amd64: "architecture"
89 | };
90 | 
91 | function tryParseNumber(input: string): number | undefined {
92 | const parsed = parseInt(input); return isNaN(parsed) ? undefined : parsed; // 0 is a valid version component
93 | }
94 | 
95 | function parseVersion(version: string, parseOpt: ParseParam): Version | undefined {
96 | const matches = version.match(/v?(?<major>\d+)(?:.(?<minor>\d+).(?<patch>\d+)(?:-(?<tag>\w+)(?:-(?<build>[a-zA-Z0-9]+)(?:-(?<date>\d+))?)?)?)?/)
97 | 
98 | if (!matches?.groups) {
99 | parseOpt.errorCallback('version', version, parseOpt.primaryKey);
100 | return undefined
101 | }
102 | 
103 | const minor = tryParseNumber(matches.groups.minor)
104 | const patch = tryParseNumber(matches.groups.patch)
105 | const tag = matches.groups.tag
106 | const build = matches.groups.build
107 | const date = matches.groups.date
108 | 
109 | return {
110 | major: parseInt(matches.groups.major),
111 | ...minor !== undefined && { minor },
112 | ...patch !== undefined && { patch },
113 | ...tag && { tag },
114 | ...build && { build },
115 | ...date && { date },
116 | }
117 | }
118 | 
119 | function parseOs(os: string, parseOpt: ParseParam): OperatingSytem | undefined {
120 | const match = matchAll(os, /(linux|windows|darwin|x86_64|x64|amd64)/g)
121 | const matches = Array.from(match);
122 | 
123 | if (matches.length) {
124 | const result: any = {};
125 | matches.forEach((match) => {
126 | const value = match[1];
127 | const key = osMapping[value];
128 | result[key] = value;
129 | });
130 | 
131 | return result;
132 | }
133 | 
134 | parseOpt.errorCallback('os', os, parseOpt.primaryKey);
135 | return undefined;
136 | }
137 | 
138 | function parseRuntime(runtime: string, parseOpt: ParseParam): Runtime | undefined {
139 | const matches = runtime.match(/(?<name>[a-zA-Z]+)?-?(?<version>[\d+.?]+)/);
140 | if (matches?.groups) {
141 | const name = matches.groups['name']
142 | const version = parseVersion(matches.groups['version'], parseOpt)
143 | return {
144 | ...name && { name },
145 | version
146 | }
147 | }
148 | 
149 | parseOpt.errorCallback('runtime', runtime, parseOpt.primaryKey);
150 | return undefined;
151 | }
152 | 
153 | function parseRaw(raw: string, parseOpt: ParseParam): ClientDetail | undefined {
154 | const tokenize = raw.split('/')
155 | let label: string | undefined
156 | let version: Version | undefined
157 | let os: OperatingSytem | undefined
158 | let runtime: Runtime | undefined
159 | 
160 | if (tokenize.length === 5) {
161 | label = tokenize[1]
162 | version = parseVersion(tokenize[2], parseOpt)
163 | os = parseOs(tokenize[3], parseOpt)
164 | runtime = parseRuntime(tokenize[4], parseOpt)
165 | } else if (tokenize.length === 4) {
166 | version = parseVersion(tokenize[1], parseOpt)
167 | os = parseOs(tokenize[2], parseOpt)
168 | runtime = parseRuntime(tokenize[3], parseOpt)
169 | } else if (tokenize.length === 3) {
170 | version = parseVersion(tokenize[0], parseOpt)
171 | os = parseOs(tokenize[1], parseOpt)
172 | runtime = parseRuntime(tokenize[2], parseOpt)
173 | } else if (tokenize.length === 2) {
174 | os = parseOs(tokenize[0], parseOpt)
175 | runtime = parseRuntime(tokenize[1], parseOpt)
176 | }
177 | 
178 | if (label || version || os || runtime) {
179 | return {
180 | ...label && { label },
181 | ...version && { version },
182 | ...os && { os },
183 | ...runtime && { runtime },
184 | };
185 | }
186 | 
187 | parseOpt.errorCallback('raw', raw, parseOpt.primaryKey);
188 | return undefined;
189 | }
190 | 
191 | export function ClientsProcessor(
192 | data: ClientApiResponse[],
193 | errorCallback: (entity: string, data: string, clientId: string) => void
194 | ): ClientDatabase {
195 | 
196 | const obj: {[key: number]: Client} = {}
197 | const topRuntimes = new Map<string, number>()
198 | const topOs = new Map<string, number>()
199 | 
200 | let primaryKey = 0
201 | 
202 | const parse = (item: ClientApiResponse): Client | undefined => {
203 | primaryKey++;
204 | const clientId = item.clientId.toLowerCase()
205 | if (!clientId) {
206 | errorCallback('parse', 'empty client id', '');
207 | return undefined;
208 | }
209 | 
210 | const matches = clientId.match(/(?<name>\w+)\/(?<rest>.+)/);
211 | if (matches?.groups) {
212 | const raw = parseRaw(clientId, { primaryKey, errorCallback: (entity, data, pk) => {
213 | errorCallback(entity, data, `${pk}: "${clientId}"`)
214 | }, raw: clientId });
215 | if (!raw) {
216 | return undefined;
217 | }
218 | 
219 | if (raw.runtime) {
220 | const runtimeName = raw.runtime.name || 'Unknown'
221 | topRuntimes.set(runtimeName, (topRuntimes.get(runtimeName) || 0) + item.count)
222 | }
223 | 
224 | if (raw.os) {
225 | const osName = raw.os.vendor || 'Unknown'
226 | topOs.set(osName, (topOs.get(osName) || 0) + item.count)
227 | }
228 | 
229 | return {
230 | primaryKey,
231 | name: matches.groups.name,
232 | count: item.count,
233 | ...raw,
234 | };
235 | }
236 | 
237 | errorCallback('parse', item.clientId, '');
238 | return undefined;
239 | };
240 | 
241 | const matchesFilters = (client: Client, filters?: Filter[]): boolean => {
242 | if (!filters || !filters.length) {
243 | return true
244 | }
245 | 
246 | return filters.every(f => matchesFilter(client, f))
247 | }
248 | 
249 | const queryData = (options: Option = {}, filters?: Filter[]): ClientResponse => {
250 | 
251 | const convert = (a: [string, number]) => ({
252 | name: a[0],
253 | count: a[1]
254 | })
255 | 
256 | const versionToString = (version: Version): string => {
257 | let versionString = '' + version.major
258 | 
259 | if (version.minor !== undefined) {
260 | versionString += '.' + version.minor
261 | }
262 | 
263 | if (version.patch !== undefined) {
264 | versionString += '.' + version.patch
265 | }
266 | 
267 | if (version.tag !== undefined) {
268 | versionString += '-' + version.tag
269 | }
270 | 
271 | return versionString
272 | }
273 | 
274 | const runtimeToString = (runtime: Runtime): string => {
275 | let runtimeString = ''
276 | 
277 | if (runtime.name) {
278 | runtimeString += runtime.name
279 | }
280 | 
281 | if (options.showRuntimeVersion && runtime.version) {
282 | runtimeString += versionToString(runtime.version)
283 | }
284 | 
285 | return runtimeString
286 | }
287 | 
288 | const operatingSystemToString = (os: OperatingSytem): string => {
289 | let osString: string[] = []
290 | 
291 | if (os.vendor) {
292 | osString.push(os.vendor)
293 | }
294 | 
295 | if (options.showOperatingSystemArchitecture && os.architecture) {
296 | osString.push(os.architecture)
297 | }
298 | 
299 | return osString.join('-')
300 | }
301 | 
302 | const cache = {
303 | clients: SortedMap<string, number>((a, b) => b[1] - a[1]),
304 | versions: SortedMap<string, number>((a, b) => b[1] - a[1]),
305 | runtimes: SortedMap<string, number>((a, b) => b[1] - a[1]),
306 | operatingSystems: SortedMap<string, number>((a, b) => b[1] - a[1])
307 | }
308 | 
309 | for (let a in obj) {
310 | const client = obj[a]
311 | if (matchesFilters(client, filters)) {
312 | cache.clients.set(client.name, (cache.clients.get(client.name) || 0) + client.count)
313 | 
314 | if (client.version) {
315 | const versionString = versionToString(client.version)
316 | cache.versions.set(versionString, (cache.versions.get(versionString) || 0) + client.count)
317 | }
318 | 
319 | if (client.runtime) {
320 | const runtimeString = runtimeToString(client.runtime)
321 | cache.runtimes.set(runtimeString, (cache.runtimes.get(runtimeString) || 0) + client.count)
322 | }
323 | 
324 | if (client.os) {
325 | const osString = operatingSystemToString(client.os)
326 | cache.operatingSystems.set(osString, (cache.operatingSystems.get(osString) || 0) + client.count)
327 | }
328 | }
329 | }
330 | 
331 | return {
332 | clients: cache.clients.map(convert),
333 | versions: cache.versions.map(convert),
334 | languages: cache.runtimes.map(convert),
335 | operatingSystems: cache.operatingSystems.map(convert)
336 | }
337 | }
338 | 
339 | const getRaw = (): Client[] => {
340 | return Object.keys(obj).map(o => obj[parseInt(o)])
341 | }
342 | 
343 | data.forEach((item) => {
344 | const parsedItem = parse(item)
345 | if (parsedItem) {
346 | obj[parsedItem.primaryKey] = parsedItem
347 | }
348 | })
349 | 
350 | return {
351 | obj,
352 | getRaw,
353 | queryData
354 | }
355 | }
356 | 
357 | export const EmptyDatabase: ClientDatabase = {
358 | obj: {},
359 | queryData: (options: Option) => ({
360 | clients: [],
361 | versions: [],
362 | operatingSystems: [],
363 | languages: []
364 | }),
365 | getRaw: () => [],
366 | }
367 | 
-------------------------------------------------------------------------------- /frontend/src/data/FilterRunner.test.ts: --------------------------------------------------------------------------------
1 | import { matchesFilter } from "./FilterRunner"
2 | 
3 | const client = {
4 | a: 'b',
5 | num: 12,
6 | c: {
7 | 'd': 'cheetah',
8 | 'e': 'cat',
9 | f: {
10 | g: {
11 | h: {
12 | k: 'dog'
13 | }
14 | }
15 | }
16 | }
17 | }
18 | 
19 | test("simple filter types on root", () => {
20 | expect(matchesFilter(client, { num: 'gte 11'})).toBeTruthy()
21 | })
22 | 
23 | test("simple filter types in nested", () => {
24 | expect(matchesFilter(client, { c: { d: 'cheetah'}})).toBeTruthy()
25 | expect(matchesFilter(client, { c: { e: 'cat'}})).toBeTruthy()
26 | expect(matchesFilter(client, { c: { f: { g: { h: { k: 'dog'}}}}})).toBeTruthy()
27 | })
28 | 
29 | test("multiple levels filter types in nested", () => {
30 | expect(matchesFilter(client, { c: { e: 'cat', f: { g: { h: { k: 'dog'}}}}})).toBeTruthy()
31 | })
32 | 
33 | test("filter not found", () => {
34 | expect(matchesFilter(client, { c: { d: 'chimp'}})).toBeFalsy()
35 | expect(matchesFilter(client, { c: { e: 'chimp', f: { g: { h: { k: 'dog'}}}}})).toBeFalsy()
36 | })
-------------------------------------------------------------------------------- /frontend/src/data/FilterRunner.ts: --------------------------------------------------------------------------------
1 | export const matchesFilter = (client: any, filter: any): boolean => {
2 | const entries = Object.entries(filter)
3 | let matchesAll = entries.length
4 | 
5 | for (const [key, value] of entries) {
6 | if (typeof value === 'object') {
7 | if (matchesFilter(client[key], value)) {
8 | matchesAll--
9 | }
10 | }
11 | 
12 | const clientValue = client[key] as number | string
13 | const filterValue = value as number | string
14 | 
15 | if (typeof clientValue === 'number' && typeof filterValue === 'string') {
16 | const numberMatches = filterValue.match(/(?<operator>\w+)\s*(?<value>\d+)/)
17 | if (numberMatches?.groups) {
18 | const operator = numberMatches.groups.operator
19 | const value = parseFloat(numberMatches.groups.value)
20 | 
21 | switch (operator) {
22 | case 'gt': {
23 | if (clientValue > value) matchesAll--
24 | break;
25 | }
26 | case 'gte': {
27 | if (clientValue >= value) matchesAll--
28 | break;
29 | }
30 | case 'lt': {
31 | if (clientValue < value) matchesAll--
32 | break;
33 | }
34 | case 'lte': {
35 | if (clientValue <= value) matchesAll--
36 | break;
37 | }
38 | default: {
39 | console.warn(`Invalid conditional operator: "${operator}"`)
40 | break;
41 | }
42 | }
43 | }
44 | }
45 | else if (value === clientValue) {
46 | matchesAll--
47 | }
48 | }
49 | 
50 | return (matchesAll === 0)
51 | }
-------------------------------------------------------------------------------- /frontend/src/data/FilterUtils.ts: --------------------------------------------------------------------------------
1 | export type FilterOperator = "eq" | "not" | "lt" | "lte" | "gt" | "gte"
2 | 
3 | export const FilterOperatorToSymbol = {
4 | "eq": "=",
5 | "not": "!=",
6 | "lt": "<",
7 | "lte": "<=",
8 | "gt": ">",
9 | "gte": ">="
10 | }
11 | 
12 | export interface Filter {
13 | name: string;
14 | value: string;
15 | operator?: FilterOperator;
16 | }
17 | 
18 | export type FilterItem = Filter | undefined
19 | export type FilterGroup = FilterItem[];
20 | 
21 | export function generateQueryStringFromFilterGroups(filterGroups: FilterGroup[]): string {
22 | return `?filter=${JSON.stringify(filterGroups.map(l => (l.map(f => {
23 | const tokens = [f?.name, f?.value]
24 | if (f?.operator) tokens.push(f?.operator)
25 | return tokens.join(':')
26 | }))))}`
27 | }
28 | 
29 | export function generateFilterGroupsFromQueryString(queryString: string): FilterGroup[] {
30 | const rawFilters = new URLSearchParams(queryString).get('filter')
31 | if (!rawFilters) {
32 | return []
33 | }
34 | 
35 | try {
36 | const parsedFilters: [string[]] = JSON.parse(rawFilters)
37 | if (!Array.isArray(parsedFilters)) {
38 | throw Error(`Invalid filters: ${rawFilters}`)
39 | }
40 | 
41 | const filterGroup: FilterGroup[] = parsedFilters.map((unparsedFilters, idx) => {
42 | if (!Array.isArray(unparsedFilters)) {
43 | throw Error(`Invalid filter, item "${idx}" should be an array`)
44 | }
45 | 
46 | return unparsedFilters.map((unparsedFilter, unparsedIdx) => {
47 | if (typeof unparsedFilter !== "string") {
48 | throw Error(`Invalid filter, item "${idx}" at "${unparsedIdx}" should be a string`)
49 | }
50 | 
51 | const [name, value, operator] = unparsedFilter.split(":")
52 | if (operator && !(operator in FilterOperatorToSymbol)) {
53 | throw Error(`Invalid operator, item "${idx}" at "${unparsedIdx}" is invalid: ${operator}`)
54 | }
55 | 
56 | if (!name && !value) {
57 | throw Error(`Invalid key/value, item "${idx}" at "${unparsedIdx}" is missing: ${name} and ${value}`)
58 | }
59 | 
60 | return { name, value, operator } as Filter
61 | })
62 | })
63 | 
64 | return filterGroup
65 | } catch (e) {
66 | throw Error(`Cannot parse filters: '${rawFilters}'. Reason: ${e}`)
67 | }
68 | }
69 | 
70 | export function countTotalClientsInFilter(filters: FilterGroup[]) {
71 | return filters.reduce((groupCount, filterGroup) => {
72 | return groupCount + filterGroup.reduce((filterCount, filter) => {
73 | if (filter && filter.name === 'name') {
74 | filterCount++;
75 | }
76 | return filterCount;
77 | }, 0);
78 | }, 0);
79 | }
80 | 
81 | export function cleanFilterGroup(groups: FilterGroup[]) {
82 | const cache = new Set<string>()
83 | const hashGroup = (filterGroup: FilterGroup) => {
84 | return filterGroup.map(filter => {
85 | if (!filter) return undefined
86 | return `${filter.name}=${filter.value}`
87 | }).filter(f => !!f).sort().join('&')
88 | }
89 | 
90 | return groups.filter(f => {
91 | const hash = hashGroup(f);
92 | if (!cache.has(hash)) {
93 | cache.add(hash);
94 | return true;
95 | }
96 | return false;
97 | })
98 | }
99 | 
100 | export interface UniqueFilters {
101 | name: string;
102 | value: string;
103 | }
104 | 
105 | export function getUniqueFilters(groups: FilterGroup[] | undefined): UniqueFilters[] {
106 | if (!groups) {
107 | return []
108 | }
109 | 
110 | const cache = new Map<string, number>()
111 | 
112 | groups.forEach(group => {
113 | group.forEach(filter => {
114 | if (!filter) return;
115 | const key = `${filter.name}|${filter.value}`
116 | cache.set(key, (cache.get(key) || 0) + 1)
117 | })
118 | })
119 | 
120 | const processedFilters = [...cache.entries()].filter(f => f[1] === groups.length).map(f => {
121 | const [ name, value ] = f[0].split('|')
122 | return {
123 | name,
124 | value
125 | }
126 | })
127 | 
128 | let versionString = {
129 | major: '',
130 | minor: '',
131 | patch: ''
132 | }
133 | 
134 | const postProcess = processedFilters.reduce((acc: UniqueFilters[], filter) => {
135 | if (filter.name.startsWith('version_')) {
136 | const versionType = filter.name.substr('version_'.length) as 'major' | 'minor' | 'patch'
137 | versionString[versionType] = filter.value
138 | } else {
139 | acc.push(filter);
140 | }
141 | return acc
142 | }, [])
143 | 
144 | if (versionString.major || versionString.minor || versionString.patch) {
145 | postProcess.push({
146 | name: 'version',
147 | value: `${versionString.major || 0}.${versionString.minor || 0}.${versionString.patch || 0}`
148 | })
149 | }
150 | 
151 | return postProcess
152 | }
153 | 
154 | 
155 | export const drilldownFilter = (filters: FilterGroup[] | undefined, name: string, value: string) => {
156 | const set = (filterGroup: FilterGroup, name: string, value: string) => {
157 | const nameFoundIndex = filterGroup.findIndex(fg => fg?.name === name);
158 | if (nameFoundIndex !== -1) {
159 | filterGroup[nameFoundIndex]!.value = value
160 | } else {
161 | filterGroup.push({ name, value: value })
162 | }
163 | }
164 | 
165 | const newFilters = [...filters || []]
166 | if (newFilters.length === 0) {
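// No filter groups exist yet: seed one containing just the drilled-down filter.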
167 | newFilters.push([{name, value: value}])
168 | } else {
169 | newFilters.forEach(filterGroup => {
170 | if (value === 'Other') {
171 | return
172 | }
173 | if (name === 'version') {
174 | const versions = value.split('.')
175 | if (versions.length >= 1) {
176 | set(filterGroup, 'version_major', versions[0]);
177 | }
178 | if (versions.length >= 2) {
179 | set(filterGroup, 'version_minor', versions[1]);
180 | }
181 | if (versions.length >= 3) {
182 | set(filterGroup, 'version_patch', versions[2]);
183 | }
184 | } else {
185 | set(filterGroup, name, value);
186 | }
187 | })
188 | }
189 | 
190 | return cleanFilterGroup(newFilters);
191 | }
192 | 
193 | export function filterCount(filters: FilterGroup[] | undefined): number {
194 | if (!filters || filters.length === 0)
195 | return 0
196 | 
197 | return filters.reduce((prev, curr) => prev + curr.length, 0) || 0;
198 | }
199 | 
-------------------------------------------------------------------------------- /frontend/src/data/SortedMap.ts: --------------------------------------------------------------------------------
1 | interface ISortedMap<K, V> {
2 | clear(): void;
3 | delete(key: K): boolean;
4 | forEach(callbackfn: (value: V, key: K, map: Map<K, V>) => void, thisArg?: any): void;
5 | get(key: K): V | undefined;
6 | has(key: K): boolean;
7 | set(key: K, value: V): Map<K, V>;
8 | size(): number;
9 | map: (callbackfn: (value: [K, V], index: number, array: [K, V][]) => any, thisArg?: any) => any[];
10 | iterator: Map<K, V>
11 | }
12 | 
13 | export function SortedMap<K, V>(comparator: (a: [K, V], b: [K, V]) => number): ISortedMap<K, V> {
14 | const obj = new Map<K, V>();
15 | obj[Symbol.iterator] = function* () {
16 | yield* [...this.entries()].sort(comparator);
17 | };
18 | return {
19 | size: () => obj.size,
20 | clear: () => obj.clear(),
21 | delete: (key: K) => obj.delete(key),
22 | forEach: (callbackfn: (value: V, key: K, map: Map<K, V>) => void, thisArg?: any) => obj.forEach(callbackfn, thisArg),
23 | get: (key: K) => obj.get(key),
24 | has: (key: K) => obj.has(key),
25 | set: (key: K, value: V) => obj.set(key, value),
26 | map: (callbackfn: (value: [K, V], index: number, array: [K, V][]) => any, thisArg?: any) => {
27 | return [...obj].map(callbackfn, thisArg)
28 | },
29 | iterator: obj
30 | }
31 | }
32 | 
-------------------------------------------------------------------------------- /frontend/src/index.tsx: --------------------------------------------------------------------------------
1 | import { ChakraProvider, ColorModeScript, CSSReset } from '@chakra-ui/react';
2 | import React from 'react';
3 | import ReactDOM from 'react-dom';
4 | import { Routing } from './templates/Routing';
5 | import { theme } from './theme'
6 | 
7 | ReactDOM.render(
8 | <React.StrictMode>
9 | <ChakraProvider theme={theme}>
10 | <ColorModeScript />
11 | <CSSReset />
12 | <Routing />
13 | </ChakraProvider>
14 | </React.StrictMode>,
15 | document.getElementById('root')
16 | );
17 | 
-------------------------------------------------------------------------------- /frontend/src/organisms/ColorModeSwitcher.tsx: --------------------------------------------------------------------------------
1 | import * as React from "react"
2 | import {
3 | useColorMode,
4 | useColorModeValue,
5 | IconButton,
6 | IconButtonProps,
7 | } from "@chakra-ui/react"
8 | import { IoMdMoon, IoMdSunny } from "react-icons/io"
9 | 
10 | type ColorModeSwitcherProps = Omit<IconButtonProps, "aria-label">
11 | 
12 | export const ColorModeSwitcher: React.FC<ColorModeSwitcherProps> = (props) => {
13 | const { toggleColorMode } = useColorMode()
14 | const text = useColorModeValue("dark", "light")
15 | const SwitchIcon = useColorModeValue(IoMdMoon, IoMdSunny)
16 | 
17 | return (
18 | <IconButton
19 | size="md"
20 | fontSize="lg"
21 | variant="ghost"
22 | color="current"
23 | onClick={toggleColorMode}
24 | icon={<SwitchIcon />}
26 | aria-label={`Switch to ${text} mode`}
27 | {...props}
28 | />
29 | )
30 | }
31 | 
-------------------------------------------------------------------------------- /frontend/src/organisms/Filtering.tsx: --------------------------------------------------------------------------------
1 | import { Badge, Box, BoxProps, Button, Drawer, DrawerBody, DrawerCloseButton, DrawerContent, DrawerFooter, DrawerHeader, DrawerOverlay, Flex, forwardRef, HStack, Input, Select, StackProps, Tag, TagLabel, TagRightIcon, Text, useColorModeValue, useDisclosure, VStack } from "@chakra-ui/react"
2 | import React, { useCallback, useRef, useState, useEffect } from "react";
3 | import { VscCheck, VscClose, VscFilter, VscRemove } from "react-icons/vsc"
4 | import { FilterGroup, FilterOperatorToSymbol, FilterItem, FilterOperator, Filter, cleanFilterGroup, getUniqueFilters, UniqueFilters, filterCount } from "../data/FilterUtils";
5 | 
6 | type CachedNameMap = { [key: string]: boolean };
7 | interface EditableProps extends StackProps {
8 | item: FilterItem
9 | editMode?: boolean
10 | showRemoveButton?: boolean
11 | cachedNames: CachedNameMap
12 | onRemoveClicked?: (e: React.MouseEvent) => void
13 | onSaveClicked: (item: FilterItem) => void
14 | }
15 | 
16 | const EditableInput: React.FC<EditableProps> = forwardRef((props: EditableProps, ref: React.ForwardedRef<any>) => {
17 | const {
18 | item,
19 | editMode,
20 | showRemoveButton,
21 | cachedNames,
22 | onRemoveClicked,
23 | onSaveClicked,
24 | ...rest
25 | } = props
26 | 
27 | const activeNames = Object.keys(cachedNames).find(name => cachedNames[name])
28 | const [editItem, setEditItem] = useState(item || { name: activeNames || 'name', 'operator': 'eq', value: '' })
29 | const [editing, setEditing] = useState(editMode)
30 | 
31 | useEffect(() => {
32 | setEditing(editMode)
33 | }, [editMode]);
34 | 
35 | useEffect(() => {
36 | if (item) {
37 | setEditItem(item)
38 | }
39 | }, [item])
40 | 
41 | if (editing) {
42 | const onInternalSaveClicked = () => {
43 | setEditing(false)
44 | onSaveClicked(editItem)
45 | }
46 | 
47 | return (
48 | <HStack ref={ref} {...rest}>
49 | {/* Stripped from this dump: the edit-mode controls (a Select over the
cached filter names, a Select over the operators, and save/remove
buttons). Only the value input below survived: */}
70 | <Input value={editItem.value} onChange={(e) => setEditItem(item => ({ ...item, value: e.target.value }))} />
71 | </HStack>
72 | )
73 | }
74 | 
75 | // Stripped from this dump: the read-only rendering of the filter item (its
76 | // name, operator symbol, and value), with an optional remove button wired
77 | // to onRemoveClicked.
166 | return null // placeholder for the stripped markup
169 | })
170 | 
171 | 
174 | interface FilteringProps extends BoxProps {
175 | filters?: FilterGroup[]
176 | onFiltersChange?: (filters: FilterGroup[]) => void
177 | }
178 | 
179 | export const Filtering: React.FC<FilteringProps> = forwardRef((props: FilteringProps, ref: React.ForwardedRef<any>) => {
180 | const {
181 | filters,
182 | onFiltersChange,
183 | ...rest
184 | } = props
185 | 
186 | const color = useColorModeValue("teal.100", "teal.700")
187 | const { isOpen, onOpen, onClose } = useDisclosure()
188 | const btnRef = useRef<HTMLButtonElement>(null)
189 | 
190 | const [uniqueFilters, setUniqueFilters] = useState<UniqueFilters[]>([])
191 | const [editFilters, setEditFilters] = React.useState<FilterGroup[]>(filters || [])
192 | const [totalFilters, setTotalFilters] = React.useState(filters?.length || 0)
193 | 
194 | useEffect(() => {
195 | setUniqueFilters(getUniqueFilters(filters));
196 | setEditFilters(filters || []);
197 | }, [filters]);
198 | 
199 | useEffect(() => {
200 | setTotalFilters(filterCount(editFilters))
201 | }, [editFilters])
202 | 
203 | const removeFilter = useCallback((groupIndex: number, filterIndex?: number) => {
204 | if (filterIndex === undefined) {
205 | setEditFilters(groupFilters => {
206 | const newGroupFilters = [...groupFilters]
207 | newGroupFilters.splice(groupIndex, 1)
208 | return newGroupFilters
209 | })
210 | return
211 | }
212 | 
213 | setEditFilters(groupFilters => {
214 | const newGroupFilters = [...groupFilters]
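// Copy the outer array (and below, the target group) so the state update
// stays immutable.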
215 | const group: FilterGroup = [...groupFilters[groupIndex]]
216 | group.splice(filterIndex, 1)
217 | 
218 | if (group.length === 0) {
219 | newGroupFilters.splice(groupIndex, 1)
220 | } else {
221 | newGroupFilters[groupIndex] = group
222 | }
223 | return newGroupFilters
224 | })
225 | }, [])
226 | 
227 | const addFilter = useCallback((groupIndex?: number) => {
228 | if (groupIndex === undefined) {
229 | setEditFilters(groupFilters => {
230 | const newGroupFilters = [...groupFilters]
231 | newGroupFilters.push([undefined])
232 | return newGroupFilters
233 | })
234 | return
235 | }
236 | 
237 | setEditFilters(groupFilters => {
238 | const newGroupFilters = [...groupFilters]
239 | const group: FilterGroup = [...groupFilters[groupIndex]]
240 | group.push(undefined)
241 | newGroupFilters[groupIndex] = group
242 | return newGroupFilters
243 | })
244 | }, [])
245 | 
246 | const saveFilter = useCallback((groupIndex: number, filterIndex: number, savedFilter: FilterItem) => {
247 | if (!savedFilter || savedFilter.value === "") {
248 | return;
249 | }
250 | 
251 | setEditFilters(groupFilters => {
252 | const newGroupFilters = [...groupFilters]
253 | newGroupFilters[groupIndex][filterIndex] = savedFilter
254 | return newGroupFilters
255 | })
256 | }, [])
257 | 
258 | const removeUniqueFilter = useCallback((uniqueFilter: UniqueFilters) => {
259 | const newGroupFilters = [...editFilters].map(group => group.filter(filter => {
260 | if (uniqueFilter.name === 'version') {
261 | // const [major, minor, patch] = uniqueFilter.value.split('.')
262 | return !filter?.name.startsWith('version')
263 | } else {
264 | return filter?.name !== uniqueFilter.name
265 | }
266 | }))
267 | setEditFilters(newGroupFilters)
268 | onFiltersChange && onFiltersChange(newGroupFilters)
269 | }, [editFilters, onFiltersChange]);
270 | 
271 | const onApply = () => {
272 | const filtersToApply = cleanFilterGroup(editFilters.reduce<FilterGroup[]>((acc, curr) => {
273 | curr = curr.filter(g => !!g)
274 | if (curr.length) acc.push(curr)
275 | return acc
276 | }, []));
277 | setEditFilters(filtersToApply)
278 | onFiltersChange && onFiltersChange(filtersToApply)
279 | onClose()
280 | }
281 | 
282 | const onCancel = () => {
283 | setEditFilters(filters || [])
284 | onClose()
285 | }
286 | 
287 | const onClearAll = () => {
288 | setEditFilters([])
289 | onFiltersChange && onFiltersChange([])
290 | onClose()
291 | }
292 | 
293 | const filterText = totalFilters > 0 ? 
`${totalFilters} filters applied` : "Apply filter"
294 | 
295 | // The rendered markup was stripped from this dump and is not recoverable.
296 | // What survives shows the structure: a <Tag> per unique filter (displaying
297 | // filter.name and filter.value, with a close icon calling
298 | // removeUniqueFilter(filter)), a button labelled with filterText that opens
299 | // the drawer (btnRef/onOpen), and a <Drawer> titled "Filters" (marked
300 | // "(read-only)" when no onFiltersChange is given) listing each editable
301 | // filter group separated by "or", with an EditableInput per filter wired to
302 | // saveFilter/removeFilter/addFilter, plus Apply / Clear all / Cancel buttons
303 | // calling onApply, onClearAll, and onCancel.
304 | return null // placeholder for the stripped markup
353 | })
354 | 
-------------------------------------------------------------------------------- /frontend/src/organisms/Footer.tsx: --------------------------------------------------------------------------------
1 | import { Flex, HStack, Icon, Link, Text } from "@chakra-ui/react";
2 | import { VscChromeMinimize, VscGithub } from "react-icons/vsc";
3 | 
4 | export function Footer() {
5 | // The footer markup was stripped from this dump and is not recoverable.
6 | // What survives: a Flex/HStack layout with a GitHub icon (VscGithub) next
7 | // to a "contribute" link, a VscChromeMinimize divider, and a link to
8 | // "ethereum.org".
9 | return null // placeholder for the stripped markup
22 | }
-------------------------------------------------------------------------------- /frontend/src/organisms/Header.tsx: --------------------------------------------------------------------------------
1 | import { Alert, AlertIcon, Button, Flex, Grid, GridItem, HStack, Spacer, Text} from "@chakra-ui/react";
2 | import { useState } from "react";
3 | import { VscClose } from "react-icons/vsc";
4 | import { Logo } from "../atoms/Logo";
5 | import { LayoutEightPadding } from "../config";
6 | import { ColorModeSwitcher } from "./ColorModeSwitcher";
7 | 
8 | export function Note() {
9 | const [noteHidden, setNoteHidden] = useState(localStorage.getItem("noteHidden") === "true");
10 | 
11 | const hideNote = () => {
12 | localStorage.setItem("noteHidden", "true");
13 | setNoteHidden(true);
14 | }
15 | 
16 | if (noteHidden) {
17 | return null;
18 | }
19 | 
20 | return (
21 | <Alert>
22 | <AlertIcon />
23 | This crawler only has a partial view of the Ethereum network and lists only nodes discovered in the last 24 hours. This is not a full count of every node on the network.
24 | 