├── .gitignore
├── .gitlab-ci.yml
├── Cargo.lock
├── Cargo.toml
├── Dockerfile
├── LICENSE
├── Procfile
├── README.md
├── RustConfig
├── deployment.template.yml
├── diesel.toml
├── grafana_dashboard_01.json
├── grafana_dashboard_02.json
├── log_messages.txt
├── migrations
│   ├── .gitkeep
│   ├── 00000000000000_diesel_initial_setup
│   │   ├── down.sql
│   │   └── up.sql
│   ├── 2019-02-11-181154_create_substrate_logs
│   │   ├── down.sql
│   │   └── up.sql
│   ├── 2019-05-23-134208_create_idx_node_ip
│   │   ├── down.sql
│   │   └── up.sql
│   ├── 2019-05-23-135015_create_idx_msg_type
│   │   ├── down.sql
│   │   └── up.sql
│   ├── 2019-08-16-131907_create_peer_connections
│   │   ├── down.sql
│   │   └── up.sql
│   ├── 2019-08-16-132515_add_peer_to_substrate_logs
│   │   ├── down.sql
│   │   └── up.sql
│   ├── 2019-08-22-094016_create_index_on_peer_id
│   │   ├── down.sql
│   │   └── up.sql
│   ├── 2019-09-27-125919_create_index_created_at_msg_type
│   │   ├── down.sql
│   │   └── up.sql
│   ├── 2019-09-30-144205_add_audit_to_peer_connections
│   │   ├── down.sql
│   │   └── up.sql
│   ├── 2019-10-28-055145_create_host_systems
│   │   ├── down.sql
│   │   └── up.sql
│   ├── 2019-10-28-064727_create_benchmarks
│   │   ├── down.sql
│   │   └── up.sql
│   ├── 2019-12-11-135359_add_peer_id_to_benchmarks
│   │   ├── down.sql
│   │   └── up.sql
│   ├── 2019-12-13-130241_align_benchmarks
│   │   ├── down.sql
│   │   └── up.sql
│   ├── 2019-12-13-161417_create_benchmark_events
│   │   ├── down.sql
│   │   └── up.sql
│   ├── 2020-04-10-113123_add_info_to_peer_connections
│   │   ├── down.sql
│   │   └── up.sql
│   └── 2020-09-08-111742_create_index_on_peerset_nodes
│       ├── down.sql
│       └── up.sql
├── rustfmt.toml
├── src
│   ├── cache
│   │   └── mod.rs
│   ├── db
│   │   ├── benchmarks.rs
│   │   ├── filters.rs
│   │   ├── mod.rs
│   │   ├── models.rs
│   │   ├── nodes.rs
│   │   ├── peer_data.rs
│   │   ├── reputation.rs
│   │   └── stats.rs
│   ├── main.rs
│   ├── schema.rs
│   ├── util
│   │   └── mod.rs
│   └── web
│       ├── benchmarks.rs
│       ├── dashboard.rs
│       ├── feed.rs
│       ├── metrics.rs
│       ├── mod.rs
│       ├── nodes.rs
│       ├── reputation.rs
│       ├── root.rs
│       └── stats.rs
├── static
│   ├── benchmarks
│   │   └── index.html
│   ├── profiling
│   │   └── index.html
│   └── reputation
│       └── index.html
└── telemetry_messages.json
/.gitignore: -------------------------------------------------------------------------------- 1 | /target 2 | **/*.rs.bk 3 | .idea 4 | .env 5 | *.swp 6 | env_commands.md -------------------------------------------------------------------------------- /.gitlab-ci.yml: -------------------------------------------------------------------------------- 1 | # .gitlab-ci.yml 2 | # 3 | # substrate-analytics (derived from substrate ci) 4 | # 5 | # pipelines can be triggered manually in the web 6 | # setting DEPLOY_TAG will only deploy the tagged image 7 | # 8 | 9 | 10 | stages: 11 | - test 12 | - dockerize 13 | - deploy 14 | 15 | variables: 16 | GIT_STRATEGY: fetch 17 | CI_SERVER_NAME: "GitLab CI" 18 | CARGO_HOME: "/ci-cache/substrate-analytics/cargo/${CI_JOB_NAME}" 19 | 20 | .kubernetes-build: &kubernetes-build 21 | tags: 22 | - kubernetes-parity-build 23 | environment: 24 | name: parity-build 25 | 26 | #### stage: test 27 | 28 | # test-linux-stable: 29 | # stage: test 30 | # # doesn't contain libpq 31 | # image: parity/rust-substrate-build:stretch 32 | # variables: 33 | # RUST_TOOLCHAIN: stable 34 | # # Enable debug assertions since we are running optimized builds for testing 35 | # # but still want to have debug assertions.
36 | # RUSTFLAGS: -Cdebug-assertions=y 37 | # TARGET: native 38 | # tags: 39 | # - linux-docker 40 | # only: 41 | # - tags 42 | # - master 43 | # - schedules 44 | # - web 45 | # - /^[0-9]+$/ 46 | # except: 47 | # variables: 48 | # - $DEPLOY_TAG 49 | # before_script: 50 | # - sccache -s 51 | # script: 52 | # - time cargo test --all --release --verbose --locked 53 | # after_script: 54 | # - sccache -s 55 | 56 | .build-only: &build-only 57 | only: 58 | - master 59 | - tags 60 | - web 61 | 62 | #### stage: dockerize 63 | 64 | build-publish-docker-release: 65 | <<: *build-only 66 | <<: *kubernetes-build 67 | stage: dockerize 68 | image: quay.io/buildah/stable 69 | # collect VERSION artifact here to pass it on to kubernetes 70 | variables: 71 | # GIT_STRATEGY: none 72 | # DOCKERFILE: scripts/docker/Dockerfile 73 | CONTAINER_IMAGE: parity/substrate-analytics 74 | before_script: 75 | - test "$Docker_Hub_User_Parity" -a "$Docker_Hub_Pass_Parity" || 76 | ( echo "no docker credentials provided"; exit 1 ) 77 | - echo "$Docker_Hub_Pass_Parity" | 78 | buildah login --username "$Docker_Hub_User_Parity" --password-stdin docker.io 79 | - buildah info 80 | script: 81 | - buildah bud 82 | --format=docker 83 | --tag "$CONTAINER_IMAGE:$CI_COMMIT_SHORT_SHA" 84 | --tag "$CONTAINER_IMAGE:latest" . 85 | - buildah push --format=v2s2 $CONTAINER_IMAGE:$CI_COMMIT_SHORT_SHA 86 | - buildah push --format=v2s2 $CONTAINER_IMAGE:latest 87 | after_script: 88 | - buildah logout docker.io 89 | except: 90 | variables: 91 | - $DEPLOY_TAG 92 | 93 | .deploy-template: &deploy 94 | <<: *build-only 95 | stage: deploy 96 | when: manual 97 | retry: 1 98 | image: paritytech/kubetools:latest 99 | variables: 100 | NAMESPACE: "${KUBE_NAMESPACE}" 101 | REPLICAS: 1 102 | WS_PORT: 1024 103 | ANALYTICS_LOG_EXPIRY_H: 3 104 | ANALYTICS_RUST_LOG: debug 105 | WS_MAX_PAYLOAD: 524_288 # Needs to be more than 65_536 (64KiB) which is actix-web default 106 | ASSETS_PATH: /srv/substrate-analytics 107 | tags: 108 | # this is the runner that is used to deploy it 109 | - kubernetes-parity-build 110 | before_script: 111 | - kubectl version 112 | - test "${DEPLOY_TAG}" || DEPLOY_TAG="$CI_COMMIT_SHORT_SHA" 113 | script: 114 | - echo "Substrate-analytics version = ${DEPLOY_TAG}" 115 | - export SUBSTRATE_ANALYTICS_DOCKER_IMAGE="parity/substrate-analytics:${DEPLOY_TAG}" 116 | # or use helm to render the template 117 | - cat ./deployment.template.yml | envsubst | tee deployment.yaml 118 | - kubectl apply -n "$KUBE_NAMESPACE" -f ./deployment.yaml 119 | - rm -f ./deployment.yaml 120 | - echo "# substrate-analytics namespace ${KUBE_NAMESPACE}" 121 | - kubectl -n ${KUBE_NAMESPACE} get all 122 | - echo "# wait for the rollout to complete" 123 | - kubectl -n ${KUBE_NAMESPACE} rollout status deployment/substrate-analytics 124 | 125 | 126 | # have environment:url eventually point to the logs 127 | 128 | .deploy-cibuild: &deploy-cibuild 129 | <<: *deploy 130 | dependencies: 131 | - build-publish-docker-release 132 | except: 133 | variables: 134 | - $DEPLOY_TAG 135 | 136 | .deploy-tag: &deploy-tag 137 | <<: *deploy 138 | only: 139 | variables: 140 | - $DEPLOY_TAG 141 | 142 | # have environment:url eventually point to the logs 143 | 144 | deploy-ew3: 145 | <<: *deploy-cibuild 146 | environment: 147 | name: parity-prod-ew3 148 | 149 | # deploy-ue1: 150 | # extends: .deploy-cibuild 151 | # environment: 152 | # name: parity-prod-ue1 153 | 154 | deploy-ew3-tag: 155 | <<: *deploy-tag 156 | environment: 157 | name: parity-prod-ew3 158 | 159 | # deploy-ue1-tag: 160 | # extends: 
.deploy-tag 161 | # environment: 162 | # name: parity-prod-ue1 163 | 164 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "substrate-analytics" 3 | version = "0.1.4" 4 | authors = ["Matt Rutherford "] 5 | edition = "2018" 6 | 7 | [dependencies] 8 | actix = "0.10.0-alpha.1" 9 | actix-codec = "0.3.0" 10 | actix-files = "0.3.0-beta.1" 11 | actix-http = { version = "2.0.0-beta.3", features = ["openssl", "rustls", "compress", "secure-cookies", "actors"] } 12 | actix-rt = "1.1.1" 13 | actix-web = "3.0.0-beta.3" 14 | actix-web-actors = "3.0.0-beta.1" 15 | awc = "2.0.0-beta.3" 16 | 17 | bytes = "0.5.3" 18 | diesel = { version = "1.4.3", features = ["postgres", "chrono", "network-address", "r2d2", "numeric", "serde_json"] } 19 | bigdecimal = "0.0.11" 20 | chrono = { version = "0.4", features = ["serde"] } 21 | dotenv = "0.13.0" 22 | env_logger = "0.6" 23 | failure = "0.1.1" 24 | futures = "0.3.5" 25 | json = "*" 26 | lazy_static = "1.1.0" 27 | log = "*" 28 | num_cpus = "1.0" 29 | parking_lot = "0.11" 30 | rand = "0.7.2" 31 | serde_json = "1" 32 | serde = "1" 33 | serde_derive = "1" 34 | slice-deque = "0.3.0" 35 | statrs = "0.12.0" 36 | sysinfo = "0.13.0" -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM rust:slim as builder 2 | 3 | RUN apt-get update && \ 4 | DEBIAN_FRONTEND=noninteractive apt-get upgrade -y && \ 5 | DEBIAN_FRONTEND=noninteractive apt-get install -y libpq-dev 6 | 7 | # build diesel first as there may be no changes and caching will be used 8 | RUN echo "building diesel-cli" && \ 9 | cargo install diesel_cli --root /substrate-analytics --bin diesel --force --no-default-features --features postgres 10 | 11 | WORKDIR /substrate-analytics 12 | 13 | # speed up docker build using pre-built dependencies 14 | # http://whitfin.io/speeding-up-rust-docker-builds/ 15 | RUN USER=root cargo init --bin 16 | 17 | # copy over your manifests 18 | COPY ./Cargo.lock ./Cargo.lock 19 | COPY ./Cargo.toml ./Cargo.toml 20 | 21 | # this build step will cache your dependencies 22 | RUN cargo build --release 23 | RUN rm -rf ./src ./target/release/deps/substrate_analytics-* 24 | 25 | # copy your source tree 26 | COPY ./src ./src 27 | 28 | # ADD ./ ./ 29 | 30 | RUN echo "building substrate-analytics" && \ 31 | cargo build --release 32 | 33 | FROM debian:stretch-slim 34 | # metadata 35 | LABEL maintainer="devops-team@parity.io" \ 36 | vendor="Parity Technologies" \ 37 | name="parity/substrate-analytics" \ 38 | description="Substrate Analytical and Visual Environment - Incoming telemetry" \ 39 | url="https://github.com/paritytech/substrate-analytics/" \ 40 | vcs-url="./" 41 | 42 | 43 | RUN apt-get update && \ 44 | DEBIAN_FRONTEND=noninteractive apt-get upgrade -y && \ 45 | DEBIAN_FRONTEND=noninteractive apt-get install -y libpq5 && \ 46 | DEBIAN_FRONTEND=noninteractive apt-get autoremove -y && \ 47 | apt-get clean && \ 48 | find /var/lib/apt/lists/ -type f -not -name lock -delete 49 | 50 | RUN useradd -m -u 1000 -U -s /bin/sh -d /analytics analytics 51 | 52 | COPY --from=builder /substrate-analytics/target/release/substrate-analytics /usr/local/bin/ 53 | COPY --from=builder /substrate-analytics/bin/diesel /usr/local/bin/ 54 | 55 | COPY ./migrations /analytics/migrations 56 | COPY ./static /srv/substrate-analytics 57 |
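# Note: at container start the ENTRYPOINT below first applies any pending
# diesel migrations (copied in above) and then execs the substrate-analytics
# binary as the unprivileged `analytics` user.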
58 | WORKDIR /analytics 59 | USER analytics 60 | ENV RUST_BACKTRACE 1 61 | 62 | ENTRYPOINT [ "/bin/sh", "-x", "-c", "/usr/local/bin/diesel migration run && exec /usr/local/bin/substrate-analytics"] -------------------------------------------------------------------------------- /Procfile: -------------------------------------------------------------------------------- 1 | release: ./target/release/diesel migration run 2 | web: ./target/release/save -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | ## Substrate Analytics 2 | 3 | \* To connect to substrate-analytics you must whitelist your IP address in `deployment.template.yml` 4 | 5 | Substrate Analytics comprises a websocket server that accepts incoming telemetry from multiple 6 | [Substrate](https://github.com/paritytech/substrate) nodes. substrate-analytics is designed to be resilient (to network errors), 7 | performant, and horizontally scalable by deploying more servers. 8 | 9 | Telemetry is stored in a PostgreSQL database; the database schema is managed via `diesel` migrations. 10 | 11 | Stored data is purged from the DB according to `LOG_EXPIRY_H`. 12 | 13 | For convenience there are also some JSON endpoints for ad-hoc queries, although it is expected that 14 | the data is accessed directly from the database by a suitable dashboard (e.g. Grafana). 15 | 16 | ### Routes 17 | 18 | #### Data ingestion 19 | `substrate-analytics` can work in one of two modes: with or without purging data after `LOG_EXPIRY_H` hours. The mode it operates under depends on which of the following two endpoints you send data to from your substrate nodes. 20 | - **`/`** 21 | - incoming telemetry (with expiry as set by `LOG_EXPIRY_H`) (ws) - set with this option in substrate cli: `--telemetry-url 'ws://127.0.0.1:8080 5'` 22 | - **`/audit`** 23 | - incoming telemetry with no expiry (ws) - set with this option in substrate cli: `--telemetry-url 'ws://127.0.0.1:8080/audit 5'` 24 | 25 | #### JSON endpoints 26 | `substrate-analytics` includes a few convenience endpoints to query for common data. 27 | - **`/stats/db`** 28 | - statistics about the postgres db, showing table and index sizes on disk 29 | - **`/nodes`** 30 | - list of logged nodes 31 | - **`/nodes/log_stats?peer_id=Qmd5K38Yti1NStacv7fjJwsXDCUZcf1ioKcAuFkq88RKtx`** 32 | - shows the quantity of each type of log message received 33 | - **`/nodes/logs?peer_id=Qmd5K38Yti1NStacv7fjJwsXDCUZcf1ioKcAuFkq88RKtx&limit=1&msg=tracing.profiling&target=pallet_babe&start_time=2020-03-25T13:17:09.008533`** 34 | - recent log messages. Required params: `peer_id`. Optional params: `msg, target, start_time, end_time, limit`. 35 | 36 | `msg`: String. Type of log message received, e.g. `block.import`. See [./telemetry_messages.json](telemetry_messages.json) for the current list of message types. 37 | 38 | `target`: String. Origin of the message, e.g. `NetworkInitialSync` 39 | 40 | `start_time`: String. Include entries more recent than this; format: `2019-01-01T00:00:00`. Default: `NOW`. 41 | 42 | `end_time`: String. Include entries less recent than this; format: `2019-01-01T00:00:00`. Default: `NOW`. 43 | 44 | `limit`: Number. Don't include more results than this. Default: `100` 45 | - **`/reputation/{peer_id}`** 46 | - reported reputation for `peer_id` from the POV of other nodes.
47 | - **`/reputation/logged`** 48 | - reported reputation for all peers from the POV of all logged (past/present) nodes 49 | - **`/reputation`** 50 | - reported reputation for all peers, unfiltered 51 | (note that this can contain many entries that are not even part of the network) 52 | 53 | 54 | `reputation` routes take the following optional parameters (with sensible defaults if not specified): 55 | - `max_age_s` in the format: `10` 56 | - `limit` in the format: `100` 57 | 58 | #### Self-monitoring 59 | 60 | Substrate Analytics provides a `/metrics` endpoint for Prometheus, which is useful for monitoring the analytics instance itself. Visit the endpoint in a browser to see what metrics are available. 61 | 62 | ### Set up for development and deployment 63 | - [Install Postgres](https://www.postgresql.org/docs/current/tutorial-install.html) 64 | - For development, create a `.env` file in the project root containing: 65 | - `DATABASE_URL=postgres://username:password@localhost/substrate-analytics` 66 | - `PORT=8080` 67 | - any other settings from the list of environment variables below 68 | - Next, install [Diesel cli](https://github.com/diesel-rs/diesel/tree/master/diesel_cli) 69 | - You might need [additional packages](https://github.com/diesel-rs/diesel/blob/master/guide_drafts/backend_installation.md) 70 | - Run `diesel database setup` to initialise the postgres DB 71 | - You must run `diesel migration run` after any changes to the database schema 72 | 73 | Optionally specify the following environment variables: 74 | 75 | - `HEARTBEAT_INTERVAL` (default: 5) 76 | - `CLIENT_TIMEOUT_S` (default: 10) 77 | - `PURGE_INTERVAL_S` (default: 600) 78 | - `LOG_EXPIRY_H` (default: 280320) 79 | - `MAX_PENDING_CONNECTIONS` (default: 8192) 80 | - `WS_MAX_PAYLOAD` (default: 524_288) 81 | - `NUM_THREADS` (default: CPUs * 3) 82 | - `DB_POOL_SIZE` (default: `NUM_THREADS`) 83 | - `DB_BATCH_SIZE` (default: 1024) - batch size for insert 84 | - `DB_SAVE_LATENCY_MS` (default: 100) - max latency (ms) for insert 85 | - `CACHE_UPDATE_TIMEOUT_S` (default: 15) - seconds before a timeout warning; the update is aborted after 4× the timeout 86 | - `CACHE_UPDATE_INTERVAL_MS` (default: 1000) - time interval (ms) between updates 87 | - `CACHE_EXPIRY_S` (default: 3600) - expiry time (s) of log messages 88 | - `ASSETS_PATH` (default: `./static`) - static files path 89 | 90 | Include `RUST_LOG` in your `.env` file to make `substrate-analytics` log to stdout. A good development setting is `RUST_LOG = debug`. 91 | 92 | Substrate log messages are batched together before being sent to the database actor for `INSERT` into the Postgres DB. A batch is flushed once it contains `DB_BATCH_SIZE` messages or after `DB_SAVE_LATENCY_MS` milliseconds, whichever comes first. 93 | 94 | #### Benchmarking 95 | 96 | Substrate-analytics has endpoints for defining benchmarks and the host systems that run them. Benchmark data is 97 | designed to be cross-referenced with telemetry data to provide insights into the node and system under test.
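For example, a host system could be registered from Rust with the `awc` client that is already among this project's dependencies. This is a minimal sketch rather than part of the project itself: the local address/port and the `expect` error handling are assumptions, and the payload follows the format documented under the JSON endpoints below.

```rust
use serde_json::json;

#[actix_rt::main]
async fn main() {
    // Register the machine that will run the benchmark; the server replies
    // with the stored record, including its newly created `id`.
    let mut resp = awc::Client::new()
        .post("http://127.0.0.1:8080/host_systems") // assumed local instance
        .send_json(&json!({
            "cpu_clock": 2600,
            "cpu_qty": 4,
            "description": "Any notes to go here",
            "disk_info": "NVME",
            "os": "freebsd",
            "ram_mb": 8192
        }))
        .await
        .expect("request failed");
    let created: serde_json::Value = resp.json().await.expect("invalid JSON body");
    println!("created host_system: {}", created);
}
```

The `id` returned here is what a subsequent `POST /benchmarks` payload references through its `host_system_id` field.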
98 | 99 | JSON endpoints: 100 | 101 | - **`/host_systems`**: the server machines we're benchmarking 102 | - `GET` to list all; `POST` to create new using the format (returns object with newly created `id`): 103 | ```json 104 | { 105 | "cpu_clock":2600, 106 | "cpu_qty":4, 107 | "description":"Any notes to go here", 108 | "disk_info":"NVME", 109 | "os":"freebsd", 110 | "ram_mb":8192 111 | } 112 | ``` 113 | - **`/benchmarks`**: 114 | - `GET` to list all, `POST` to create new using the format (returns object with newly created `id`): 115 | ```json 116 | { 117 | "benchmark_spec":{ 118 | "tbd":"tbd" 119 | }, 120 | "chain_spec":{ 121 | "name":"Development", 122 | "etc": "more chain spec stuff" 123 | }, 124 | "description":"notes", 125 | "host_system_id":2, 126 | "ts_end":"2019-10-28T14:05:27.618903", 127 | "ts_start":"1970-01-01T00:00:01" 128 | } 129 | ``` -------------------------------------------------------------------------------- /RustConfig: -------------------------------------------------------------------------------- 1 | RUST_INSTALL_DIESEL=1 2 | 3 | -------------------------------------------------------------------------------- /deployment.template.yml: -------------------------------------------------------------------------------- 1 | # substrate-analytics 2 | # deployment.template.yml 3 | # 4 | # use: 5 | # envsubst < deployment.template.yml | kubectl apply -f - 6 | # 7 | # the following variables have to be set: 8 | # 9 | # SUBSTRATE_ANALYTICS_DB_URL postgres://username:password@localhost/save 10 | # WS_PORT can be 1024 11 | # REPLICAS 12 | # SUBSTRATE_ANALYTICS_DOCKER_IMAGE 13 | # 14 | # Optional variables: 15 | # 16 | # HEARTBEAT_INTERVAL 17 | # CLIENT_TIMEOUT_S 18 | # PURGE_INTERVAL_S 19 | # LOG_EXPIRY_H 20 | # MAX_PENDING_CONNECTIONS 21 | # WS_MAX_PAYLOAD 22 | # DB_POOL_SIZE 23 | # DB_BATCH_SIZE 24 | # DB_SAVE_LATENCY_MS 25 | # CACHE_UPDATE_TIMEOUT_S 26 | # CACHE_UPDATE_INTERVAL_MS 27 | # CACHE_EXPIRY_S 28 | # 29 | --- 30 | apiVersion: apps/v1 31 | kind: Deployment 32 | metadata: 33 | name: substrate-analytics 34 | namespace: $NAMESPACE 35 | spec: 36 | selector: 37 | matchLabels: 38 | app: substrate-analytics 39 | 40 | replicas: $REPLICAS 41 | revisionHistoryLimit: 2 42 | minReadySeconds: 3 43 | strategy: 44 | rollingUpdate: 45 | maxUnavailable: 0 46 | maxSurge: 1 47 | 48 | template: 49 | metadata: 50 | labels: 51 | app: substrate-analytics 52 | 53 | spec: 54 | containers: 55 | - name: substrate-analytics 56 | image: $SUBSTRATE_ANALYTICS_DOCKER_IMAGE 57 | imagePullPolicy: Always 58 | ports: 59 | - containerPort: $WS_PORT 60 | env: 61 | - name: DATABASE_URL 62 | value: "$SUBSTRATE_ANALYTICS_DB_URL" 63 | envFrom: 64 | - configMapRef: 65 | name: substrate-analytics 66 | readinessProbe: 67 | tcpSocket: 68 | port: $WS_PORT 69 | initialDelaySeconds: 5 70 | periodSeconds: 10 71 | livenessProbe: 72 | tcpSocket: 73 | port: $WS_PORT 74 | initialDelaySeconds: 15 75 | periodSeconds: 20 76 | --- 77 | apiVersion: v1 78 | kind: ConfigMap 79 | metadata: 80 | name: substrate-analytics 81 | namespace: $NAMESPACE 82 | data: 83 | PORT: "$WS_PORT" 84 | HEARTBEAT_INTERVAL: "$HEARTBEAT_INTERVAL" 85 | CLIENT_TIMEOUT_S: "$CLIENT_TIMEOUT_S" 86 | PURGE_INTERVAL_S: "$PURGE_INTERVAL_S" 87 | MAX_PENDING_CONNECTIONS: "$MAX_PENDING_CONNECTIONS" 88 | LOG_EXPIRY_H: "$ANALYTICS_LOG_EXPIRY_H" 89 | RUST_LOG: "$ANALYTICS_RUST_LOG" 90 | WS_MAX_PAYLOAD: "$WS_MAX_PAYLOAD" 91 | DB_POOL_SIZE: "$DB_POOL_SIZE" 92 | DB_BATCH_SIZE: "$DB_BATCH_SIZE" 93 | DB_SAVE_LATENCY_MS: "$DB_SAVE_LATENCY_MS" 94 |
CACHE_UPDATE_TIMEOUT_S: "$CACHE_UPDATE_TIMEOUT_S" 95 | CACHE_UPDATE_INTERVAL_MS: "$CACHE_UPDATE_INTERVAL_MS" 96 | CACHE_EXPIRY_S: "$CACHE_EXPIRY_S" 97 | ASSETS_PATH: "$ASSETS_PATH" 98 | --- 99 | apiVersion: monitoring.coreos.com/v1 100 | kind: ServiceMonitor 101 | metadata: 102 | name: substrate-analytics 103 | namespace: $NAMESPACE 104 | spec: 105 | selector: 106 | matchLabels: 107 | app: substrate-analytics 108 | endpoints: 109 | - port: ws 110 | interval: 30s 111 | --- 112 | apiVersion: v1 113 | kind: Service 114 | metadata: 115 | name: substrate-analytics 116 | namespace: $NAMESPACE 117 | spec: 118 | selector: 119 | app: substrate-analytics 120 | ports: 121 | - name: ws 122 | port: $WS_PORT 123 | targetPort: $WS_PORT 124 | protocol: TCP 125 | sessionAffinity: None 126 | type: LoadBalancer 127 | loadBalancerSourceRanges: 128 | - 35.198.174.150/32 # test-machine 129 | - 212.227.252.235/32 # vpn-server 130 | - 35.246.224.91/32 # substrate20-0 131 | - 35.246.210.11/32 # substrate20-1 132 | - 35.198.110.45/32 # substrate20-2 133 | - 35.198.114.154/32 # substrate20-3 134 | - 51.77.66.187/32 # kusama-bootnode-0 135 | - 51.79.17.206/32 # kusama-bootnode-1 136 | - 104.155.57.65/32 # matt-node1 (custom build/metrics) 137 | - 35.242.161.55/32 # matt-node2 138 | - 148.0.0.0/8 139 | -------------------------------------------------------------------------------- /diesel.toml: -------------------------------------------------------------------------------- 1 | # For documentation on how to configure this file, 2 | # see diesel.rs/guides/configuring-diesel-cli 3 | 4 | [print_schema] 5 | file = "src/schema.rs" 6 | -------------------------------------------------------------------------------- /grafana_dashboard_01.json: -------------------------------------------------------------------------------- 1 | { 2 | "annotations": { 3 | "list": [ 4 | { 5 | "builtIn": 1, 6 | "datasource": "-- Grafana --", 7 | "enable": true, 8 | "hide": true, 9 | "iconColor": "rgba(0, 211, 255, 1)", 10 | "name": "Annotations & Alerts", 11 | "type": "dashboard" 12 | } 13 | ] 14 | }, 15 | "editable": true, 16 | "gnetId": null, 17 | "graphTooltip": 0, 18 | "id": 3, 19 | "iteration": 1558627311234, 20 | "links": [], 21 | "panels": [ 22 | { 23 | "aliasColors": {}, 24 | "bars": false, 25 | "dashLength": 10, 26 | "dashes": false, 27 | "datasource": "PostgreSQL", 28 | "fill": 1, 29 | "gridPos": { 30 | "h": 8, 31 | "w": 12, 32 | "x": 0, 33 | "y": 0 34 | }, 35 | "id": 4, 36 | "legend": { 37 | "alignAsTable": true, 38 | "avg": false, 39 | "current": true, 40 | "max": true, 41 | "min": false, 42 | "show": true, 43 | "total": false, 44 | "values": true 45 | }, 46 | "lines": true, 47 | "linewidth": 1, 48 | "links": [], 49 | "nullPointMode": "null", 50 | "percentage": false, 51 | "pointradius": 2, 52 | "points": false, 53 | "renderer": "flot", 54 | "seriesOverrides": [], 55 | "spaceLength": 10, 56 | "stack": false, 57 | "steppedLine": false, 58 | "targets": [ 59 | { 60 | "format": "time_series", 61 | "group": [], 62 | "metricColumn": "none", 63 | "rawQuery": true, 64 | "rawSql": "SELECT\n created_at AS \"time\",\n CAST(logs->>'round' as INTEGER) as round\nFROM substrate_logs\nWHERE\n $__timeFilter(created_at)\n AND logs->>'msg' = 'afg.announcing_blocks_to_voted_peers'\n AND substrate_logs.node_ip LIKE '$node_ip%'\nORDER BY 1", 65 | "refId": "A", 66 | "select": [ 67 | [ 68 | { 69 | "params": [ 70 | "id" 71 | ], 72 | "type": "column" 73 | } 74 | ] 75 | ], 76 | "table": "substrate_logs", 77 | "timeColumn": "created_at", 78 | "timeColumnType": 
"timestamp", 79 | "where": [ 80 | { 81 | "name": "$__timeFilter", 82 | "params": [], 83 | "type": "macro" 84 | } 85 | ] 86 | } 87 | ], 88 | "thresholds": [], 89 | "timeFrom": null, 90 | "timeRegions": [], 91 | "timeShift": null, 92 | "title": "Round", 93 | "tooltip": { 94 | "shared": true, 95 | "sort": 0, 96 | "value_type": "individual" 97 | }, 98 | "type": "graph", 99 | "xaxis": { 100 | "buckets": null, 101 | "mode": "time", 102 | "name": null, 103 | "show": true, 104 | "values": [] 105 | }, 106 | "yaxes": [ 107 | { 108 | "format": "none", 109 | "label": null, 110 | "logBase": 1, 111 | "max": null, 112 | "min": null, 113 | "show": true 114 | }, 115 | { 116 | "format": "none", 117 | "label": null, 118 | "logBase": 1, 119 | "max": null, 120 | "min": null, 121 | "show": true 122 | } 123 | ], 124 | "yaxis": { 125 | "align": false, 126 | "alignLevel": null 127 | } 128 | }, 129 | { 130 | "aliasColors": {}, 131 | "bars": false, 132 | "dashLength": 10, 133 | "dashes": false, 134 | "datasource": "PostgreSQL", 135 | "fill": 1, 136 | "gridPos": { 137 | "h": 9, 138 | "w": 12, 139 | "x": 0, 140 | "y": 8 141 | }, 142 | "id": 2, 143 | "legend": { 144 | "alignAsTable": true, 145 | "avg": true, 146 | "current": true, 147 | "max": true, 148 | "min": true, 149 | "show": true, 150 | "total": false, 151 | "values": true 152 | }, 153 | "lines": true, 154 | "linewidth": 1, 155 | "links": [], 156 | "nullPointMode": "null", 157 | "percentage": false, 158 | "pointradius": 2, 159 | "points": false, 160 | "renderer": "flot", 161 | "seriesOverrides": [], 162 | "spaceLength": 10, 163 | "stack": false, 164 | "steppedLine": false, 165 | "targets": [ 166 | { 167 | "format": "time_series", 168 | "group": [], 169 | "metricColumn": "none", 170 | "rawQuery": true, 171 | "rawSql": "SELECT\n created_at AS \"time\",\n CAST (logs->>'peers' as INTEGER) as peer_count\nFROM substrate_logs\nWHERE\n $__timeFilter(created_at)\n AND substrate_logs.node_ip LIKE '10.11.0.11%'\n AND logs->>'msg' = 'system.interval'\nORDER BY 1", 172 | "refId": "A", 173 | "select": [ 174 | [ 175 | { 176 | "params": [ 177 | "id" 178 | ], 179 | "type": "column" 180 | } 181 | ] 182 | ], 183 | "table": "substrate_logs", 184 | "timeColumn": "created_at", 185 | "timeColumnType": "timestamp", 186 | "where": [ 187 | { 188 | "name": "$__timeFilter", 189 | "params": [], 190 | "type": "macro" 191 | } 192 | ] 193 | } 194 | ], 195 | "thresholds": [], 196 | "timeFrom": null, 197 | "timeRegions": [], 198 | "timeShift": null, 199 | "title": "Peers", 200 | "tooltip": { 201 | "shared": true, 202 | "sort": 0, 203 | "value_type": "individual" 204 | }, 205 | "type": "graph", 206 | "xaxis": { 207 | "buckets": null, 208 | "mode": "time", 209 | "name": null, 210 | "show": true, 211 | "values": [] 212 | }, 213 | "yaxes": [ 214 | { 215 | "format": "short", 216 | "label": null, 217 | "logBase": 1, 218 | "max": null, 219 | "min": null, 220 | "show": true 221 | }, 222 | { 223 | "format": "short", 224 | "label": null, 225 | "logBase": 1, 226 | "max": null, 227 | "min": null, 228 | "show": true 229 | } 230 | ], 231 | "yaxis": { 232 | "align": false, 233 | "alignLevel": null 234 | } 235 | } 236 | ], 237 | "refresh": false, 238 | "schemaVersion": 18, 239 | "style": "dark", 240 | "tags": [], 241 | "templating": { 242 | "list": [ 243 | { 244 | "allValue": null, 245 | "current": { 246 | "tags": [], 247 | "text": "10.11.0.12", 248 | "value": "10.11.0.12" 249 | }, 250 | "datasource": "PostgreSQL", 251 | "definition": "select distinct split_part(node_ip, ':', 1) from substrate_logs", 252 | 
"hide": 0, 253 | "includeAll": false, 254 | "label": null, 255 | "multi": false, 256 | "name": "node_ip", 257 | "options": [ 258 | { 259 | "selected": false, 260 | "text": "10.11.0.10", 261 | "value": "10.11.0.10" 262 | }, 263 | { 264 | "selected": false, 265 | "text": "10.11.0.11", 266 | "value": "10.11.0.11" 267 | }, 268 | { 269 | "selected": true, 270 | "text": "10.11.0.12", 271 | "value": "10.11.0.12" 272 | }, 273 | { 274 | "selected": false, 275 | "text": "10.11.0.13", 276 | "value": "10.11.0.13" 277 | }, 278 | { 279 | "selected": false, 280 | "text": "10.24.3.1", 281 | "value": "10.24.3.1" 282 | } 283 | ], 284 | "query": "select distinct split_part(node_ip, ':', 1) from substrate_logs", 285 | "refresh": 0, 286 | "regex": "", 287 | "skipUrlSync": false, 288 | "sort": 5, 289 | "tagValuesQuery": "", 290 | "tags": [], 291 | "tagsQuery": "", 292 | "type": "query", 293 | "useTags": false 294 | } 295 | ] 296 | }, 297 | "time": { 298 | "from": "now/d", 299 | "to": "now" 300 | }, 301 | "timepicker": { 302 | "refresh_intervals": [ 303 | "5s", 304 | "10s", 305 | "30s", 306 | "1m", 307 | "5m", 308 | "15m", 309 | "30m", 310 | "1h", 311 | "2h", 312 | "1d" 313 | ], 314 | "time_options": [ 315 | "5m", 316 | "15m", 317 | "1h", 318 | "6h", 319 | "12h", 320 | "24h", 321 | "2d", 322 | "7d", 323 | "30d" 324 | ] 325 | }, 326 | "timezone": "", 327 | "title": "Substrate Save", 328 | "uid": "AsdLzYWWk", 329 | "version": 6 330 | } -------------------------------------------------------------------------------- /grafana_dashboard_02.json: -------------------------------------------------------------------------------- 1 | { 2 | "annotations": { 3 | "list": [ 4 | { 5 | "builtIn": 1, 6 | "datasource": "-- Grafana --", 7 | "enable": true, 8 | "hide": true, 9 | "iconColor": "rgba(0, 211, 255, 1)", 10 | "name": "Annotations & Alerts", 11 | "type": "dashboard" 12 | } 13 | ] 14 | }, 15 | "editable": true, 16 | "gnetId": null, 17 | "graphTooltip": 0, 18 | "id": 1, 19 | "iteration": 1579014779384, 20 | "links": [], 21 | "panels": [ 22 | { 23 | "aliasColors": {}, 24 | "bars": false, 25 | "cacheTimeout": null, 26 | "dashLength": 10, 27 | "dashes": false, 28 | "datasource": null, 29 | "decimals": null, 30 | "fill": 2, 31 | "fillGradient": 0, 32 | "gridPos": { 33 | "h": 16, 34 | "w": 24, 35 | "x": 0, 36 | "y": 0 37 | }, 38 | "hiddenSeries": false, 39 | "id": 2, 40 | "legend": { 41 | "avg": true, 42 | "current": false, 43 | "max": true, 44 | "min": true, 45 | "show": true, 46 | "total": true, 47 | "values": true 48 | }, 49 | "lines": true, 50 | "linewidth": 1, 51 | "links": [], 52 | "nullPointMode": "null", 53 | "options": { 54 | "dataLinks": [] 55 | }, 56 | "percentage": false, 57 | "pointradius": 2, 58 | "points": false, 59 | "renderer": "flot", 60 | "seriesOverrides": [], 61 | "spaceLength": 10, 62 | "stack": false, 63 | "steppedLine": false, 64 | "targets": [ 65 | { 66 | "format": "time_series", 67 | "group": [], 68 | "metricColumn": "none", 69 | "rawQuery": true, 70 | "rawSql": "SELECT\n created_at AS \"time\",\n cast(logs->>'time' as decimal) / 1000 as \"execution time\"\nFROM substrate_logs\nWHERE\n $__timeFilter(created_at) AND logs->>'target' = '$target' AND logs->>'name' = '$span'\nORDER BY 1", 71 | "refId": "A", 72 | "select": [ 73 | [ 74 | { 75 | "params": [ 76 | "id" 77 | ], 78 | "type": "column" 79 | } 80 | ] 81 | ], 82 | "table": "substrate_logs", 83 | "timeColumn": "created_at", 84 | "timeColumnType": "timestamp", 85 | "where": [ 86 | { 87 | "name": "$__timeFilter", 88 | "params": [], 89 | "type": "macro" 90 
| } 91 | ] 92 | } 93 | ], 94 | "thresholds": [], 95 | "timeFrom": null, 96 | "timeRegions": [], 97 | "timeShift": null, 98 | "title": "$target - $span", 99 | "tooltip": { 100 | "shared": true, 101 | "sort": 0, 102 | "value_type": "individual" 103 | }, 104 | "transparent": true, 105 | "type": "graph", 106 | "xaxis": { 107 | "buckets": null, 108 | "mode": "time", 109 | "name": null, 110 | "show": true, 111 | "values": [] 112 | }, 113 | "yaxes": [ 114 | { 115 | "format": "µs", 116 | "label": "", 117 | "logBase": 1, 118 | "max": null, 119 | "min": null, 120 | "show": true 121 | }, 122 | { 123 | "format": "short", 124 | "label": "Time", 125 | "logBase": 1, 126 | "max": null, 127 | "min": null, 128 | "show": true 129 | } 130 | ], 131 | "yaxis": { 132 | "align": false, 133 | "alignLevel": null 134 | } 135 | } 136 | ], 137 | "schemaVersion": 21, 138 | "style": "dark", 139 | "tags": [], 140 | "templating": { 141 | "list": [ 142 | { 143 | "allValue": null, 144 | "current": { 145 | "tags": [], 146 | "text": "polkadot_runtime_common::slots", 147 | "value": "polkadot_runtime_common::slots" 148 | }, 149 | "datasource": "PostgreSQL", 150 | "definition": "select DISTINCT logs->>'target' from substrate_logs where logs->>'msg' = 'tracing.profiling';", 151 | "hide": 0, 152 | "includeAll": false, 153 | "label": "target", 154 | "multi": false, 155 | "name": "target", 156 | "options": [ 157 | { 158 | "selected": false, 159 | "text": "pallet_authorship", 160 | "value": "pallet_authorship" 161 | }, 162 | { 163 | "selected": false, 164 | "text": "pallet_babe", 165 | "value": "pallet_babe" 166 | }, 167 | { 168 | "selected": false, 169 | "text": "pallet_balances", 170 | "value": "pallet_balances" 171 | }, 172 | { 173 | "selected": false, 174 | "text": "pallet_democracy", 175 | "value": "pallet_democracy" 176 | }, 177 | { 178 | "selected": false, 179 | "text": "pallet_elections_phragmen", 180 | "value": "pallet_elections_phragmen" 181 | }, 182 | { 183 | "selected": false, 184 | "text": "pallet_finality_tracker", 185 | "value": "pallet_finality_tracker" 186 | }, 187 | { 188 | "selected": false, 189 | "text": "pallet_grandpa", 190 | "value": "pallet_grandpa" 191 | }, 192 | { 193 | "selected": false, 194 | "text": "pallet_identity", 195 | "value": "pallet_identity" 196 | }, 197 | { 198 | "selected": false, 199 | "text": "pallet_randomness_collective_flip", 200 | "value": "pallet_randomness_collective_flip" 201 | }, 202 | { 203 | "selected": false, 204 | "text": "pallet_session", 205 | "value": "pallet_session" 206 | }, 207 | { 208 | "selected": false, 209 | "text": "pallet_staking", 210 | "value": "pallet_staking" 211 | }, 212 | { 213 | "selected": false, 214 | "text": "pallet_timestamp", 215 | "value": "pallet_timestamp" 216 | }, 217 | { 218 | "selected": false, 219 | "text": "pallet_transaction_payment", 220 | "value": "pallet_transaction_payment" 221 | }, 222 | { 223 | "selected": false, 224 | "text": "pallet_treasury", 225 | "value": "pallet_treasury" 226 | }, 227 | { 228 | "selected": false, 229 | "text": "polkadot_runtime_common::attestations", 230 | "value": "polkadot_runtime_common::attestations" 231 | }, 232 | { 233 | "selected": false, 234 | "text": "polkadot_runtime_common::parachains", 235 | "value": "polkadot_runtime_common::parachains" 236 | }, 237 | { 238 | "selected": false, 239 | "text": "polkadot_runtime_common::registrar", 240 | "value": "polkadot_runtime_common::registrar" 241 | }, 242 | { 243 | "selected": true, 244 | "text": "polkadot_runtime_common::slots", 245 | "value": 
"polkadot_runtime_common::slots" 246 | } 247 | ], 248 | "query": "select DISTINCT logs->>'target' from substrate_logs where logs->>'msg' = 'tracing.profiling';", 249 | "refresh": 0, 250 | "regex": "", 251 | "skipUrlSync": false, 252 | "sort": 1, 253 | "tagValuesQuery": "", 254 | "tags": [], 255 | "tagsQuery": "", 256 | "type": "query", 257 | "useTags": false 258 | }, 259 | { 260 | "allValue": null, 261 | "current": { 262 | "text": "on_finalize", 263 | "value": "on_finalize" 264 | }, 265 | "datasource": "PostgreSQL", 266 | "definition": "select DISTINCT logs->>'name' from substrate_logs where logs->>'msg' = 'tracing.profiling' and logs->>'target' = '$target';", 267 | "hide": 0, 268 | "includeAll": false, 269 | "label": null, 270 | "multi": false, 271 | "name": "span", 272 | "options": [], 273 | "query": "select DISTINCT logs->>'name' from substrate_logs where logs->>'msg' = 'tracing.profiling' and logs->>'target' = '$target';", 274 | "refresh": 1, 275 | "regex": "", 276 | "skipUrlSync": false, 277 | "sort": 0, 278 | "tagValuesQuery": "", 279 | "tags": [], 280 | "tagsQuery": "", 281 | "type": "query", 282 | "useTags": false 283 | } 284 | ] 285 | }, 286 | "time": { 287 | "from": "now-5m", 288 | "to": "now" 289 | }, 290 | "timepicker": { 291 | "refresh_intervals": [ 292 | "5s", 293 | "10s", 294 | "30s", 295 | "1m", 296 | "5m", 297 | "15m", 298 | "30m", 299 | "1h", 300 | "2h", 301 | "1d" 302 | ] 303 | }, 304 | "timezone": "", 305 | "title": "Runtime", 306 | "uid": "zj7vkLEWk", 307 | "version": 6 308 | } -------------------------------------------------------------------------------- /log_messages.txt: -------------------------------------------------------------------------------- 1 | logs | ?column? 2 | -----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+-------------------------------------- 3 | {"ts": "2020-02-14T16:59:34.691495203+00:00", "msg": "afg.announcing_blocks_to_voted_peers", "block": "0x59e280a61e998277636665dfb485b020a774f4de07461932aebb2304fd4a5235", "level": "INFO", "round": "25", "set_id": "0"} | afg.announcing_blocks_to_voted_peers 4 | {"ts": "2020-02-14T16:59:37.405501573+00:00", "msg": "afg.authority_set", "level": "INFO", "authorities": "[\"5FA9nQDVg267DEd8m1ZypXLBnvN7SFxYwV7ndqSYGiN9TTpu\"]", "authority_id": "5FA9nQDVg267DEd8m1ZypXLBnvN7SFxYwV7ndqSYGiN9TTpu", "authority_set_id": "0"} | afg.authority_set 5 | {"ts": "2020-02-14T17:00:04.981011897+00:00", "msg": "afg.commit_issued", "level": "INFO", "target_hash": "0xc6fc70d0a99d5ebfdae7f05773ec3bc900fafad2fc6b3e368855f49612629231", "target_number": "7"} | afg.commit_issued 6 | {"ts": "2020-02-12T14:59:32.999440041+00:00", "msg": "afg.finalized", "level": "INFO", "finalized_hash": "0x4d87a8cd68ff05b9dbda2aa991b706de9b0d8838be82bd001cd1cfa4a3f747ef", "finalized_number": "2"} | afg.finalized 7 | {"ts": "2020-02-14T17:00:04.086859210+00:00", "msg": "afg.finalized_blocks_up_to", "hash": "0xc6fc70d0a99d5ebfdae7f05773ec3bc900fafad2fc6b3e368855f49612629231", "level": "INFO", "number": "7"} | afg.finalized_blocks_up_to 8 | {"ts": 
"2020-01-22T12:48:11.227526768+00:00", "msg": "babe.checked_and_importing", "level": "INFO", "pre_header": "Header { parent_hash: 0x35f028b081689e94e054ce80909cd4a65dde0f5b3ec0c2ac07223542a8a442d2, number: 364, state_root: 0xc2ccce1c3d0a94d2e983dcc9491a69b5588c3c031a907456ef1c1f6c4b601372, extrinsics_root: 0xaf9a60d94bc81e28091df512c8ea34688eeb5886117eaeb6c9662c2bf2b73543, digest: Digest { logs: [DigestItem::PreRuntime([66, 65, 66, 69], [2, 2, 0, 0, 0, 198, 141, 83, 31, 0, 0, 0, 0])] } }"} | babe.checked_and_importing 9 | {"ts": "2020-02-06T09:59:03.062431862+00:00", "msg": "block.import", "best": "0xfae3c824e81a9e70e7d5227b1079b704be70eb8a0bbfe5ba386d081f046b63b9", "level": "INFO", "height": 22, "origin": "Own"} | block.import 10 | {"ts": "2020-02-14T17:00:04.086874260+00:00", "msg": "notify.finalized", "best": "0xc6fc70d0a99d5ebfdae7f05773ec3bc900fafad2fc6b3e368855f49612629231", "level": "INFO", "height": "7"} | notify.finalized 11 | {"ts": "2020-02-14T17:00:03.005148103+00:00", "msg": "prepared_block_for_proposing", "hash": "0x348244fd82d817ee84664406cdf09811bc0b82c65d18927c7d7d7df09fa8cd6d", "level": "INFO", "number": "9"} | prepared_block_for_proposing 12 | {"ts": "2020-02-14T16:59:33.610386293+00:00", "err": "ClientImport(\"Unexpected epoch change\")", "msg": "slots.err_with_block_built_on", "hash": "0x9d200cb05595a3b5e6e62fd07db5ed508a52c96639e498eded94dc52348be16b", "level": "INFO"} | slots.err_with_block_built_on 13 | {"ts": "2020-02-14T17:00:03.005447405+00:00", "msg": "slots.pre_sealed_block", "level": "INFO", "hash_now": "0xdc16527ea5ccc592ea23f2a9c264f2e1f4f4c2776cde924a6d72e4d0b07143e6", "header_num": "9", "hash_previously": "0x348244fd82d817ee84664406cdf09811bc0b82c65d18927c7d7d7df09fa8cd6d"} | slots.pre_sealed_block 14 | {"ts": "2020-02-12T10:55:15.532499485+00:00", "msg": "slots.starting_authorship", "level": "INFO", "slot_num": 527168305, "timestamp": 1581504915000} | slots.starting_authorship 15 | {"ts": "2020-02-14T16:59:37.403479912+00:00", "msg": "system.connected", "name": "Alice", "chain": "Development", "level": "INFO", "config": "", "version": "2.0.0-x86_64-linux-gnu", "authority": true, "network_id": "Qmac7KAf8YVzPen8rgUnD2TS98d6uRA1kKiw1cL8G4nrRf", "startup_time": "1581699577396", "implementation": "Substrate Node"} | system.connected 16 | {"ts": "2020-02-14T17:00:02.398355300+00:00", "cpu": 288.6646, "msg": "system.interval", "best": "0x158bc408f212ba0754aa87027d44b2f326cf4567160f8d7f497ccbf3fba62fe5", "level": "INFO", "peers": 0, "height": 8, "memory": 1317708, "txcount": 0, "finalized_hash": "0x20539e68f554c8c960e1a63e5192ddac6dedaab241e881af585c5e68e7dc626a", "bandwidth_upload": 0, "finalized_height": 6, "disk_read_per_sec": 2266081, "bandwidth_download": 0, "disk_write_per_sec": 202398, "used_db_cache_size": 2716118, "used_state_cache_size": 1990876} | system.interval 17 | {"ts": "2020-02-06T10:06:09.460452500+00:00", "msg": "system.network_state", "level": "INFO", "state": {"peerId": "QmPyzhhQDN5YhQr6Nmdg5mQBRFMRUhnhD6ihaxt3Rz1PJd", "peerset": {"nodes": {}, "message_queue": 0, "reserved_only": false}, "connectedPeers": {}, "externalAddresses": [], "listenedAddresses": ["/ip4/192.168.2.2/tcp/30333", "/ip4/127.0.0.1/tcp/30333", "/ip6/::1/tcp/30333"], "notConnectedPeers": {}, "averageUploadPerSec": 0, "averageDownloadPerSec": 0}} | system.network_state 18 | {"ts": "2020-02-14T17:00:00.040896003+00:00", "msg": "tracing.profiling", "name": ">::get", "time": "760", "level": "INFO", "target": "frame_system"} | tracing.profiling 19 | {"ts": 
"2020-02-14T16:59:57.531421788+00:00", "msg": "txpool.import", "level": "INFO", "ready": 207, "future": 0} | txpool.import -------------------------------------------------------------------------------- /migrations/.gitkeep: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/paritytech/substrate-analytics/d6b7436fdf4d6e3268f808758a46eaab6f212fca/migrations/.gitkeep -------------------------------------------------------------------------------- /migrations/00000000000000_diesel_initial_setup/down.sql: -------------------------------------------------------------------------------- 1 | -- This file was automatically created by Diesel to setup helper functions 2 | -- and other internal bookkeeping. This file is safe to edit, any future 3 | -- changes will be added to existing projects as new migrations. 4 | 5 | DROP FUNCTION IF EXISTS diesel_manage_updated_at(_tbl regclass); 6 | DROP FUNCTION IF EXISTS diesel_set_updated_at(); 7 | -------------------------------------------------------------------------------- /migrations/00000000000000_diesel_initial_setup/up.sql: -------------------------------------------------------------------------------- 1 | -- This file was automatically created by Diesel to setup helper functions 2 | -- and other internal bookkeeping. This file is safe to edit, any future 3 | -- changes will be added to existing projects as new migrations. 4 | 5 | 6 | 7 | 8 | -- Sets up a trigger for the given table to automatically set a column called 9 | -- `updated_at` whenever the row is modified (unless `updated_at` was included 10 | -- in the modified columns) 11 | -- 12 | -- # Example 13 | -- 14 | -- ```sql 15 | -- CREATE TABLE users (id SERIAL PRIMARY KEY, updated_at TIMESTAMP NOT NULL DEFAULT NOW()); 16 | -- 17 | -- SELECT diesel_manage_updated_at('users'); 18 | -- ``` 19 | CREATE OR REPLACE FUNCTION diesel_manage_updated_at(_tbl regclass) RETURNS VOID AS $$ 20 | BEGIN 21 | EXECUTE format('CREATE TRIGGER set_updated_at BEFORE UPDATE ON %s 22 | FOR EACH ROW EXECUTE PROCEDURE diesel_set_updated_at()', _tbl); 23 | END; 24 | $$ LANGUAGE plpgsql; 25 | 26 | CREATE OR REPLACE FUNCTION diesel_set_updated_at() RETURNS trigger AS $$ 27 | BEGIN 28 | IF ( 29 | NEW IS DISTINCT FROM OLD AND 30 | NEW.updated_at IS NOT DISTINCT FROM OLD.updated_at 31 | ) THEN 32 | NEW.updated_at := current_timestamp; 33 | END IF; 34 | RETURN NEW; 35 | END; 36 | $$ LANGUAGE plpgsql; 37 | -------------------------------------------------------------------------------- /migrations/2019-02-11-181154_create_substrate_logs/down.sql: -------------------------------------------------------------------------------- 1 | DROP TABLE substrate_logs; -------------------------------------------------------------------------------- /migrations/2019-02-11-181154_create_substrate_logs/up.sql: -------------------------------------------------------------------------------- 1 | CREATE TABLE substrate_logs ( 2 | id SERIAL PRIMARY KEY, 3 | node_ip VARCHAR NOT NULL, 4 | created_at TIMESTAMP NOT NULL DEFAULT NOW(), 5 | logs JSONB NOT NULL 6 | ); -------------------------------------------------------------------------------- /migrations/2019-05-23-134208_create_idx_node_ip/down.sql: -------------------------------------------------------------------------------- 1 | DROP INDEX substrate_logs_node_ip_idx; -------------------------------------------------------------------------------- /migrations/2019-05-23-134208_create_idx_node_ip/up.sql: 
-------------------------------------------------------------------------------- 1 | CREATE INDEX substrate_logs_node_ip_idx ON substrate_logs (node_ip); -------------------------------------------------------------------------------- /migrations/2019-05-23-135015_create_idx_msg_type/down.sql: -------------------------------------------------------------------------------- 1 | DROP INDEX substrate_logs_msg_type_idx; -------------------------------------------------------------------------------- /migrations/2019-05-23-135015_create_idx_msg_type/up.sql: -------------------------------------------------------------------------------- 1 | CREATE INDEX substrate_logs_msg_type_idx ON substrate_logs USING BTREE ((logs->>'msg')); -------------------------------------------------------------------------------- /migrations/2019-08-16-131907_create_peer_connections/down.sql: -------------------------------------------------------------------------------- 1 | DROP TABLE peer_connections; -------------------------------------------------------------------------------- /migrations/2019-08-16-131907_create_peer_connections/up.sql: -------------------------------------------------------------------------------- 1 | CREATE TABLE peer_connections 2 | ( 3 | id SERIAL PRIMARY KEY, 4 | ip_addr VARCHAR NOT NULL, 5 | peer_id VARCHAR, 6 | created_at TIMESTAMP NOT NULL DEFAULT NOW() 7 | ); -------------------------------------------------------------------------------- /migrations/2019-08-16-132515_add_peer_to_substrate_logs/down.sql: -------------------------------------------------------------------------------- 1 | ALTER TABLE substrate_logs DROP COLUMN peer_connection_id; 2 | ALTER TABLE substrate_logs ADD COLUMN node_ip VARCHAR NOT NULL DEFAULT 'NULL'; -------------------------------------------------------------------------------- /migrations/2019-08-16-132515_add_peer_to_substrate_logs/up.sql: -------------------------------------------------------------------------------- 1 | TRUNCATE substrate_logs; 2 | ALTER TABLE substrate_logs DROP COLUMN node_ip; 3 | ALTER TABLE substrate_logs ADD COLUMN peer_connection_id INTEGER REFERENCES peer_connections(id) NOT NULL; -------------------------------------------------------------------------------- /migrations/2019-08-22-094016_create_index_on_peer_id/down.sql: -------------------------------------------------------------------------------- 1 | DROP INDEX peer_connections_peer_id_idx; -------------------------------------------------------------------------------- /migrations/2019-08-22-094016_create_index_on_peer_id/up.sql: -------------------------------------------------------------------------------- 1 | CREATE INDEX peer_connections_peer_id_idx ON peer_connections (peer_id); -------------------------------------------------------------------------------- /migrations/2019-09-27-125919_create_index_created_at_msg_type/down.sql: -------------------------------------------------------------------------------- 1 | DROP INDEX substrate_logs_created_at_msg_idx; -------------------------------------------------------------------------------- /migrations/2019-09-27-125919_create_index_created_at_msg_type/up.sql: -------------------------------------------------------------------------------- 1 | CREATE INDEX substrate_logs_created_at_msg_idx ON substrate_logs USING BTREE (created_at, (logs->>'msg')); -------------------------------------------------------------------------------- /migrations/2019-09-30-144205_add_audit_to_peer_connections/down.sql: 
-------------------------------------------------------------------------------- 1 | ALTER TABLE peer_connections DROP COLUMN audit; -------------------------------------------------------------------------------- /migrations/2019-09-30-144205_add_audit_to_peer_connections/up.sql: -------------------------------------------------------------------------------- 1 | ALTER TABLE peer_connections ADD COLUMN audit BOOLEAN DEFAULT FALSE NOT NULL; -------------------------------------------------------------------------------- /migrations/2019-10-28-055145_create_host_systems/down.sql: -------------------------------------------------------------------------------- 1 | DROP TABLE host_systems; -------------------------------------------------------------------------------- /migrations/2019-10-28-055145_create_host_systems/up.sql: -------------------------------------------------------------------------------- 1 | CREATE TABLE host_systems 2 | ( 3 | id SERIAL PRIMARY KEY, 4 | description VARCHAR NOT NULL, 5 | os VARCHAR NOT NULL, 6 | cpu_qty INTEGER NOT NULL, 7 | cpu_clock INTEGER NOT NULL, 8 | ram_mb INTEGER NOT NULL, 9 | disk_info VARCHAR NOT NULL 10 | ); -------------------------------------------------------------------------------- /migrations/2019-10-28-064727_create_benchmarks/down.sql: -------------------------------------------------------------------------------- 1 | DROP TABLE benchmarks; -------------------------------------------------------------------------------- /migrations/2019-10-28-064727_create_benchmarks/up.sql: -------------------------------------------------------------------------------- 1 | CREATE TABLE benchmarks 2 | ( 3 | id SERIAL PRIMARY KEY, 4 | ts_start TIMESTAMP NOT NULL DEFAULT NOW(), 5 | ts_end TIMESTAMP NOT NULL DEFAULT NOW(), 6 | description VARCHAR, 7 | chain_spec JSONB, 8 | benchmark_spec JSONB, 9 | host_system_id INTEGER REFERENCES host_systems (id) NOT NULL 10 | ); 11 | -------------------------------------------------------------------------------- /migrations/2019-12-11-135359_add_peer_id_to_benchmarks/down.sql: -------------------------------------------------------------------------------- 1 | ALTER TABLE benchmarks DROP COLUMN peer_id; -------------------------------------------------------------------------------- /migrations/2019-12-11-135359_add_peer_id_to_benchmarks/up.sql: -------------------------------------------------------------------------------- 1 | ALTER TABLE benchmarks ADD COLUMN peer_id VARCHAR DEFAULT 'NOT SET' NOT NULL; -------------------------------------------------------------------------------- /migrations/2019-12-13-130241_align_benchmarks/down.sql: -------------------------------------------------------------------------------- 1 | DROP TABLE benchmarks; -------------------------------------------------------------------------------- /migrations/2019-12-13-130241_align_benchmarks/up.sql: -------------------------------------------------------------------------------- 1 | DROP TABLE IF EXISTS benchmarks CASCADE; 2 | CREATE TABLE benchmarks( 3 | id SERIAL PRIMARY KEY, 4 | setup JSONB NOT NULL, 5 | created_at TIMESTAMP NOT NULL DEFAULT (NOW() AT TIME ZONE 'utc') 6 | ); -------------------------------------------------------------------------------- /migrations/2019-12-13-161417_create_benchmark_events/down.sql: -------------------------------------------------------------------------------- 1 | DROP TABLE benchmark_events; -------------------------------------------------------------------------------- 
/migrations/2019-12-13-161417_create_benchmark_events/up.sql: -------------------------------------------------------------------------------- 1 | CREATE TABLE benchmark_events( 2 | id SERIAL PRIMARY KEY, 3 | benchmark_id INTEGER REFERENCES benchmarks(id) NOT NULL, 4 | name VARCHAR NOT NULL, 5 | phase VARCHAR NOT NULL, 6 | created_at TIMESTAMP NOT NULL DEFAULT (NOW() AT TIME ZONE 'utc') 7 | ); -------------------------------------------------------------------------------- /migrations/2020-04-10-113123_add_info_to_peer_connections/down.sql: -------------------------------------------------------------------------------- 1 | ALTER TABLE peer_connections DROP COLUMN "name"; 2 | ALTER TABLE peer_connections DROP COLUMN chain; 3 | ALTER TABLE peer_connections DROP COLUMN version; 4 | ALTER TABLE peer_connections DROP COLUMN authority; 5 | ALTER TABLE peer_connections DROP COLUMN startup_time; 6 | ALTER TABLE peer_connections DROP COLUMN implementation; -------------------------------------------------------------------------------- /migrations/2020-04-10-113123_add_info_to_peer_connections/up.sql: -------------------------------------------------------------------------------- 1 | ALTER TABLE peer_connections ADD COLUMN "name" VARCHAR; 2 | ALTER TABLE peer_connections ADD COLUMN chain VARCHAR; 3 | ALTER TABLE peer_connections ADD COLUMN version VARCHAR; 4 | ALTER TABLE peer_connections ADD COLUMN authority BOOLEAN; 5 | ALTER TABLE peer_connections ADD COLUMN startup_time BIGINT; 6 | ALTER TABLE peer_connections ADD COLUMN implementation VARCHAR; -------------------------------------------------------------------------------- /migrations/2020-09-08-111742_create_index_on_peerset_nodes/down.sql: -------------------------------------------------------------------------------- 1 | DROP INDEX substrate_logs_peerset_nodes_idx; 2 | -------------------------------------------------------------------------------- /migrations/2020-09-08-111742_create_index_on_peerset_nodes/up.sql: -------------------------------------------------------------------------------- 1 | CREATE INDEX substrate_logs_peerset_nodes_idx ON substrate_logs USING GIN ((logs->'state'->'peerset'->'nodes') jsonb_path_ops); 2 | -------------------------------------------------------------------------------- /rustfmt.toml: -------------------------------------------------------------------------------- 1 | edition = "2018" -------------------------------------------------------------------------------- /src/db/benchmarks.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2019 Parity Technologies (UK) Ltd. 2 | // This file is part of Substrate Analytics. 3 | 4 | // Substrate Analytics is free software: you can redistribute it and/or modify 5 | // it under the terms of the GNU General Public License as published by 6 | // the Free Software Foundation, either version 3 of the License, or 7 | // (at your option) any later version. 8 | 9 | // Substrate Analytics is distributed in the hope that it will be useful, 10 | // but WITHOUT ANY WARRANTY; without even the implied warranty of 11 | // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 | // GNU General Public License for more details. 13 | 14 | // You should have received a copy of the GNU General Public License 15 | // along with Substrate Analytics. If not, see <http://www.gnu.org/licenses/>.
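// This module runs on the `DbExecutor` actor: `Query` messages fetch all
// benchmarks, the distinct log targets recorded for a benchmark's peer, or a
// benchmark's events, while `NewBenchmark` / `NewBenchmarkEvent` messages
// insert new rows; every handler returns its result as a `serde_json::Value`.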
16 | 17 | use super::models::{Benchmark, BenchmarkEvent, NewBenchmark, NewBenchmarkEvent}; 18 | use super::DbExecutor; 19 | use crate::db::filters::Filters; 20 | use actix::prelude::*; 21 | use diesel::prelude::*; 22 | use diesel::sql_query; 23 | use diesel::sql_types::*; 24 | use failure::Error; 25 | use serde_json::Value; 26 | 27 | #[derive(Serialize, Deserialize, Debug, QueryableByName)] 28 | pub struct Targets { 29 | #[sql_type = "Text"] 30 | target: String, 31 | } 32 | 33 | pub enum Query { 34 | All(Filters), 35 | /// Targets for benchmark id 36 | Targets(i32), 37 | /// Events for benchmark id 38 | Events(i32), 39 | } 40 | 41 | impl Message for Query { 42 | type Result = Result<Value, Error>; 43 | } 44 | 45 | impl Handler<Query> for DbExecutor { 46 | type Result = Result<Value, Error>; 47 | 48 | fn handle(&mut self, msg: Query, _: &mut Self::Context) -> Self::Result { 49 | match msg { 50 | Query::All(filters) => self.get_benchmarks(filters), 51 | Query::Events(id) => self.get_events(id), 52 | Query::Targets(id) => self.get_targets(id), 53 | } 54 | } 55 | } 56 | 57 | impl Message for NewBenchmark { 58 | type Result = Result<Value, Error>; 59 | } 60 | 61 | impl Handler<NewBenchmark> for DbExecutor { 62 | type Result = Result<Value, Error>; 63 | 64 | fn handle(&mut self, msg: NewBenchmark, _: &mut Self::Context) -> Self::Result { 65 | self.save_benchmark(msg) 66 | } 67 | } 68 | 69 | impl Message for NewBenchmarkEvent { 70 | type Result = Result<Value, Error>; 71 | } 72 | 73 | impl Handler<NewBenchmarkEvent> for DbExecutor { 74 | type Result = Result<Value, Error>; 75 | 76 | fn handle(&mut self, msg: NewBenchmarkEvent, _: &mut Self::Context) -> Self::Result { 77 | self.save_benchmark_event(msg) 78 | } 79 | } 80 | 81 | impl DbExecutor { 82 | fn get_benchmarks(&self, _filters: Filters) -> Result<Value, Error> { 83 | match self.with_connection(|conn| { 84 | use crate::schema::benchmarks::dsl::*; 85 | benchmarks.order(created_at.desc()).load::<Benchmark>(conn) 86 | }) { 87 | Ok(Ok(v)) => Ok(json!(v)), 88 | Ok(Err(e)) => Err(e.into()), 89 | Err(e) => Err(e.into()), 90 | } 91 | } 92 | 93 | fn get_targets(&self, id: i32) -> Result<Value, Error> { 94 | match self.with_connection(|conn| { 95 | let query = sql_query( 96 | "SELECT DISTINCT logs->>'target' as target from substrate_logs \ 97 | WHERE logs->>'target' IS NOT NULL \ 98 | AND peer_connection_id = ANY (\ 99 | SELECT id from peer_connections WHERE peer_id = \ 100 | (SELECT setup->'substrate'->>'peerId' as peer_id FROM benchmarks WHERE id = $1)\ 101 | ) ORDER BY target ASC", 102 | ) 103 | .bind::<Integer, _>(id); 104 | debug!( 105 | "get_targets query: {}", 106 | diesel::debug_query::<diesel::pg::Pg, _>(&query) 107 | ); 108 | let result: QueryResult<Vec<Targets>> = query.get_results(conn); 109 | result 110 | }) { 111 | Ok(Ok(v)) => Ok(json!(v)), 112 | Ok(Err(e)) => Err(e.into()), 113 | Err(e) => Err(e.into()), 114 | } 115 | } 116 | 117 | fn get_events(&self, bm_id: i32) -> Result<Value, Error> { 118 | match self.with_connection(|conn| { 119 | use crate::schema::benchmark_events::dsl::*; 120 | benchmark_events 121 | .filter(benchmark_id.eq(bm_id)) 122 | .order(name.asc()) 123 | .load::<BenchmarkEvent>(conn) 124 | }) { 125 | Ok(Ok(v)) => Ok(json!(v)), 126 | Ok(Err(e)) => Err(e.into()), 127 | Err(e) => Err(e.into()), 128 | } 129 | } 130 | 131 | fn save_benchmark(&self, msg: NewBenchmark) -> Result<Value, Error> { 132 | match self.with_connection(|conn| { 133 | use crate::schema::benchmarks; 134 | diesel::insert_into(benchmarks::table) 135 | .values(msg) 136 | .get_result::<Benchmark>(conn) 137 | }) { 138 | Ok(Ok(v)) => Ok(json!(v)), 139 | Ok(Err(e)) => Err(e.into()), 140 | Err(e) => Err(e.into()), 141 | } 142 | } 143 | 144 | fn save_benchmark_event(&self, msg: NewBenchmarkEvent) -> Result<Value, Error> { 145 | match
--------------------------------------------------------------------------------
/src/db/filters.rs:
--------------------------------------------------------------------------------
1 | // Copyright 2019 Parity Technologies (UK) Ltd.
2 | // This file is part of Substrate Analytics.
3 | 
4 | // Substrate Analytics is free software: you can redistribute it and/or modify
5 | // it under the terms of the GNU General Public License as published by
6 | // the Free Software Foundation, either version 3 of the License, or
7 | // (at your option) any later version.
8 | 
9 | // Substrate Analytics is distributed in the hope that it will be useful,
10 | // but WITHOUT ANY WARRANTY; without even the implied warranty of
11 | // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 | // GNU General Public License for more details.
13 | 
14 | // You should have received a copy of the GNU General Public License
15 | // along with Substrate Analytics. If not, see <http://www.gnu.org/licenses/>.
16 | 
17 | use chrono::NaiveDateTime;
18 | 
19 | //const TIME_FORMAT: &'static str = "2000-01-01T00:00:00";
20 | 
21 | // TODO implement validator derive
22 | // #[macro_use]
23 | // extern crate validator_derive;
24 | 
25 | #[derive(Deserialize, Default, Debug, Clone)]
26 | pub struct Filters {
27 |     pub start_time: Option<NaiveDateTime>,
28 |     pub end_time: Option<NaiveDateTime>,
29 |     pub max_age_s: Option<u64>,
30 |     pub limit: Option<i32>,
31 |     pub peer_id: Option<String>,
32 |     pub target: Option<String>,
33 |     pub msg: Option<String>,
34 | }
35 | 
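Filters is populated straight from a request's query string (see `get_filters` in src/web/mod.rs). A sketch of that round trip, assuming the serde_urlencoded crate (the parser actix-web itself uses for query strings); peer id illustrative:

    let filters: Filters =
        serde_urlencoded::from_str("peer_id=QmSoMePeer&limit=100&msg=system.interval")
            .expect("well-formed query string");
    assert_eq!(filters.limit, Some(100));
    assert_eq!(filters.start_time, None); // absent fields stay None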
--------------------------------------------------------------------------------
/src/db/mod.rs:
--------------------------------------------------------------------------------
1 | // Copyright 2019 Parity Technologies (UK) Ltd.
2 | // This file is part of Substrate Analytics.
3 | 
4 | // Substrate Analytics is free software: you can redistribute it and/or modify
5 | // it under the terms of the GNU General Public License as published by
6 | // the Free Software Foundation, either version 3 of the License, or
7 | // (at your option) any later version.
8 | 
9 | // Substrate Analytics is distributed in the hope that it will be useful,
10 | // but WITHOUT ANY WARRANTY; without even the implied warranty of
11 | // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 | // GNU General Public License for more details.
13 | 
14 | // You should have received a copy of the GNU General Public License
15 | // along with Substrate Analytics. If not, see <http://www.gnu.org/licenses/>.
16 | 
17 | pub mod benchmarks;
18 | pub mod filters;
19 | pub mod models;
20 | pub mod nodes;
21 | pub mod peer_data;
22 | pub mod reputation;
23 | pub mod stats;
24 | 
25 | use actix::prelude::*;
26 | use diesel;
27 | use diesel::pg::PgConnection;
28 | use diesel::prelude::*;
29 | use diesel::r2d2::{ConnectionManager, Pool, PoolError};
30 | use diesel::result::QueryResult;
31 | use diesel::RunQueryDsl;
32 | 
33 | use self::models::{NewPeerConnection, NewSubstrateLog, PeerConnection};
34 | use crate::{DATABASE_URL, DB_POOL_SIZE};
35 | 
36 | pub const RECORD_LIMIT: i32 = 10_000;
37 | 
38 | pub struct DbExecutor {
39 |     pool: Pool<ConnectionManager<PgConnection>>,
40 | }
41 | 
42 | impl Actor for DbExecutor {
43 |     type Context = SyncContext<Self>;
44 | }
45 | 
46 | impl DbExecutor {
47 |     // Execute query, log error if any and return result.
48 |     fn with_connection<F, R>(&self, f: F) -> Result<R, PoolError>
49 |     where
50 |         F: FnOnce(&PgConnection) -> R,
51 |     {
52 |         let result = self.pool.get().map(|conn| f(&conn));
53 |         if let Err(e) = &result {
54 |             error!("Couldn't get DB connection from pool: {}", e);
55 |         }
56 |         result
57 |     }
58 | 
59 |     pub fn new(pool: Pool<ConnectionManager<PgConnection>>) -> Self {
60 |         DbExecutor { pool }
61 |     }
62 | }
63 | 
64 | impl Message for NewPeerConnection {
65 |     type Result = Result<PeerConnection, String>;
66 | }
67 | 
68 | impl Handler<NewPeerConnection> for DbExecutor {
69 |     type Result = Result<PeerConnection, String>;
70 | 
71 |     fn handle(&mut self, msg: NewPeerConnection, _: &mut Self::Context) -> Self::Result {
72 |         use crate::schema::peer_connections;
73 |         #[allow(unused_imports)]
74 |         use crate::schema::peer_connections::dsl::*;
75 |         let pc: Result<QueryResult<PeerConnection>, _> = self.with_connection(|conn| {
76 |             let result: QueryResult<PeerConnection> = diesel::insert_into(peer_connections::table)
77 |                 .values(&msg)
78 |                 .get_result(conn);
79 |             result
80 |         });
81 |         if let Ok(pcr) = pc {
82 |             if let Ok(p) = pcr {
83 |                 return Ok(p);
84 |             }
85 |         };
86 |         Err(format!(
87 |             "Error inserting PeerConnection, for ip: {}",
88 |             msg.ip_addr
89 |         ))
90 |     }
91 | }
92 | 
93 | impl Message for PeerConnection {
94 |     type Result = Result<(), String>;
95 | }
96 | 
97 | impl Handler<PeerConnection> for DbExecutor {
98 |     type Result = Result<(), String>;
99 | 
100 |     fn handle(&mut self, msg: PeerConnection, _: &mut Self::Context) -> Self::Result {
101 |         //use crate::schema::peer_connections;
102 |         #[allow(unused_imports)]
103 |         use crate::schema::peer_connections::dsl::*;
104 |         let msg_id = msg.id;
105 |         let result = self.with_connection(|conn| {
106 |             diesel::update(peer_connections.filter(id.eq(msg.id)))
107 |                 .set((
108 |                     peer_id.eq(msg.peer_id),
109 |                     ip_addr.eq(msg.ip_addr),
110 |                     name.eq(msg.name),
111 |                     chain.eq(msg.chain),
112 |                     version.eq(msg.version),
113 |                     authority.eq(msg.authority),
114 |                     startup_time.eq(msg.startup_time),
115 |                     implementation.eq(msg.implementation),
116 |                 ))
117 |                 .execute(conn)
118 |         });
119 |         if let Ok(ir) = result {
120 |             if ir.is_ok() {
121 |                 return Ok(());
122 |             }
123 |         };
124 |         Err(format!("Error updating PeerConnection, id: {}", msg_id))
125 |     }
126 | }
127 | 
128 | pub struct LogBatch(pub Vec<NewSubstrateLog>);
129 | 
130 | impl Message for LogBatch {
131 |     type Result = Result<(), &'static str>;
132 | }
133 | 
134 | impl Handler<LogBatch> for DbExecutor {
135 |     type Result = Result<(), &'static str>;
136 | 
137 |     fn handle(&mut self, msg: LogBatch, _: &mut Self::Context) -> Self::Result {
138 |         use crate::schema::substrate_logs;
139 |         #[allow(unused_imports)]
140 |         use crate::schema::substrate_logs::dsl::*;
141 |         let _ = self.with_connection(|conn| {
142 |             match diesel::insert_into(substrate_logs::table)
143 |                 .values(msg.0)
144 |                 .execute(conn)
145 |             {
146 |                 Err(e) => error!("Error inserting logs: {:?}", e),
147 |                 Ok(n) => debug!("Inserted {} substrate_logs", n),
148 |             }
149 |         });
150 |         Ok(())
151 |     }
152 | }
153 | 
154 | #[derive(Clone)]
155 | pub struct PurgeLogs {
156 |     pub hours_valid: u32,
157 | }
158 | 
159 | impl Message for PurgeLogs {
160 |     type Result = Result<(), &'static str>;
161 | }
162 | 
163 | impl Handler<PurgeLogs> for DbExecutor {
164 |     type Result = Result<(), &'static str>;
165 | 
166 |     fn handle(&mut self, msg: PurgeLogs, _: &mut Self::Context) -> Self::Result {
167 |         let _ = self.with_connection(|conn| {
168 |             let query = format!(
169 |                 "DELETE FROM substrate_logs \
170 |                 USING peer_connections \
171 |                 WHERE peer_connections.id = peer_connection_id \
172 |                 AND audit = false \
173 |                 AND substrate_logs.created_at < now() - {} * interval '1 hour'",
174 |                 msg.hours_valid
175 |             );
176 |             info!("Cleaning up database - deleting old log messages");
177 |             match diesel::sql_query(query).execute(conn) {
178 |                 Err(e) => error!("Error purging expired logs: {:?}", e),
179 |                 Ok(n) => info!("Purged {} records from database", n),
180 |             }
181 |         });
182 |         let _ = self.with_connection(|conn| {
183 |             let query = "DELETE FROM peer_connections \
184 |                 WHERE id NOT IN \
185 |                 (SELECT DISTINCT peer_connection_id FROM substrate_logs)";
186 |             info!("Cleaning up database - deleting unreferenced peer_connections");
187 |             match diesel::sql_query(query).execute(conn) {
188 |                 Err(e) => error!("Error purging expired peer_connections: {:?}", e),
189 |                 Ok(n) => info!("Purged {} records from database", n),
190 |             }
191 |         });
192 |         Ok(())
193 |     }
194 | }
195 | 
196 | pub fn create_pool() -> Pool<ConnectionManager<PgConnection>> {
197 |     let manager = ConnectionManager::new(DATABASE_URL.to_string());
198 |     let pool = Pool::builder()
199 |         .max_size(*DB_POOL_SIZE)
200 |         .build(manager)
201 |         .expect("Failed to create pool");
202 |     info!("Database pool created with {} connections", *DB_POOL_SIZE);
203 |     pool
204 | }
205 | 
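with_connection wraps whatever the closure returns in a second Result (the pool checkout), which is why every query module matches on nested results. A sketch of the idiom, as it would appear inside a DbExecutor method:

    // Outer Result = pool checkout, inner Result = the Diesel query itself.
    match self.with_connection(|conn| diesel::sql_query("SELECT 1").execute(conn)) {
        Ok(Ok(rows)) => info!("query touched {} rows", rows),
        Ok(Err(query_err)) => error!("query failed: {}", query_err),
        Err(pool_err) => error!("no connection from pool: {}", pool_err),
    }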
--------------------------------------------------------------------------------
/src/db/models.rs:
--------------------------------------------------------------------------------
1 | // Copyright 2019 Parity Technologies (UK) Ltd.
2 | // This file is part of Substrate Analytics.
3 | 
4 | // Substrate Analytics is free software: you can redistribute it and/or modify
5 | // it under the terms of the GNU General Public License as published by
6 | // the Free Software Foundation, either version 3 of the License, or
7 | // (at your option) any later version.
8 | 
9 | // Substrate Analytics is distributed in the hope that it will be useful,
10 | // but WITHOUT ANY WARRANTY; without even the implied warranty of
11 | // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 | // GNU General Public License for more details.
13 | 
14 | // You should have received a copy of the GNU General Public License
15 | // along with Substrate Analytics. If not, see <http://www.gnu.org/licenses/>.
16 | 
17 | use crate::schema::{benchmark_events, benchmarks, peer_connections, substrate_logs};
18 | use chrono::NaiveDateTime;
19 | use serde_json::Value;
20 | 
21 | #[derive(Queryable, Identifiable, PartialEq, Serialize, Debug)]
22 | #[table_name = "benchmark_events"]
23 | pub struct BenchmarkEvent {
24 |     pub id: i32,
25 |     pub benchmark_id: i32,
26 |     pub name: String,
27 |     pub phase: String,
28 |     pub created_at: NaiveDateTime,
29 | }
30 | 
31 | #[derive(Insertable, Debug, Serialize, Deserialize)]
32 | #[table_name = "benchmark_events"]
33 | pub struct NewBenchmarkEvent {
34 |     pub benchmark_id: i32,
35 |     pub name: String,
36 |     pub phase: String,
37 |     pub created_at: NaiveDateTime,
38 | }
39 | 
40 | #[derive(Queryable, Identifiable, PartialEq, Serialize, Debug)]
41 | #[table_name = "benchmarks"]
42 | pub struct Benchmark {
43 |     pub id: i32,
44 |     pub setup: Value,
45 |     pub created_at: NaiveDateTime,
46 | }
47 | 
48 | #[derive(Insertable, Debug, Serialize, Deserialize)]
49 | #[table_name = "benchmarks"]
50 | pub struct NewBenchmark {
51 |     pub setup: Value,
52 | }
53 | 
54 | #[derive(Queryable, QueryableByName, Identifiable, Serialize, PartialEq, Clone, Debug)]
55 | #[table_name = "substrate_logs"]
56 | pub struct SubstrateLog {
57 |     pub id: i32,
58 |     pub created_at: NaiveDateTime,
59 |     pub logs: Value,
60 |     pub peer_connection_id: Option<i32>,
61 | }
62 | 
63 | #[derive(Insertable, Debug, Serialize, Deserialize)]
64 | #[table_name = "substrate_logs"]
65 | pub struct NewSubstrateLog {
66 |     pub logs: Value,
67 |     pub peer_connection_id: i32,
68 |     pub created_at: NaiveDateTime,
69 | }
70 | 
71 | #[derive(Queryable, QueryableByName, Identifiable, Serialize, PartialEq, Clone, Debug)]
72 | #[table_name = "peer_connections"]
73 | pub struct PeerConnection {
74 |     pub id: i32,
75 |     pub ip_addr: String,
76 |     pub peer_id: Option<String>,
77 |     pub created_at: NaiveDateTime,
78 |     pub audit: bool,
79 |     pub name: Option<String>,
80 |     pub chain: Option<String>,
81 |     pub version: Option<String>,
82 |     pub authority: Option<bool>,
83 |     pub startup_time: Option<i64>,
84 |     pub implementation: Option<String>,
85 | }
86 | 
87 | #[derive(Insertable, Debug, Serialize, Deserialize)]
88 | #[table_name = "peer_connections"]
89 | pub struct NewPeerConnection {
90 |     pub ip_addr: String,
91 |     pub peer_id: Option<String>,
92 |     pub audit: bool,
93 |     pub name: Option<String>,
94 |     pub chain: Option<String>,
95 |     pub version: Option<String>,
96 |     pub authority: Option<bool>,
97 |     pub startup_time: Option<i64>,
98 |     pub implementation: Option<String>,
99 | }
100 | 
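Each `New*` struct is its Queryable twin minus the DB-generated id. A sketch of assembling a log row for the batch writer in src/db/mod.rs (all values illustrative):

    use chrono::Utc;
    let row = NewSubstrateLog {
        logs: json!({ "msg": "system.interval", "peers": 7 }),
        peer_connection_id: 1, // hypothetical id from an earlier NewPeerConnection insert
        created_at: Utc::now().naive_utc(),
    };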
--------------------------------------------------------------------------------
/src/db/nodes.rs:
--------------------------------------------------------------------------------
1 | // Copyright 2019 Parity Technologies (UK) Ltd.
2 | // This file is part of Substrate Analytics.
3 | 
4 | // Substrate Analytics is free software: you can redistribute it and/or modify
5 | // it under the terms of the GNU General Public License as published by
6 | // the Free Software Foundation, either version 3 of the License, or
7 | // (at your option) any later version.
8 | 
9 | // Substrate Analytics is distributed in the hope that it will be useful,
10 | // but WITHOUT ANY WARRANTY; without even the implied warranty of
11 | // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 | // GNU General Public License for more details.
13 | 
14 | // You should have received a copy of the GNU General Public License
15 | // along with Substrate Analytics. If not, see <http://www.gnu.org/licenses/>.
16 | 
17 | use actix::prelude::*;
18 | use chrono::{NaiveDateTime, Utc};
19 | use diesel::sql_types::*;
20 | use diesel::{result::QueryResult, sql_query, RunQueryDsl};
21 | use failure::Error;
22 | use serde_json::Value;
23 | 
24 | use super::{filters::Filters, DbExecutor, RECORD_LIMIT};
25 | use crate::db::models::PeerConnection;
26 | 
27 | pub struct NodesQuery(pub Filters);
28 | 
29 | impl Message for NodesQuery {
30 |     type Result = Result<Vec<PeerConnection>, Error>;
31 | }
32 | 
33 | impl Handler<NodesQuery> for DbExecutor {
34 |     type Result = Result<Vec<PeerConnection>, Error>;
35 | 
36 |     fn handle(&mut self, msg: NodesQuery, _: &mut Self::Context) -> Self::Result {
37 |         self.get_nodes(msg.0)
38 |     }
39 | }
40 | 
41 | pub struct LogsQuery(pub Filters);
42 | 
43 | impl Message for LogsQuery {
44 |     type Result = Result<Vec<Log>, Error>;
45 | }
46 | 
47 | impl Handler<LogsQuery> for DbExecutor {
48 |     type Result = Result<Vec<Log>, Error>;
49 | 
50 |     fn handle(&mut self, msg: LogsQuery, _: &mut Self::Context) -> Self::Result {
51 |         let has_msg = msg.0.msg.is_some();
52 |         let has_target = msg.0.target.is_some();
53 |         if has_msg {
54 |             if has_target {
55 |                 self.get_log_msgs_with_target(msg.0)
56 |             } else {
57 |                 self.get_log_msgs(msg.0)
58 |             }
59 |         } else {
60 |             self.get_all_logs(msg.0)
61 |         }
62 |     }
63 | }
64 | 
65 | pub struct StatsQuery(pub Filters);
66 | 
67 | impl Message for StatsQuery {
68 |     type Result = Result<Vec<Stats>, Error>;
69 | }
70 | 
71 | impl Handler<StatsQuery> for DbExecutor {
72 |     type Result = Result<Vec<Stats>, Error>;
73 | 
74 |     fn handle(&mut self, msg: StatsQuery, _: &mut Self::Context) -> Self::Result {
75 |         self.get_log_stats(msg.0)
76 |     }
77 | }
78 | 
79 | #[derive(Serialize, Deserialize, Debug, QueryableByName)]
80 | pub struct Node {
81 |     #[sql_type = "Nullable<Text>"]
82 |     pub peer_id: Option<String>,
83 | }
84 | 
85 | #[derive(Serialize, Deserialize, Debug, QueryableByName)]
86 | pub struct Log {
87 |     #[sql_type = "Text"]
88 |     pub ip_addr: String,
89 |     #[sql_type = "Text"]
90 |     pub peer_id: String,
91 |     #[sql_type = "Text"]
92 |     pub msg: String,
93 |     #[sql_type = "Timestamp"]
94 |     pub created_at: NaiveDateTime,
95 |     #[sql_type = "Jsonb"]
96 |     pub logs: Value,
97 | }
98 | 
99 | #[derive(Serialize, Deserialize, Debug, QueryableByName)]
100 | pub struct Stats {
101 |     #[sql_type = "BigInt"]
102 |     pub qty: i64,
103 |     #[sql_type = "Text"]
104 |     pub log_type: String,
105 | }
106 | 
107 | #[derive(Serialize, Deserialize, Debug, QueryableByName)]
108 | pub struct PeerInfoDb {
109 |     #[sql_type = "Text"]
110 |     pub ip_addr: String,
111 |     #[sql_type = "Text"]
112 |     pub peer_id: String,
113 |     #[sql_type = "Timestamp"]
114 |     pub ts: NaiveDateTime,
115 |     #[sql_type = "Integer"]
116 |     pub peer_count: i32,
117 |     #[sql_type = "Integer"]
118 |     pub connection_id: i32,
119 |     #[sql_type = "Nullable<Jsonb>"]
120 |     pub not_connected: Option<Value>,
121 | }
122 | 
123 | impl PeerInfoDb {
124 |     pub fn get_not_connected(&self) -> Option<usize> {
125 |         if let Some(value) = &self.not_connected {
126 |             if let Some(obj) = value.as_object() {
127 |                 return Some(obj.len());
128 |             }
129 |         }
130 |         None
131 |     }
132 | }
133 | 
134 | #[derive(Serialize, Deserialize, Debug)]
135 | pub struct PeerInfo {
136 |     ip_addr: String,
137 |     peer_id: String,
138 |     connection_id: i32,
139 |     ts: NaiveDateTime,
140 |     peers_connected: i32,
141 |     not_connected: Option<usize>,
142 | }
143 | 
144 | impl From<PeerInfoDb> for PeerInfo {
145 |     fn from(p: PeerInfoDb) -> Self {
146 |         PeerInfo {
147 |             not_connected: p.get_not_connected(),
148 |             ip_addr: p.ip_addr,
149 |             peer_id: p.peer_id,
150 |             ts: p.ts,
151 |             peers_connected: p.peer_count,
152 |             connection_id: p.connection_id,
153 |         }
154 |     }
155 | }
156 | 
157 | impl DbExecutor {
158 |     fn get_log_stats(&self, filters: Filters) -> Result<Vec<Stats>, Error> {
159 |         match self.with_connection(|conn| {
160 |             let query = sql_query(
161 |                 "SELECT COUNT(log_type) as qty, log_type \
162 |                 FROM ( \
163 |                 SELECT logs->>'msg' AS log_type \
164 |                 FROM substrate_logs sl \
165 |                 LEFT JOIN peer_connections pc ON sl.peer_connection_id = pc.id \
166 |                 WHERE peer_id = $1) t \
167 |                 GROUP BY t.log_type",
168 |             )
169 |             .bind::<Text, _>(filters.peer_id.unwrap_or(String::new()));
170 |             debug!(
171 |                 "get_log_stats query: {}",
172 |                 diesel::debug_query::<diesel::pg::Pg, _>(&query)
173 |             );
174 |             let result: QueryResult<Vec<Stats>> = query.get_results(conn);
175 |             result
176 |         }) {
177 |             Ok(Ok(v)) => Ok(v),
178 |             Ok(Err(e)) => Err(e.into()),
179 |             Err(e) => Err(e.into()),
180 |         }
181 |     }
182 | 
183 |     fn get_peer_counts(&self, filters: Filters) -> Result<Value, Error> {
184 |         match self.with_connection(|conn| {
185 |             let query = sql_query(
186 |                 "SELECT ip_addr, peer_id, pc.id as connection_id, \
187 |                 CAST (logs->>'peers' as INTEGER) as peer_count, \
188 |                 CAST (logs->>'ts' as TIMESTAMP) as ts, \
189 |                 logs->'network_state'->'notConnectedPeers' as not_connected \
190 |                 FROM substrate_logs sl \
191 |                 LEFT JOIN peer_connections pc ON sl.peer_connection_id = pc.id \
192 |                 WHERE logs->>'msg' = 'system.interval' \
193 |                 AND peer_id = $1 \
194 |                 AND sl.created_at > $2 \
195 |                 AND sl.created_at < $3 \
196 |                 GROUP BY pc.id, peer_id, ip_addr, sl.created_at, logs \
197 |                 ORDER BY pc.id, ts ASC \
198 |                 LIMIT $4",
199 |             )
200 |             .bind::<Text, _>(filters.peer_id.unwrap_or(String::new()))
201 |             .bind::<Timestamp, _>(
202 |                 filters
203 |                     .start_time
204 |                     .unwrap_or_else(|| NaiveDateTime::from_timestamp(0, 0)),
205 |             )
206 |             .bind::<Timestamp, _>(
207 |                 filters
208 |                     .end_time
209 |                     .unwrap_or_else(|| NaiveDateTime::from_timestamp(Utc::now().timestamp(), 0)),
210 |             )
211 |             .bind::<Integer, _>(filters.limit.unwrap_or(RECORD_LIMIT));
212 |             debug!(
213 |                 "get_peer_counts query: {}",
214 |                 diesel::debug_query::<diesel::pg::Pg, _>(&query)
215 |             );
216 |             let result: QueryResult<Vec<PeerInfoDb>> = query.get_results(conn);
217 |             result
218 |         }) {
219 |             Ok(Ok(v)) => Ok(json!(v)),
220 |             Ok(Err(e)) => Err(e.into()),
221 |             Err(e) => Err(e.into()),
222 |         }
223 |     }
224 | 
225 |     fn get_nodes(&self, _filters: Filters) -> Result<Vec<PeerConnection>, Error> {
226 |         match self.with_connection(|conn| {
227 |             let query = "SELECT DISTINCT ON (peer_id) peer_id, \
228 |                 id, ip_addr, created_at, audit, name, \
229 |                 chain, version, authority, startup_time, implementation \
230 |                 FROM peer_connections \
231 |                 ORDER BY peer_id, created_at DESC";
232 |             let result: QueryResult<Vec<PeerConnection>> =
233 |                 diesel::sql_query(query).get_results(conn);
234 |             result
235 |         }) {
236 |             Ok(Ok(v)) => Ok(v),
237 |             Ok(Err(e)) => Err(e.into()),
238 |             Err(e) => Err(e.into()),
239 |         }
240 |     }
241 | 
242 |     fn get_all_logs(&self, filters: Filters) -> Result<Vec<Log>, Error> {
243 |         match self.with_connection(|conn| {
244 |             let query = sql_query(
245 |                 "SELECT sl.id, \
246 |                 ip_addr, \
247 |                 peer_id, \
248 |                 logs->>'msg' AS msg, \
249 |                 logs, \
250 |                 sl.created_at, \
251 |                 peer_connection_id \
252 |                 FROM substrate_logs sl \
253 |                 LEFT JOIN peer_connections pc ON sl.peer_connection_id = pc.id \
254 |                 WHERE peer_id = $1 \
255 |                 AND sl.created_at > $2 \
256 |                 AND sl.created_at < $3 \
257 |                 ORDER BY created_at DESC \
258 |                 LIMIT $4",
259 |             )
260 |             .bind::<Text, _>(filters.peer_id.unwrap_or(String::new()))
261 |             .bind::<Timestamp, _>(
262 |                 filters
263 |                     .start_time
264 |                     .unwrap_or_else(|| NaiveDateTime::from_timestamp(0, 0)),
265 |             )
266 |             .bind::<Timestamp, _>(
267 |                 filters
268 |                     .end_time
269 |                     .unwrap_or_else(|| NaiveDateTime::from_timestamp(Utc::now().timestamp(), 0)),
270 |             )
271 |             .bind::<Integer, _>(filters.limit.unwrap_or(RECORD_LIMIT));
272 |             debug!(
273 |                 "get_all_logs query: {}",
274 |                 diesel::debug_query::<diesel::pg::Pg, _>(&query)
275 |             );
276 |             let result: QueryResult<Vec<Log>> = query.get_results(conn);
277 |             result
278 |         }) {
279 |             Ok(Ok(v)) => Ok(v),
280 |             Ok(Err(e)) => Err(e.into()),
281 |             Err(e) => Err(e.into()),
282 |         }
283 |     }
284 | 
285 |     fn get_log_msgs(&self, filters: Filters) -> Result<Vec<Log>, Error> {
286 |         match self.with_connection(|conn| {
287 |             let query = sql_query(
288 |                 "SELECT sl.id, \
289 |                 ip_addr, \
290 |                 peer_id, \
291 |                 logs->>'msg' AS msg, \
292 |                 logs, \
293 |                 sl.created_at, \
294 |                 peer_connection_id \
295 |                 FROM substrate_logs sl \
296 |                 LEFT JOIN peer_connections pc ON sl.peer_connection_id = pc.id \
297 |                 WHERE peer_id = $1 \
298 |                 AND sl.created_at > $2 \
299 |                 AND sl.created_at < $3 \
300 |                 AND logs->>'msg' = $4
301 |                 ORDER BY created_at DESC \
302 |                 LIMIT $5",
303 |             )
304 |             .bind::<Text, _>(filters.peer_id.unwrap_or(String::new()))
305 |             .bind::<Timestamp, _>(
306 |                 filters
307 |                     .start_time
308 |                     .unwrap_or_else(|| NaiveDateTime::from_timestamp(0, 0)),
309 |             )
310 |             .bind::<Timestamp, _>(
311 |                 filters
312 |                     .end_time
313 |                     .unwrap_or_else(|| NaiveDateTime::from_timestamp(Utc::now().timestamp(), 0)),
314 |             )
315 |             .bind::<Text, _>(filters.msg.unwrap_or(String::new()))
316 |             .bind::<Integer, _>(filters.limit.unwrap_or(RECORD_LIMIT));
317 |             debug!(
318 |                 "get_log_msgs query: {}",
319 |                 diesel::debug_query::<diesel::pg::Pg, _>(&query)
320 |             );
321 |             let result: QueryResult<Vec<Log>> = query.get_results(conn);
322 |             result
323 |         }) {
324 |             Ok(Ok(v)) => Ok(v),
325 |             Ok(Err(e)) => Err(e.into()),
326 |             Err(e) => Err(e.into()),
327 |         }
328 |     }
329 | 
330 |     fn get_log_msgs_with_target(&self, filters: Filters) -> Result<Vec<Log>, Error> {
331 |         match self.with_connection(|conn| {
332 |             let query = sql_query(
333 |                 "SELECT sl.id, \
334 |                 ip_addr, \
335 |                 peer_id, \
336 |                 logs->>'msg' AS msg, \
337 |                 logs, \
338 |                 sl.created_at, \
339 |                 peer_connection_id \
340 |                 FROM substrate_logs sl \
341 |                 LEFT JOIN peer_connections pc ON sl.peer_connection_id = pc.id \
342 |                 WHERE peer_id = $1 \
343 |                 AND sl.created_at > $2 \
344 |                 AND sl.created_at < $3 \
345 |                 AND logs->>'msg' = $4
346 |                 AND logs->>'target' = $5
347 |                 ORDER BY created_at DESC \
348 |                 LIMIT $6",
349 |             )
350 |             .bind::<Text, _>(filters.peer_id.unwrap_or(String::new()))
351 |             .bind::<Timestamp, _>(
352 |                 filters
353 |                     .start_time
354 |                     .unwrap_or_else(|| NaiveDateTime::from_timestamp(0, 0)),
355 |             )
356 |             .bind::<Timestamp, _>(
357 |                 filters
358 |                     .end_time
359 |                     .unwrap_or_else(|| NaiveDateTime::from_timestamp(Utc::now().timestamp(), 0)),
360 |             )
361 |             .bind::<Text, _>(filters.msg.unwrap_or(String::new()))
362 |             .bind::<Text, _>(filters.target.unwrap_or(String::new()))
363 |             .bind::<Integer, _>(filters.limit.unwrap_or(RECORD_LIMIT));
364 |             debug!(
365 |                 "Query: get_log_msgs_with_target: {}",
366 |                 diesel::debug_query::<diesel::pg::Pg, _>(&query)
367 |             );
368 |             let result: QueryResult<Vec<Log>> = query.get_results(conn);
369 |             result
370 |         }) {
371 |             Ok(Ok(v)) => Ok(v),
372 |             Ok(Err(e)) => Err(e.into()),
373 |             Err(e) => Err(e.into()),
374 |         }
375 |     }
376 | }
377 | 
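Each query widens missing filters to the broadest sensible bound (epoch, now, RECORD_LIMIT). A caller sketch, assuming `db_addr: Addr<DbExecutor>` inside an async fn whose error type absorbs both the mailbox error and the query error; peer id illustrative:

    let filters = Filters {
        peer_id: Some("QmSoMePeer".to_string()),
        msg: Some("system.interval".to_string()),
        limit: Some(50),
        ..Default::default()
    };
    // First `?` = actix mailbox, second `?` = the query itself.
    let logs: Vec<Log> = db_addr.send(LogsQuery(filters)).await??;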
--------------------------------------------------------------------------------
/src/db/peer_data.rs:
--------------------------------------------------------------------------------
1 | // Copyright 2020 Parity Technologies (UK) Ltd.
2 | // This file is part of Substrate Analytics.
3 | 
4 | // Substrate Analytics is free software: you can redistribute it and/or modify
5 | // it under the terms of the GNU General Public License as published by
6 | // the Free Software Foundation, either version 3 of the License, or
7 | // (at your option) any later version.
8 | 
9 | // Substrate Analytics is distributed in the hope that it will be useful,
10 | // but WITHOUT ANY WARRANTY; without even the implied warranty of
11 | // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 | // GNU General Public License for more details.
13 | 
14 | // You should have received a copy of the GNU General Public License
15 | // along with Substrate Analytics. If not, see <http://www.gnu.org/licenses/>.
16 | 
17 | use actix::prelude::*;
18 | use chrono::{DateTime, Local, NaiveDateTime};
19 | use diesel::sql_types::*;
20 | use diesel::{result::QueryResult, sql_query, RunQueryDsl};
21 | use serde::Serialize;
22 | use serde_json::Value;
23 | use std::collections::HashMap;
24 | use std::hash::Hash;
25 | use std::time::{Duration, SystemTime};
26 | 
27 | use super::{filters::Filters, DbExecutor, RECORD_LIMIT};
28 | 
29 | #[derive(Serialize, Deserialize, QueryableByName, Clone, Debug)]
30 | pub struct SubstrateLog {
31 |     #[sql_type = "Jsonb"]
32 |     #[serde(flatten)]
33 |     pub log: Value,
34 |     #[sql_type = "Timestamp"]
35 |     pub created_at: NaiveDateTime,
36 | }
37 | 
38 | #[derive(Serialize, Debug)]
39 | pub struct PeerDataArray {
40 |     pub peer_message: PeerMessage,
41 |     pub data: Vec<SubstrateLog>,
42 | }
43 | 
44 | impl Message for PeerDataArray {
45 |     type Result = Result<(), &'static str>;
46 | }
47 | 
48 | #[derive(Hash, Serialize, Eq, PartialEq, Clone, Debug)]
49 | pub struct PeerMessage {
50 |     pub peer_id: String,
51 |     pub msg: String,
52 | }
53 | 
54 | pub struct PeerMessages(pub HashMap<PeerMessage, NaiveDateTime>);
55 | 
56 | #[derive(Clone, Debug)]
57 | pub struct PeerMessageTime {
58 |     pub peer_message: PeerMessage,
59 |     pub time: NaiveDateTime,
60 | }
61 | #[derive(Debug)]
62 | pub struct PeerMessageTimeList {
63 |     pub list: Vec<PeerMessageTime>,
64 |     pub cache: Recipient<PeerDataArray>,
65 | }
66 | 
67 | impl Message for PeerMessageTimeList {
68 |     type Result = ();
69 | }
70 | 
71 | impl Handler<PeerMessageTimeList> for DbExecutor {
72 |     type Result = ();
73 |     fn handle(&mut self, msg: PeerMessageTimeList, _ctx: &mut Self::Context) -> Self::Result {
74 |         debug!("Handling PeerMessageTimeList");
75 |         let cache = msg.cache;
76 |         let pmuts = msg.list;
77 |         for pmut in pmuts {
78 |             let p = pmut.clone();
79 |             let filters = Filters {
80 |                 start_time: Some(p.time),
81 |                 peer_id: Some(p.peer_message.peer_id),
82 |                 msg: Some(p.peer_message.msg),
83 |                 ..Default::default()
84 |             };
85 |             let pd_res = self.get_logs(filters);
86 |             if let Ok(pdr) = pd_res {
87 |                 // send to cache
88 |                 if let Err(e) = cache.do_send(pdr) {
89 |                     error!("Sending PeerDataResponse to Cache failed : {:?}", e);
90 |                 }
91 |             }
92 |         }
93 |     }
94 | }
95 | 
96 | impl DbExecutor {
97 |     fn get_logs(&self, filters: Filters) -> Result<PeerDataArray, failure::Error> {
98 |         let peer_id = filters.peer_id.clone().unwrap_or(String::new());
99 |         let msg = filters.msg.clone().unwrap_or(String::new());
100 |         let start_time = filters
101 |             .start_time
102 |             .unwrap_or_else(|| NaiveDateTime::from_timestamp(0, 0));
103 |         match self.with_connection(|conn| {
104 |             let query = sql_query(
105 |                 "SELECT sl.logs - 'ts' - 'id' - 'msg' - 'level' - 'line' as log, \
106 |                 sl.created_at \
107 |                 FROM substrate_logs sl \
108 |                 LEFT JOIN peer_connections pc ON sl.peer_connection_id = pc.id \
109 |                 WHERE peer_id = $1 \
110 |                 AND sl.created_at > $2 \
111 |                 AND sl.logs->>'msg' = $3
112 |                 ORDER BY created_at ASC \
113 |                 LIMIT $4",
114 |             )
115 |             .bind::<Text, _>(peer_id.clone())
116 |             .bind::<Timestamp, _>(start_time)
117 |             .bind::<Text, _>(msg.clone())
118 |             .bind::<Integer, _>(filters.limit.unwrap_or(RECORD_LIMIT));
119 |             debug!(
120 |                 "get_logs query: {}",
121 |                 diesel::debug_query::<diesel::pg::Pg, _>(&query)
122 |             );
123 |             let result: QueryResult<Vec<SubstrateLog>> = query.get_results(conn);
124 |             result
125 |         }) {
126 |             Ok(Ok(data)) => {
127 |                 let peer_message = PeerMessage { peer_id, msg };
128 |                 Ok(PeerDataArray { peer_message, data })
129 |             }
130 |             Ok(Err(e)) => Err(e.into()),
131 |             Err(e) => Err(e.into()),
132 |         }
133 |     }
134 | }
135 | 
136 | pub fn time_secs_ago(seconds_ago: u64) -> NaiveDateTime {
137 |     let now = SystemTime::now();
138 |     let ts = now
139 |         .checked_sub(Duration::from_secs(seconds_ago))
140 |         .expect("We should be using sane values for default_start_time");
141 |     let dt: DateTime<Local> = ts.into();
142 |     dt.naive_utc()
143 | }
144 | 
145 | impl std::fmt::Display for PeerMessage {
146 |     fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
147 |         write!(f, "({}, {})", self.peer_id, self.msg)
148 |     }
149 | }
150 | 
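time_secs_ago anchors incremental cache updates to a point in the recent past. A sketch of how a request for the last hour of one peer's `system.interval` data would be formed (peer id illustrative):

    let pmt = PeerMessageTime {
        peer_message: PeerMessage {
            peer_id: "QmSoMePeer".to_string(),
            msg: "system.interval".to_string(),
        },
        time: time_secs_ago(3_600), // only fetch rows newer than this
    };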
--------------------------------------------------------------------------------
/src/db/reputation.rs:
--------------------------------------------------------------------------------
1 | // Copyright 2019 Parity Technologies (UK) Ltd.
2 | // This file is part of Substrate Analytics.
3 | 
4 | // Substrate Analytics is free software: you can redistribute it and/or modify
5 | // it under the terms of the GNU General Public License as published by
6 | // the Free Software Foundation, either version 3 of the License, or
7 | // (at your option) any later version.
8 | 
9 | // Substrate Analytics is distributed in the hope that it will be useful,
10 | // but WITHOUT ANY WARRANTY; without even the implied warranty of
11 | // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 | // GNU General Public License for more details.
13 | 
14 | // You should have received a copy of the GNU General Public License
15 | // along with Substrate Analytics. If not, see <http://www.gnu.org/licenses/>.
16 | 
17 | use super::DbExecutor;
18 | use crate::db::filters::Filters;
19 | use actix::prelude::*;
20 | use chrono::{NaiveDateTime, Utc};
21 | use diesel::sql_types::*;
22 | use diesel::{result::QueryResult, sql_query, QueryDsl, RunQueryDsl};
23 | use failure::Error;
24 | use std::time::Duration;
25 | 
26 | /// Message to indicate what information is required for aggregate data response
27 | pub enum PeerReputationsQuery {
28 |     All(Filters),
29 |     Logged(Filters),
30 |     Mock(usize),
31 | }
32 | 
33 | impl Message for PeerReputationsQuery {
34 |     type Result = Result<Vec<PeerReputations>, Error>;
35 | }
36 | 
37 | impl Handler<PeerReputationsQuery> for DbExecutor {
38 |     type Result = Result<Vec<PeerReputations>, Error>;
39 | 
40 |     fn handle(&mut self, msg: PeerReputationsQuery, _: &mut Self::Context) -> Self::Result {
41 |         match msg {
42 |             PeerReputationsQuery::All(filters) => self.get_reputation_latest_all(filters),
43 |             PeerReputationsQuery::Logged(filters) => {
44 |                 self.get_reputation_latest_logged(self.get_logged_nodes()?, filters)
45 |             }
46 |             PeerReputationsQuery::Mock(qty) => self.get_mock_results(qty),
47 |         }
48 |     }
49 | }
50 | 
51 | /// Message to indicate what information is required for peer specific response
52 | pub struct PeerReputationQuery {
53 |     pub peer_id: String,
54 |     pub filters: Filters,
55 | }
56 | 
57 | impl Message for PeerReputationQuery {
58 |     type Result = Result<Vec<PeerReputation>, Error>;
59 | }
60 | 
61 | impl Handler<PeerReputationQuery> for DbExecutor {
62 |     type Result = Result<Vec<PeerReputation>, Error>;
63 | 
64 |     fn handle(&mut self, msg: PeerReputationQuery, _: &mut Self::Context) -> Self::Result {
65 |         self.get_peer_reputation(msg.peer_id, msg.filters)
66 |     }
67 | }
68 | 
69 | /// Contains aggregate data
70 | #[derive(Serialize, Deserialize, Debug, QueryableByName)]
71 | pub struct PeerReputations {
72 |     #[sql_type = "Text"]
73 |     reporting_peer: String,
74 |     #[sql_type = "Array<Text>"]
75 |     remote_peer: Vec<String>,
76 |     #[sql_type = "Array<BigInt>"]
77 |     reputation: Vec<i64>,
78 |     #[sql_type = "Array<Bool>"]
79 |     connected: Vec<bool>,
80 |     #[sql_type = "Timestamp"]
81 |     ts: NaiveDateTime,
82 | }
83 | 
84 | /// Contains individual data
85 | #[derive(Serialize, Deserialize, Debug, QueryableByName)]
86 | pub struct PeerReputation {
87 |     #[sql_type = "Text"]
88 |     reporting_peer: String,
89 |     #[sql_type = "BigInt"]
90 |     reputation: i64,
91 |     #[sql_type = "Bool"]
92 |     connected: bool,
93 |     #[sql_type = "Timestamp"]
94 |     ts: NaiveDateTime,
95 | }
96 | 
97 | fn start_time_from_offset(offset_s: u64) -> NaiveDateTime {
98 |     let utc_now = Utc::now();
99 |     let utc = utc_now
100 |         .checked_sub_signed(
101 |             chrono::Duration::from_std(Duration::from_secs(offset_s))
102 |                 .unwrap_or(chrono::Duration::seconds(60)),
103 |         )
104 |         .unwrap_or(utc_now);
105 |     NaiveDateTime::from_timestamp_opt(utc.timestamp(), utc.timestamp_subsec_nanos())
106 |         .unwrap_or(NaiveDateTime::from_timestamp(60, 0))
107 | }
108 | 
109 | impl DbExecutor {
110 |     fn get_logged_nodes(&self) -> Result<Vec<String>, Error> {
111 |         match self.with_connection(|conn| {
112 |             use crate::schema::peer_connections::dsl::*;
113 |             peer_connections
114 |                 .select(peer_id)
115 |                 .distinct()
116 |                 .load::<Option<String>>(conn)
117 |         }) {
118 |             Ok(Ok(v)) => {
119 |                 let r = v.into_iter().filter_map(|c| c).collect();
120 |                 Ok(r)
121 |             }
122 |             Ok(Err(e)) => Err(e.into()),
123 |             Err(e) => Err(e.into()),
124 |         }
125 |     }
126 | 
127 |     fn get_reputation_latest_all(&self, filters: Filters) -> Result<Vec<PeerReputations>, Error> {
128 |         match self.with_connection(|conn| {
129 |             let max_age_s = filters.max_age_s.unwrap_or_else(|| 60);
130 |             let start_time = start_time_from_offset(max_age_s as u64);
131 |             let sql =
132 |                 "SELECT \
133 |                 DISTINCT ON (reporting_peer) \
134 |                 peer_id as reporting_peer, \
135 |                 array_agg(peers.key::varchar) as remote_peer, \
136 |                 array_agg(jsonb_extract_path_text(peers.value, 'reputation')::bigint) as reputation, \
137 |                 array_agg(jsonb_extract_path_text(peers.value, 'connected')::boolean) as connected, \
138 |                 sl.created_at as ts \
139 |                 FROM peer_connections pc \
140 |                 INNER JOIN substrate_logs sl \
141 |                 ON peer_connection_id = pc.id \
142 |                 AND logs->>'msg' = 'system.network_state' \
143 |                 AND sl.created_at > $1 AT TIME ZONE 'UTC', \
144 |                 lateral jsonb_each(logs->'state'->'peerset'->'nodes') as peers \
145 |                 WHERE sl.id = ANY (\
146 |                 SELECT DISTINCT ON (peer_id) substrate_logs.id \
147 |                 FROM substrate_logs \
148 |                 INNER JOIN peer_connections ON peer_connection_id = peer_connections.id \
149 |                 WHERE logs ->> 'msg' = 'system.network_state' \
150 |                 AND substrate_logs.created_at > $2 AT TIME ZONE 'UTC' \
151 |                 ORDER BY peer_id, substrate_logs.created_at DESC \
152 |                 ) \
153 |                 GROUP BY reporting_peer, sl.created_at \
154 |                 LIMIT $3";
155 |             let query = sql_query(sql)
156 |                 .bind::<Timestamp, _>(start_time)
157 |                 .bind::<Timestamp, _>(start_time)
158 |                 .bind::<Integer, _>(filters.limit.unwrap_or(100));
159 |             debug!(
160 |                 "get_reputation_all query: {}",
161 |                 diesel::debug_query::<diesel::pg::Pg, _>(&query)
162 |             );
163 |             let result: QueryResult<Vec<PeerReputations>> = query.get_results(conn);
164 |             result
165 |         }) {
166 |             Ok(Ok(v)) => Ok(v),
167 |             Ok(Err(e)) => Err(e.into()),
168 |             Err(e) => Err(e.into()),
169 |         }
170 |     }
171 | 
172 |     fn get_reputation_latest_logged(
173 |         &self,
174 |         selected: Vec<String>,
175 |         filters: Filters,
176 |     ) -> Result<Vec<PeerReputations>, Error> {
177 |         match self.with_connection(|conn| {
178 |             let max_age_s = filters.max_age_s.unwrap_or_else(|| 60);
179 |             let start_time = start_time_from_offset(max_age_s as u64);
180 |             let sql =
181 |                 "SELECT \
182 |                 DISTINCT ON (reporting_peer) \
183 |                 peer_id as reporting_peer, \
184 |                 array_agg(peers.key::varchar) as remote_peer, \
185 |                 array_agg(jsonb_extract_path_text(peers.value, 'reputation')::bigint) as reputation, \
186 |                 array_agg(jsonb_extract_path_text(peers.value, 'connected')::boolean) as connected, \
187 |                 sl.created_at as ts \
188 |                 FROM peer_connections pc \
189 |                 INNER JOIN substrate_logs sl \
190 |                 ON peer_connection_id = pc.id \
191 |                 AND logs->>'msg' = 'system.network_state' \
192 |                 AND sl.created_at > $1 AT TIME ZONE 'UTC', \
193 |                 LATERAL jsonb_each(logs->'state'->'peerset'->'nodes') as peers \
194 |                 WHERE key::text = ANY ($2) \
195 |                 AND sl.id = ANY (\
196 |                 SELECT DISTINCT ON (peer_id) substrate_logs.id \
197 |                 FROM substrate_logs \
198 |                 INNER JOIN peer_connections ON peer_connection_id = peer_connections.id \
199 |                 WHERE logs ->> 'msg' = 'system.network_state' \
200 |                 AND substrate_logs.created_at > $3 AT TIME ZONE 'UTC' \
201 |                 ORDER BY peer_id, substrate_logs.created_at DESC \
202 |                 ) \
203 |                 GROUP BY reporting_peer, sl.created_at \
204 |                 LIMIT $4";
205 |             let query = sql_query(sql)
206 |                 .bind::<Timestamp, _>(start_time)
207 |                 .bind::<Array<Text>, _>(selected)
208 |                 .bind::<Timestamp, _>(start_time)
209 |                 .bind::<Integer, _>(filters.limit.unwrap_or(100));
210 | 
211 |             debug!(
212 |                 "get_reputation_selected query: {}",
213 |                 diesel::debug_query::<diesel::pg::Pg, _>(&query)
214 |             );
215 |             let result: QueryResult<Vec<PeerReputations>> = query.get_results(conn);
216 |             result
217 |         }) {
218 |             Ok(Ok(v)) => Ok(v),
219 |             Ok(Err(e)) => Err(e.into()),
220 |             Err(e) => Err(e.into()),
221 |         }
222 |     }
223 | 
224 |     fn get_peer_reputation(
225 |         &self,
226 |         selected: String,
227 |         filters: Filters,
228 |     ) -> Result<Vec<PeerReputation>, Error> {
229 |         match self.with_connection(|conn| {
230 |             let jsonb = format!("{{\"{}\": {{ }} }}", selected);
231 |             let sql = " \
232 |                 SELECT \
233 |                 DISTINCT ON (reporting_peer,ts) \
234 |                 peer_id as reporting_peer, \
235 |                 jsonb_extract_path_text(peers.value, 'reputation')::bigint as reputation, \
236 |                 jsonb_extract_path_text(peers.value, 'connected')::boolean as connected, \
237 |                 sl.created_at as ts \
238 |                 FROM peer_connections pc \
239 |                 INNER JOIN substrate_logs sl \
240 |                 ON peer_connection_id = pc.id, \
241 |                 LATERAL jsonb_each(logs->'state'->'peerset'->'nodes') as peers \
242 |                 WHERE logs->'state'->'peerset'->'nodes' @> ($1)::jsonb \
243 |                 AND logs->>'msg' = 'system.network_state' \
244 |                 AND sl.created_at > $2 AT TIME ZONE 'UTC' \
245 |                 AND sl.created_at < $3 AT TIME ZONE 'UTC' \
246 |                 GROUP BY reporting_peer, ts, reputation, connected \
247 |                 LIMIT $4";
248 |             let query =
249 |                 sql_query(sql)
250 |                     .bind::<Text, _>(jsonb)
251 |                     .bind::<Timestamp, _>(
252 |                         filters
253 |                             .start_time
254 |                             .unwrap_or_else(|| NaiveDateTime::from_timestamp(0, 0)),
255 |                     )
256 |                     .bind::<Timestamp, _>(filters.end_time.unwrap_or_else(|| {
257 |                         NaiveDateTime::from_timestamp(Utc::now().timestamp(), 0)
258 |                     }))
259 |                     .bind::<Integer, _>(filters.limit.unwrap_or(1000));
260 |             debug!(
261 |                 "get_peers_reputation query: {}",
262 |                 diesel::debug_query::<diesel::pg::Pg, _>(&query)
263 |             );
264 |             let result: QueryResult<Vec<PeerReputation>> = query.get_results(conn);
265 |             result
266 |         }) {
267 |             Ok(Ok(v)) => Ok(v),
268 |             Ok(Err(e)) => Err(e.into()),
269 |             Err(e) => Err(e.into()),
270 |         }
271 |     }
272 | 
273 |     /// For front-end tests
274 |     pub fn get_mock_results(&self, max: usize) -> Result<Vec<PeerReputations>, Error> {
275 |         use rand::distributions::Distribution;
276 |         use rand::Rng;
277 |         use statrs::distribution::Exponential;
278 |         let mut r = rand::thread_rng();
279 |         let n = Exponential::new(0.01).unwrap();
280 |         let mut results: Vec<PeerReputations> = Vec::new();
281 |         let mut peer_ids = vec![
282 |             "QmSk5HQbn6LhUwDiNMseVUjuRYhEtYj4aUZ6WfWoGURpdV",
283 |             "QmSk5HQbn6LhUwDiNMseVUjuRYhEtYj4aUZ6WfWoGURpdW",
284 |             "QmWv9Ww7znzgLFyCzf21SR6tUKXrmHCZH9KhebeH4gyE9f",
285 |             "QmWv9Ww7znzgLFyCzf21SR6tUKXrmHCZH9KhebeH4gyE9g",
286 |             "QmTtcYKJho9vFmqtMA548QBSmLbmwAkBSiEKK3kWKfb6bJ",
287 |             "QmTtcYKJho9vFmqtMA548QBSmLbmwAkBSiEKK3kWKfb6bK",
288 |             "QmQJmDorK9c8KjMF5PdWiH2WGUXyzJtgTeJ55S5gggdju6",
289 |             "QmQJmDorK9c8KjMF5PdWiH2WGUXyzJtgTeJ55S5gggdju7",
290 |         ];
291 |         if max < peer_ids.len() {
292 |             peer_ids.truncate(max);
293 |         };
294 |         for peer_id in peer_ids.clone() {
295 |             let mut p = vec![];
296 |             let mut c = vec![];
297 |             let mut re = vec![];
298 |             for peer_id2 in peer_ids.clone() {
299 |                 if peer_id == peer_id2 {
300 |                     continue;
301 |                 }
302 |                 let x = n.sample(&mut r) as i64;
303 |                 let y = r.gen_range(-1_000, 10_000);
304 |                 let rep = if y < 0 { -(x * y * y) } else { 0 };
305 |                 p.push(peer_id2.to_string());
306 |                 c.push(true);
307 |                 re.push(rep);
308 |             }
309 |             results.push(PeerReputations {
310 |                 reporting_peer: peer_id.to_string(),
311 |                 remote_peer: p,
312 |                 reputation: re,
313 |                 connected: c,
314 |                 ts: start_time_from_offset(10),
315 |             });
316 |         }
317 |         Ok(results)
318 |     }
319 | }
320 | 
321 | #[cfg(test)]
322 | mod tests {
323 |     #[test]
324 |     fn test() {}
325 | }
326 | 
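PeerReputations carries three parallel arrays built by array_agg, one entry per remote peer, so consumers re-pair them row-wise. A sketch within this module (`r` is one PeerReputations value; fields are crate-private):

    for ((remote, rep), conn) in r.remote_peer.iter().zip(&r.reputation).zip(&r.connected) {
        println!("{} sees {}: reputation {}, connected {}", r.reporting_peer, remote, rep, conn);
    }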
--------------------------------------------------------------------------------
/src/db/stats.rs:
--------------------------------------------------------------------------------
1 | // Copyright 2019 Parity Technologies (UK) Ltd.
2 | // This file is part of Substrate Analytics.
3 | 
4 | // Substrate Analytics is free software: you can redistribute it and/or modify
5 | // it under the terms of the GNU General Public License as published by
6 | // the Free Software Foundation, either version 3 of the License, or
7 | // (at your option) any later version.
8 | 
9 | // Substrate Analytics is distributed in the hope that it will be useful,
10 | // but WITHOUT ANY WARRANTY; without even the implied warranty of
11 | // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 | // GNU General Public License for more details.
13 | 
14 | // You should have received a copy of the GNU General Public License
15 | // along with Substrate Analytics. If not, see <http://www.gnu.org/licenses/>.
16 | 
17 | use actix::prelude::*;
18 | use diesel::sql_types::*;
19 | use diesel::{result::QueryResult, RunQueryDsl};
20 | use failure::Error;
21 | use serde_json::Value;
22 | 
23 | use super::DbExecutor;
24 | 
25 | /// Message to indicate what information is required
26 | /// Response is always json
27 | pub enum Query {
28 |     Db,
29 | }
30 | 
31 | impl Message for Query {
32 |     type Result = Result<Value, Error>;
33 | }
34 | 
35 | impl Handler<Query> for DbExecutor {
36 |     type Result = Result<Value, Error>;
37 | 
38 |     fn handle(&mut self, msg: Query, _: &mut Self::Context) -> Self::Result {
39 |         match msg {
40 |             Query::Db => self.get_db_size(),
41 |         }
42 |     }
43 | }
44 | 
45 | #[derive(Serialize, Deserialize, Debug, QueryableByName)]
46 | pub struct DbSize {
47 |     #[sql_type = "Text"]
48 |     relation: String,
49 |     #[sql_type = "Text"]
50 |     size: String,
51 | }
52 | 
53 | impl DbExecutor {
54 |     fn get_db_size(&self) -> Result<Value, Error> {
55 |         match self.with_connection(|conn| {
56 |             let query = "SELECT nspname || '.' || relname AS relation, \
57 |                 pg_size_pretty(pg_relation_size(C.oid)) AS size \
58 |                 FROM pg_class C \
59 |                 LEFT JOIN pg_namespace N ON (N.oid = C.relnamespace) \
60 |                 WHERE nspname NOT IN ('pg_catalog', 'information_schema') \
61 |                 ORDER BY pg_relation_size(C.oid) DESC \
62 |                 LIMIT 1000;";
63 |             let result: QueryResult<Vec<DbSize>> = diesel::sql_query(query).get_results(conn);
64 |             result
65 |         }) {
66 |             Ok(Ok(v)) => Ok(json!(v)),
67 |             Ok(Err(e)) => Err(e.into()),
68 |             Err(e) => Err(e.into()),
69 |         }
70 |     }
71 | }
72 | 
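A caller sketch of what the /stats/db route ultimately runs, assuming `db_addr: Addr<DbExecutor>` inside an async fn whose error type absorbs both the mailbox error and the query error:

    let sizes = db_addr.send(Query::Db).await??;
    println!("{}", sizes); // JSON array of { "relation": ..., "size": ... } rows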
--------------------------------------------------------------------------------
/src/main.rs:
--------------------------------------------------------------------------------
1 | // Copyright 2019 Parity Technologies (UK) Ltd.
2 | // This file is part of Substrate Analytics.
3 | 
4 | // Substrate Analytics is free software: you can redistribute it and/or modify
5 | // it under the terms of the GNU General Public License as published by
6 | // the Free Software Foundation, either version 3 of the License, or
7 | // (at your option) any later version.
8 | 
9 | // Substrate Analytics is distributed in the hope that it will be useful,
10 | // but WITHOUT ANY WARRANTY; without even the implied warranty of
11 | // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 | // GNU General Public License for more details.
13 | 
14 | // You should have received a copy of the GNU General Public License
15 | // along with Substrate Analytics. If not, see <http://www.gnu.org/licenses/>.
16 | 
17 | #[macro_use]
18 | extern crate log;
19 | extern crate env_logger;
20 | #[macro_use]
21 | extern crate diesel;
22 | #[macro_use]
23 | extern crate lazy_static;
24 | #[macro_use]
25 | extern crate serde_derive;
26 | #[macro_use]
27 | extern crate serde_json;
28 | 
29 | pub mod cache;
30 | pub mod db;
31 | pub mod schema;
32 | pub mod util;
33 | mod web;
34 | 
35 | use cache::Cache;
36 | 
37 | use dotenv::dotenv;
38 | use std::env;
39 | use std::time::Duration;
40 | 
41 | use crate::db::models::NewSubstrateLog;
42 | //use crate::db::peer_data::UpdateCache;
43 | use crate::db::*;
44 | use actix::prelude::*;
45 | use actix_web::{middleware, App, HttpServer};
46 | 
47 | lazy_static! {
48 |     /// *Must be set* Database URL
49 |     pub static ref DATABASE_URL: String =
50 |         env::var("DATABASE_URL").expect("Must have DATABASE_URL environment variable set");
51 |     /// Port to start server
52 |     pub static ref PORT: String = env::var("PORT").expect("Unable to get PORT from ENV");
53 |     /// Interval to send WS Ping
54 |     pub static ref HEARTBEAT_INTERVAL: Duration = Duration::from_secs(
55 |         parse_env("HEARTBEAT_INTERVAL").unwrap_or(5)
56 |     );
57 |     /// How long to wait before dropping an unresponsive WS connection
58 |     pub static ref CLIENT_TIMEOUT_S: Duration = Duration::from_secs(
59 |         parse_env("CLIENT_TIMEOUT_S").unwrap_or(10)
60 |     );
61 |     /// How often we clean out the DB of expired logs
62 |     pub static ref PURGE_INTERVAL_S: Duration = Duration::from_secs(
63 |         parse_env("PURGE_INTERVAL_S").unwrap_or(600)
64 |     );
65 |     /// Number of hours to keep logs in DB before deleting them
66 |     /// Does not affect logs received on the `/archive` route
67 |     pub static ref LOG_EXPIRY_H: u32 = parse_env("LOG_EXPIRY_H").unwrap_or(3);
68 |     /// Max number of pending connections to hold in backlog before returning a server error
69 |     pub static ref MAX_PENDING_CONNECTIONS: i32 = parse_env("MAX_PENDING_CONNECTIONS").unwrap_or(1024);
70 |     /// Max payload size for WS message
71 |     /// Defaults to accepting payloads up to 512 KiB because the 64 KiB (65_536 byte) default is not enough
72 |     pub static ref WS_MAX_PAYLOAD: usize = parse_env("WS_MAX_PAYLOAD").unwrap_or(524_288);
73 |     /// Number of threads to start for DbExecutor
74 |     pub static ref NUM_THREADS: usize = parse_env("NUM_THREADS").unwrap_or(num_cpus::get() * 3);
75 |     /// Connections to establish for DB pool
76 |     pub static ref DB_POOL_SIZE: u32 = parse_env("DB_POOL_SIZE").unwrap_or(*NUM_THREADS as u32);
77 |     /// Max batch size before saving logs to DB
78 |     pub static ref DB_BATCH_SIZE: usize = parse_env("DB_BATCH_SIZE").unwrap_or(1024);
79 |     /// Max amount of time to wait before saving logs to DB
80 |     pub static ref DB_SAVE_LATENCY_MS: Duration = Duration::from_millis(parse_env("DB_SAVE_LATENCY_MS").unwrap_or(100));
81 |     /// How long to wait for a response from the DB - warnings logged at this time and update aborted at 4x this value
82 |     pub static ref CACHE_UPDATE_TIMEOUT_S: Duration = Duration::from_secs(parse_env("CACHE_UPDATE_TIMEOUT_S").unwrap_or(15));
83 |     /// How often to poll the DB for new messages
84 |     pub static ref CACHE_UPDATE_INTERVAL_MS: Duration = Duration::from_millis(parse_env("CACHE_UPDATE_INTERVAL_MS").unwrap_or(1000));
85 |     /// Duration of history to store in cache
86 |     pub static ref CACHE_EXPIRY_S: u64 = parse_env("CACHE_EXPIRY_S").unwrap_or(10_800);
87 |     /// How long to keep an unused cache in memory until we drop it
88 |     pub static ref CACHE_TIMEOUT_S: u64 = parse_env("CACHE_TIMEOUT_S").unwrap_or(60);
89 |     /// Location of `static` directory
90 |     pub static ref ASSETS_PATH: String = parse_env("ASSETS_PATH").unwrap_or("./static".to_string());
91 | }
92 | 
93 | struct LogBuffer {
94 |     logs: Vec<NewSubstrateLog>,
95 |     db_arbiter: Recipient<LogBatch>,
96 | }
97 | 
98 | impl Actor for LogBuffer {
99 |     type Context = Context<Self>;
100 | 
101 |     fn started(&mut self, ctx: &mut Self::Context) {
102 |         ctx.set_mailbox_capacity(10_000);
103 |     }
104 | }
105 | 
106 | impl Message for NewSubstrateLog {
107 |     type Result = Result<(), &'static str>;
108 | }
109 | 
110 | impl Handler<NewSubstrateLog> for LogBuffer {
111 |     type Result = Result<(), &'static str>;
112 | 
113 |     fn handle(&mut self, msg: NewSubstrateLog, _: &mut Self::Context) -> Self::Result {
114 |         self.logs.push(msg);
115 |         Ok(())
116 |     }
117 | }
118 | 
119 | #[derive(Clone)]
120 | struct SaveLogs;
121 | 
122 | impl Message for SaveLogs {
123 |     type Result = Result<(), &'static str>;
124 | }
125 | 
126 | impl Handler<SaveLogs> for LogBuffer {
127 |     type Result = Result<(), &'static str>;
128 | 
129 |     fn handle(&mut self, _msg: SaveLogs, _: &mut Self::Context) -> Self::Result {
130 |         while !self.logs.is_empty() {
131 |             let lb = LogBatch(
132 |                 self.logs
133 |                     .split_off(self.logs.len().saturating_sub(*DB_BATCH_SIZE)),
134 |             );
135 |             self.db_arbiter
136 |                 .try_send(lb)
137 |                 .unwrap_or_else(|e| error!("Failed to send LogBatch to DB arbiter - {:?}", e));
138 |         }
139 |         Ok(())
140 |     }
141 | }
142 | 
143 | #[actix_rt::main]
144 | async fn main() -> std::io::Result<()> {
145 |     dotenv().ok();
146 |     env_logger::init();
147 |     log_statics();
148 |     info!("Starting substrate-analytics");
149 |     info!("Creating database pool");
150 |     let pool = create_pool();
151 |     info!("Starting DbArbiter with {} threads", *NUM_THREADS);
152 | 
153 |     let db_arbiter = SyncArbiter::start(*NUM_THREADS, move || db::DbExecutor::new(pool.clone()));
154 |     info!("DbExecutor started");
155 | 
156 |     let cache = Cache::new(db_arbiter.clone()).start();
157 | 
158 |     let log_buffer = LogBuffer {
159 |         logs: Vec::new(),
160 |         db_arbiter: db_arbiter.clone().recipient(),
161 |     }
162 |     .start();
163 | 
164 |     util::PeriodicAction {
165 |         interval: *PURGE_INTERVAL_S,
166 |         message: PurgeLogs {
167 |             hours_valid: *LOG_EXPIRY_H,
168 |         },
169 |         recipient: db_arbiter.clone().recipient(),
170 |     }
171 |     .start();
172 | 
173 |     util::PeriodicAction {
174 |         interval: *DB_SAVE_LATENCY_MS,
175 |         message: SaveLogs,
176 |         recipient: log_buffer.clone().recipient(),
177 |     }
178 |     .start();
179 | 
180 |     let metrics = web::metrics::Metrics::default();
181 |     let address = format!("0.0.0.0:{}", &*PORT);
182 |     info!("Starting server on: {}", &address);
183 |     HttpServer::new(move || {
184 |         App::new()
185 |             .data(db_arbiter.clone())
186 |             .data(metrics.clone())
187 |             .data(log_buffer.clone())
188 |             .data(cache.clone())
189 |             .data(actix_web::web::JsonConfig::default().limit(4096))
190 |             .wrap(middleware::NormalizePath)
191 |             .wrap(middleware::Logger::default())
192 |             .configure(web::nodes::configure)
193 |             .configure(web::reputation::configure)
194 |             .configure(web::stats::configure)
195 |             .configure(web::metrics::configure)
196 |             .configure(web::benchmarks::configure)
197 |             .configure(web::dashboard::configure)
198 |             .configure(web::feed::configure)
199 |             .configure(web::root::configure)
200 |     })
201 |     .backlog(*MAX_PENDING_CONNECTIONS)
202 |     .bind(&address)?
203 |     .run()
204 |     .await
205 | }
206 | 
207 | // Private
208 | 
209 | fn log_statics() {
210 |     info!("DATABASE_URL has been set");
211 |     info!("PORT = {:?}", *PORT);
212 |     info!("NUM_THREADS = {:?}", *NUM_THREADS);
213 |     info!("HEARTBEAT_INTERVAL = {:?}", *HEARTBEAT_INTERVAL);
214 |     info!("CLIENT_TIMEOUT_S = {:?}", *CLIENT_TIMEOUT_S);
215 |     info!("MAX_PENDING_CONNECTIONS = {:?}", *MAX_PENDING_CONNECTIONS);
216 |     info!("WS_MAX_PAYLOAD = {:?} bytes", *WS_MAX_PAYLOAD);
217 |     info!("DB_POOL_SIZE = {:?}", *DB_POOL_SIZE);
218 |     info!("DB_BATCH_SIZE = {:?}", *DB_BATCH_SIZE);
219 |     info!("DB_SAVE_LATENCY_MS = {:?}", *DB_SAVE_LATENCY_MS);
220 |     info!("PURGE_INTERVAL_S = {:?}", *PURGE_INTERVAL_S);
221 |     info!("LOG_EXPIRY_H = {:?}", *LOG_EXPIRY_H);
222 |     info!("CACHE_UPDATE_TIMEOUT_S = {:?}", *CACHE_UPDATE_TIMEOUT_S);
223 |     info!("CACHE_UPDATE_INTERVAL_MS = {:?}", *CACHE_UPDATE_INTERVAL_MS);
224 |     info!("CACHE_EXPIRY_S = {:?}", *CACHE_EXPIRY_S);
225 |     info!("CACHE_TIMEOUT_S = {:?}", *CACHE_TIMEOUT_S);
226 | }
227 | 
228 | fn parse_env<T>(var: &'static str) -> Result<T, ()>
229 | where
230 |     T: std::str::FromStr,
231 | {
232 |     env::var(var)
233 |         .map_err(|_| ())
234 |         .and_then(|v| v.parse().map_err(|_| ()))
235 | }
236 | 
--------------------------------------------------------------------------------
/src/schema.rs:
--------------------------------------------------------------------------------
1 | table! {
2 |     benchmark_events (id) {
3 |         id -> Int4,
4 |         benchmark_id -> Int4,
5 |         name -> Varchar,
6 |         phase -> Varchar,
7 |         created_at -> Timestamp,
8 |     }
9 | }
10 | 
11 | table! {
12 |     benchmarks (id) {
13 |         id -> Int4,
14 |         setup -> Jsonb,
15 |         created_at -> Timestamp,
16 |     }
17 | }
18 | 
19 | table! {
20 |     host_systems (id) {
21 |         id -> Int4,
22 |         description -> Varchar,
23 |         os -> Varchar,
24 |         cpu_qty -> Int4,
25 |         cpu_clock -> Int4,
26 |         ram_mb -> Int4,
27 |         disk_info -> Varchar,
28 |     }
29 | }
30 | 
31 | table! {
32 |     peer_connections (id) {
33 |         id -> Int4,
34 |         ip_addr -> Varchar,
35 |         peer_id -> Nullable<Varchar>,
36 |         created_at -> Timestamp,
37 |         audit -> Bool,
38 |         name -> Nullable<Varchar>,
39 |         chain -> Nullable<Varchar>,
40 |         version -> Nullable<Varchar>,
41 |         authority -> Nullable<Bool>,
42 |         startup_time -> Nullable<Int8>,
43 |         implementation -> Nullable<Varchar>,
44 |     }
45 | }
46 | 
47 | table! {
48 |     substrate_logs (id) {
49 |         id -> Int4,
50 |         created_at -> Timestamp,
51 |         logs -> Jsonb,
52 |         peer_connection_id -> Int4,
53 |     }
54 | }
55 | 
56 | joinable!(benchmark_events -> benchmarks (benchmark_id));
57 | joinable!(substrate_logs -> peer_connections (peer_connection_id));
58 | 
59 | allow_tables_to_appear_in_same_query!(
60 |     benchmark_events,
61 |     benchmarks,
62 |     host_systems,
63 |     peer_connections,
64 |     substrate_logs,
65 | );
66 | 
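The table! definitions generate a typed query DSL, and the joinable! lines allow typed joins between the declared tables. A sketch, assuming `conn: &PgConnection` and `diesel::prelude::*` in scope:

    use crate::schema::peer_connections::dsl::*;
    // Count how many recorded connections were audit connections.
    let audit_rows: i64 = peer_connections.filter(audit.eq(true)).count().get_result(conn)?;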
--------------------------------------------------------------------------------
/src/util/mod.rs:
--------------------------------------------------------------------------------
1 | // Copyright 2019 Parity Technologies (UK) Ltd.
2 | // This file is part of Substrate Analytics.
3 | 
4 | // Substrate Analytics is free software: you can redistribute it and/or modify
5 | // it under the terms of the GNU General Public License as published by
6 | // the Free Software Foundation, either version 3 of the License, or
7 | // (at your option) any later version.
8 | 
9 | // Substrate Analytics is distributed in the hope that it will be useful,
10 | // but WITHOUT ANY WARRANTY; without even the implied warranty of
11 | // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 | // GNU General Public License for more details.
13 | 
14 | // You should have received a copy of the GNU General Public License
15 | // along with Substrate Analytics. If not, see <http://www.gnu.org/licenses/>.
16 | 
17 | use actix::prelude::*;
18 | use std::time::Duration;
19 | 
20 | pub struct PeriodicAction<M: Message<Result = Result<(), &'static str>> + Send> {
21 |     pub interval: Duration,
22 |     pub message: M,
23 |     pub recipient: Recipient<M>,
24 | }
25 | 
26 | impl<M: Message<Result = Result<(), &'static str>> + Clone + Send + 'static + Unpin> Actor
27 |     for PeriodicAction<M>
28 | {
29 |     type Context = Context<Self>;
30 | 
31 |     fn started(&mut self, ctx: &mut Context<Self>) {
32 |         ctx.run_interval(self.interval, |act, _ctx| {
33 |             if let Err(e) = act.recipient.try_send(act.message.clone()) {
34 |                 error!("PeriodicAction: unable to send message to recipient. {}", e);
35 |             }
36 |         });
37 |     }
38 | }
39 | 
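Any Clone-able actix message whose Result is Result<(), &'static str> can be scheduled this way; main.rs uses it for PurgeLogs and SaveLogs. A sketch with a hypothetical Tick message:

    #[derive(Clone)]
    struct Tick;
    impl actix::Message for Tick {
        type Result = Result<(), &'static str>;
    }
    // PeriodicAction { interval, message: Tick, recipient: some_actor.recipient() }.start();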
--------------------------------------------------------------------------------
/src/web/benchmarks.rs:
--------------------------------------------------------------------------------
1 | // Copyright 2019 Parity Technologies (UK) Ltd.
2 | // This file is part of Substrate Analytics.
3 | 
4 | // Substrate Analytics is free software: you can redistribute it and/or modify
5 | // it under the terms of the GNU General Public License as published by
6 | // the Free Software Foundation, either version 3 of the License, or
7 | // (at your option) any later version.
8 | 
9 | // Substrate Analytics is distributed in the hope that it will be useful,
10 | // but WITHOUT ANY WARRANTY; without even the implied warranty of
11 | // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 | // GNU General Public License for more details.
13 | 
14 | // You should have received a copy of the GNU General Public License
15 | // along with Substrate Analytics. If not, see <http://www.gnu.org/licenses/>.
16 | 
17 | use super::get_filters;
18 | use super::metrics::Metrics;
19 | use crate::db::{benchmarks::*, models::*, DbExecutor};
20 | use actix::prelude::*;
21 | use actix_web::{HttpRequest, HttpResponse};
22 | 
23 | pub fn configure(cfg: &mut actix_web::web::ServiceConfig) {
24 |     cfg.service(
25 |         actix_web::web::scope("/benchmarks/")
26 |             .route("/example/", actix_web::web::get().to(example))
27 |             .route("/events/", actix_web::web::post().to(new_event))
28 |             .route(
29 |                 "/{benchmark_id}/targets/",
30 |                 actix_web::web::get().to(targets),
31 |             )
32 |             .route("/{benchmark_id}/events/", actix_web::web::get().to(events))
33 |             .route("", actix_web::web::get().to(all))
34 |             .route("", actix_web::web::post().to(new)),
35 |     );
36 | }
37 | 
38 | async fn all(
39 |     req: HttpRequest,
40 |     db: actix_web::web::Data<Addr<DbExecutor>>,
41 |     metrics: actix_web::web::Data<Metrics>,
42 | ) -> Result<HttpResponse, actix_web::Error> {
43 |     metrics.inc_req_count();
44 |     let filters = get_filters(&req);
45 |     let res = db.send(Query::All(filters)).await?;
46 |     match res {
47 |         Ok(r) => Ok(HttpResponse::Ok().json(r)),
48 |         Err(e) => {
49 |             error!("Could not complete benchmarks query: {:?}", e);
50 |             Ok(HttpResponse::InternalServerError().json(json!("Error while processing query")))
51 |         }
52 |     }
53 | }
54 | 
55 | async fn targets(
56 |     req: HttpRequest,
57 |     db: actix_web::web::Data<Addr<DbExecutor>>,
58 |     metrics: actix_web::web::Data<Metrics>,
59 | ) -> Result<HttpResponse, actix_web::Error> {
60 |     metrics.inc_req_count();
61 |     let benchmark_id: i32 = req
62 |         .match_info()
63 |         .get("benchmark_id")
64 |         .expect("benchmark_id should be available because the route matched")
65 |         .to_string()
66 |         .parse()
67 |         .unwrap_or(0);
68 |     let res = db.send(Query::Targets(benchmark_id)).await?;
69 |     match res {
70 |         Ok(r) => Ok(HttpResponse::Ok().json(r)),
71 |         Err(e) => {
72 |             error!("Could not complete benchmarks query: {:?}", e);
73 |             Ok(HttpResponse::InternalServerError().json(json!("Error while processing query")))
74 |         }
75 |     }
76 | }
77 | 
78 | async fn events(
79 |     req: HttpRequest,
80 |     db: actix_web::web::Data<Addr<DbExecutor>>,
81 |     metrics: actix_web::web::Data<Metrics>,
82 | ) -> Result<HttpResponse, actix_web::Error> {
83 |     metrics.inc_req_count();
84 |     let benchmark_id: i32 = req
85 |         .match_info()
86 |         .get("benchmark_id")
87 |         .expect("benchmark_id should be available because the route matched")
88 |         .to_string()
89 |         .parse()
90 |         .unwrap_or(0);
91 |     let res = db.send(Query::Events(benchmark_id)).await?;
92 |     match res {
93 |         Ok(r) => Ok(HttpResponse::Ok().json(r)),
94 |         Err(e) => {
95 |             error!("Could not complete benchmarks query: {:?}", e);
96 |             Ok(HttpResponse::InternalServerError().json(json!("Error while processing query")))
97 |         }
98 |     }
99 | }
100 | 
101 | // TODO move to generic Profiling module
102 | //async fn timings(
103 | //    req: HttpRequest,
104 | //    db: actix_web::web::Data<Addr<DbExecutor>>,
105 | //    metrics: actix_web::web::Data<Metrics>,
106 | //) -> Result<HttpResponse, actix_web::Error> {
107 | //    metrics.inc_req_count();
108 | //    let event_id: i32 = req
109 | //        .match_info()
110 | //        .get("event_id")
111 | //        .expect("event_id should be available because the route matched")
112 | //        .to_string()
113 | //        .parse()
114 | //        .unwrap_or(0);
115 | //    let filters = get_filters(&req);
116 | //    let res = db.send(Query::Events(benchmark_id)).await?;
117 | //    match res {
118 | //        Ok(r) => Ok(HttpResponse::Ok().json(r)),
119 | //        Err(e) => {
120 | //            error!("Could not complete benchmarks query: {:?}", e);
121 | //            Ok(HttpResponse::InternalServerError().json(json!("Error while processing query")))
122 | //        }
123 | //    }
124 | //}
125 | 
126 | async fn new(
127 |     item: actix_web::web::Json<NewBenchmark>,
128 |     db: actix_web::web::Data<Addr<DbExecutor>>,
129 |     metrics: actix_web::web::Data<Metrics>,
130 | ) -> Result<HttpResponse, actix_web::Error> {
131 |     metrics.inc_req_count();
132 |     let res = db.send(item.into_inner()).await?;
133 |     match res {
134 |         Ok(r) => Ok(HttpResponse::Ok().json(r)),
135 |         Err(e) => {
136 |             error!("Could not create New Benchmark: {:?}", e);
137 |             Ok(HttpResponse::InternalServerError().json(json!({"error": e.to_string()})))
138 |         }
139 |     }
140 | }
141 | 
142 | async fn new_event(
143 |     item: actix_web::web::Json<NewBenchmarkEvent>,
144 |     db: actix_web::web::Data<Addr<DbExecutor>>,
145 |     metrics: actix_web::web::Data<Metrics>,
146 | ) -> Result<HttpResponse, actix_web::Error> {
147 |     metrics.inc_req_count();
148 |     let res = db.send(item.into_inner()).await?;
149 |     match res {
150 |         Ok(r) => Ok(HttpResponse::Ok().json(r)),
151 |         Err(e) => {
152 |             error!("Could not create New BenchmarkEvent: {:?}", e);
153 |             Ok(HttpResponse::InternalServerError().json(json!({"error": e.to_string()})))
154 |         }
155 |     }
156 | }
157 | 
158 | async fn example(metrics: actix_web::web::Data<Metrics>) -> Result<HttpResponse, actix_web::Error> {
159 |     metrics.inc_req_count();
160 |     Ok(HttpResponse::Ok().finish())
161 | }
162 | 
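POST /benchmarks/ deserializes its body into NewBenchmark, and get_targets in src/db/benchmarks.rs later reads setup->'substrate'->>'peerId' out of the stored JSON, so a useful setup blob should carry at least that key. A sketch of such a payload (values illustrative):

    let body = json!({
        "setup": {
            "substrate": { "peerId": "QmSoMePeer" }
        }
    });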
--------------------------------------------------------------------------------
/src/web/dashboard.rs:
--------------------------------------------------------------------------------
1 | // Copyright 2019 Parity Technologies (UK) Ltd.
2 | // This file is part of Substrate Analytics.
3 | 
4 | // Substrate Analytics is free software: you can redistribute it and/or modify
5 | // it under the terms of the GNU General Public License as published by
6 | // the Free Software Foundation, either version 3 of the License, or
7 | // (at your option) any later version.
8 | 
9 | // Substrate Analytics is distributed in the hope that it will be useful,
10 | // but WITHOUT ANY WARRANTY; without even the implied warranty of
11 | // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12 | // GNU General Public License for more details.
13 | 
14 | // You should have received a copy of the GNU General Public License
15 | // along with Substrate Analytics.  If not, see <http://www.gnu.org/licenses/>.
16 | 
17 | use crate::ASSETS_PATH;
18 | use actix_files::Files;
19 | 
20 | pub fn configure(cfg: &mut actix_web::web::ServiceConfig) {
21 |     cfg.service(
22 |         Files::new(
23 |             "dashboard/benchmarks",
24 |             &format!("{}/benchmarks/", *ASSETS_PATH),
25 |         )
26 |         .index_file("index.html"),
27 |     );
28 |     cfg.service(
29 |         Files::new(
30 |             "dashboard/profiling",
31 |             &format!("{}/profiling/", *ASSETS_PATH),
32 |         )
33 |         .index_file("index.html"),
34 |     );
35 |     cfg.service(
36 |         Files::new(
37 |             "dashboard/reputation",
38 |             &format!("{}/reputation/", *ASSETS_PATH),
39 |         )
40 |         .index_file("index.html"),
41 |     );
42 | }
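The three `Files` services resolve requests to disk relative to `ASSETS_PATH`, serving `index.html` as the directory index:

    // GET dashboard/benchmarks/... -> {ASSETS_PATH}/benchmarks/...
    // GET dashboard/profiling/...  -> {ASSETS_PATH}/profiling/...
    // GET dashboard/reputation/... -> {ASSETS_PATH}/reputation/...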
--------------------------------------------------------------------------------
/src/web/feed.rs:
--------------------------------------------------------------------------------
1 | use crate::cache::{Cache, Interest, Subscription};
2 | use crate::db::peer_data::{PeerDataArray, PeerMessage, SubstrateLog};
3 | use crate::web::metrics::Metrics;
4 | use actix::prelude::*;
5 | use actix_web::{web, web::Data, Error, HttpRequest, HttpResponse};
6 | use actix_web_actors::ws;
7 | use chrono::NaiveDateTime;
8 | use serde_json::Value;
9 | use std::collections::{HashMap, VecDeque};
10 | use std::convert::TryFrom;
11 | use std::convert::TryInto;
12 | use std::time::{Duration, Instant};
13 | 
14 | const HEARTBEAT_INTERVAL: Duration = Duration::from_secs(5);
15 | const CLIENT_TIMEOUT_S: Duration = Duration::from_secs(60);
16 | 
17 | pub fn configure(cfg: &mut actix_web::web::ServiceConfig) {
18 |     cfg.service(actix_web::web::scope("/feed/").route("", actix_web::web::get().to(ws_index)));
19 | }
20 | 
21 | async fn ws_index(
22 |     r: HttpRequest,
23 |     stream: web::Payload,
24 |     cache: Data<Addr<Cache>>,
25 |     metrics: actix_web::web::Data<Metrics>,
26 | ) -> Result<HttpResponse, Error> {
27 |     ws::start(WebSocket::new(cache, metrics), &r, stream)
28 | }
29 | 
30 | struct WebSocket {
31 |     hb: Instant,
32 |     cache: Data<Addr<Cache>>,
33 |     metrics: actix_web::web::Data<Metrics>,
34 |     aggregate_subscriptions: HashMap<PeerMessage, AggregateSubscription>,
35 | }
36 | 
37 | impl Drop for WebSocket {
38 |     fn drop(&mut self) {
39 |         self.metrics.dec_concurrent_feed_count();
40 |     }
41 |     // info("Dropped client feed connection, mailbox backlog = {}", );
42 | }
43 | 
44 | impl Handler<PeerDataArray> for WebSocket {
45 |     type Result = Result<(), &'static str>;
46 | 
47 |     fn handle(&mut self, msg: PeerDataArray, ctx: &mut Self::Context) -> Self::Result {
48 |         match self.aggregate_subscriptions.get_mut(&msg.peer_message) {
49 |             Some(subs) => {
50 |                 subs.aggregate_remainder.append(&mut msg.data.into());
51 |                 let mut aggregates: Vec<(NaiveDateTime, HashMap<(String, String), Vec<i64>>)> =
52 |                     Vec::new();
53 |                 let mut accum: HashMap<(String, String), Vec<i64>> = HashMap::new();
54 |                 let mut last_index = 0usize;
55 |                 let mut start_ts = subs
56 |                     .aggregate_remainder
57 |                     .get(0)
58 |                     .expect(
59 |                         "Shouldn't be empty because we don't send to Subscribers if array is empty",
60 |                     )
61 |                     .created_at;
62 |                 debug!("Aggregate interval start_ts initialized to: {:?}", start_ts);
63 |                 for (idx, log) in subs.aggregate_remainder.iter().enumerate() {
64 |                     if log.created_at.timestamp()
65 |                         > (start_ts
66 |                             .checked_add_signed(
67 |                                 chrono::Duration::from_std(subs.aggregate.update_interval)
68 |                                     .expect("Shouldn't be out of range"),
69 |                             )
70 |                             .expect("Shouldn't overflow"))
71 |                         .timestamp()
72 |                     {
73 |                         // Increment time, store accum in aggregates, store index we last used
74 |                         let interval_dur =
75 |                             chrono::Duration::from_std(subs.aggregate.update_interval)
76 |                                 .expect("Shouldn't be out of range");
77 |                         while start_ts
78 |                             .checked_add_signed(interval_dur)
79 |                             .expect("Shouldn't overflow")
80 |                             < log.created_at
81 |                         {
82 |                             start_ts = start_ts
83 |                                 .checked_add_signed(interval_dur)
84 |                                 .expect("Shouldn't overflow");
85 |                         }
86 |                         debug!(
87 |                             "Aggregate interval start_ts incremented to = {:?}",
88 |                             start_ts
89 |                         );
90 |                         aggregates.push((start_ts, std::mem::replace(&mut accum, HashMap::new())));
91 |                         last_index = idx;
92 |                     } else {
93 |                         let l_time = log.log["time"].as_str();
94 |                         let l_target = log.log["target"].as_str();
95 |                         let l_name = log.log["name"].as_str();
96 |                         if let (Some(Some(time)), Some(target), Some(name)) =
97 |                             (l_time.map(|x| x.parse::<i64>().ok()), l_target, l_name)
98 |                         {
99 |                             accum
100 |                                 .entry((target.to_string(), name.to_string()))
101 |                                 .or_insert(Vec::new())
102 |                                 .push(time);
103 |                         } else {
104 |                             debug!(
105 |                                 "Unable to parse `time`, `target` or `name` from log: {:?}",
106 |                                 log.log
107 |                             )
108 |                         }
109 |                     }
110 |                 }
111 |                 // remove measurements that have been aggregated
112 |                 subs.aggregate_remainder = subs.aggregate_remainder.split_off(last_index);
113 |                 let mut results = Vec::new();
114 |                 // Go through each aggregate interval
115 |                 for (ndt, aggregate_map) in aggregates {
116 |                     // Go through each target/name partitioned array of measurements for this interval
117 |                     trace!(
118 |                         "Aggregate time: {:?}, aggregate map len = {}",
119 |                         ndt,
120 |                         aggregate_map.len()
121 |                     );
122 |                     for ((target, name), mut measurements) in aggregate_map {
123 |                         trace!(
124 |                             "In target: {} name: {} - measurements.len = {}",
125 |                             target,
126 |                             name,
127 |                             measurements.len()
128 |                         );
129 |                         let time: i64;
130 |                         match subs.aggregate.aggregate_type {
131 |                             AggregateType::Mean => {
132 |                                 time = measurements.iter().sum::<i64>() / measurements.len() as i64
133 |                             }
134 |                             AggregateType::Median => {
135 |                                 measurements.sort();
136 |                                 time = measurements[(measurements.len() as f64 * 0.5) as usize]
137 |                             }
138 |                             AggregateType::Min => {
139 |                                 time = measurements
140 |                                     .iter()
141 |                                     .min()
142 |                                     .expect("Iterator should not be empty")
143 |                                     .to_owned()
144 |                             }
145 |                             AggregateType::Max => {
146 |                                 time = measurements
147 |                                     .iter()
148 |                                     .max()
149 |                                     .expect("Iterator should not be empty")
150 |                                     .to_owned()
151 |                             }
152 |                             AggregateType::Percentile90 => {
153 |                                 measurements.sort();
154 |                                 time = measurements[(measurements.len() as f64 * 0.9) as usize]
155 |                             }
156 |                         }
157 |                         let agm = AggregateMeasurement {
158 |                             time,
159 |                             name,
160 |                             target,
161 |                             values: "".to_string(),
162 |                             created_at: ndt,
163 |                         };
164 |                         results.push(agm);
165 |                     }
166 |                 }
167 |                 if results.is_empty() {
168 |                     return Ok(());
169 |                 }
170 |                 let message = AggregateDataMessage {
171 |                     peer_message: PeerMessage {
172 |                         peer_id: msg.peer_message.peer_id,
173 |                         msg: msg.peer_message.msg,
174 |                     },
175 |                     data: results,
176 |                 };
177 |                 ctx.text(json!(message).to_string())
178 |             }
179 |             None => {
180 |                 trace!("No match for aggregate subscription in PeerDataResponse");
181 |                 ctx.text(json!(msg).to_string())
182 |             }
183 |         }
184 |         Ok(())
185 |     }
186 | }
187 | 
188 | #[derive(Serialize, Debug)]
189 | struct AggregateMeasurement {
190 |     time: i64,
191 |     name: String,
192 |     target: String,
193 |     values: String,
194 |     created_at: NaiveDateTime,
195 | }
196 | 
197 | #[derive(Serialize, Debug)]
198 | struct AggregateDataMessage {
199 |     peer_message: PeerMessage,
200 |     data: Vec<AggregateMeasurement>,
201 | }
202 | 
203 | impl Actor for WebSocket {
204 |     type Context = ws::WebsocketContext<Self>;
205 | 
206 |     // Start heartbeat and updates on new connection
207 |     fn started(&mut self, ctx: &mut Self::Context) {
208 |         // Ensure we can keep sufficient backlog to survive a temp disconnect
209 |         ctx.set_mailbox_capacity(64);
210 |         self.hb(ctx);
211 |         self.metrics.inc_concurrent_feed_count();
212 |     }
213 | }
214 | 
215 | /// Handler for `ws::Message`
216 | impl StreamHandler<Result<ws::Message, ws::ProtocolError>> for WebSocket {
217 |     fn handle(&mut self, msg: Result<ws::Message, ws::ProtocolError>, ctx: &mut Self::Context) {
218 |         match msg {
219 |             Ok(ws::Message::Ping(msg)) => {
220 |                 self.hb = Instant::now();
221 |                 ctx.pong(&msg);
222 |             }
223 |             Ok(ws::Message::Pong(_)) => {
224 |                 self.hb = Instant::now();
225 |             }
226 |             Ok(ws::Message::Text(text)) => {
227 |                 if let Err(e) = self.process_message(text, ctx) {
228 |                     trace!("Unable to decode message: {}", e);
229 |                     ctx.text(json!({ "error": e }).to_string());
230 |                 }
231 |             }
232 |             Ok(ws::Message::Binary(_bin)) => (),
233 |             Ok(ws::Message::Close(_)) => {
234 |                 ctx.stop();
235 |             }
236 |             _ => ctx.stop(),
237 |         }
238 |     }
239 | }
240 | 
241 | impl WebSocket {
242 |     fn new(cache: Data<Addr<Cache>>, metrics: actix_web::web::Data<Metrics>) -> Self {
243 |         Self {
244 |             hb: Instant::now(),
245 |             cache,
246 |             metrics,
247 |             aggregate_subscriptions: HashMap::new(),
248 |         }
249 |     }
250 | 
251 |     fn process_message(
252 |         &mut self,
253 |         text: String,
254 |         ctx: &mut <WebSocket as Actor>::Context,
255 |     ) -> Result<(), &'static str> {
256 |         if let Ok(j) = serde_json::from_str::<Value>(&text) {
257 |             let peer_id: String = j["peer_id"]
258 |                 .as_str()
259 |                 .ok_or("`peer_id` not found")?
260 |                 .to_owned();
261 |             let msg = j["msg"].as_str().ok_or("`msg` not found")?.to_owned();
262 |             let mut start_time: Option<NaiveDateTime> = None;
263 |             let interest = match j["interest"].as_str().ok_or("`interest` not found")? {
264 |                 "subscribe" => {
265 |                     start_time = Some(
266 |                         j["start_time"]
267 |                             .as_str()
268 |                             .ok_or("`start_time` not found")?
269 |                             .parse::<NaiveDateTime>()
270 |                             .map_err(|_| "unable to parse `start_time`")?,
271 |                     );
272 |                     Interest::Subscribe
273 |                 }
274 |                 "unsubscribe" => Interest::Unsubscribe,
275 |                 _ => return Err("`interest` must be either `subscribe` or `unsubscribe`"),
276 |             };
277 |             let subscription = Subscription {
278 |                 peer_id,
279 |                 msg,
280 |                 subscriber_addr: ctx.address().recipient(),
281 |                 start_time,
282 |                 interest,
283 |             };
284 | 
285 |             self.handle_aggregate_subscription(&subscription, j)?;
286 | 
287 |             match self.cache.try_send(subscription) {
288 |                 Ok(_) => debug!("Sent subscription"),
289 |                 Err(e) => {
290 |                     error!("Could not send subscription due to: {:?}", e);
291 |                     return Err("Internal server error");
292 |                 }
293 |             }
294 |         }
295 |         Ok(())
296 |     }
297 | 
298 |     fn handle_aggregate_subscription(
299 |         &mut self,
300 |         subscription: &Subscription,
301 |         json: Value,
302 |     ) -> Result<(), &'static str> {
303 |         let peer_message = PeerMessage {
304 |             peer_id: subscription.peer_id.to_owned(),
305 |             msg: subscription.msg.to_owned(),
306 |         };
307 |         if subscription.interest == Interest::Unsubscribe {
308 |             self.aggregate_subscriptions.remove(&peer_message);
309 |             return Ok(());
310 |         }
311 |         if json.get("aggregate_type").is_none() {
312 |             return Ok(());
313 |         }
314 |         let aggregate_type = json["aggregate_type"].as_str();
315 |         if let Some(a_type) = aggregate_type {
316 |             if a_type.is_empty() {
317 |                 return Ok(());
318 |             }
319 |             let aggregate_interval = json["aggregate_interval"].as_u64();
320 |             if let Some(a_interval) = aggregate_interval {
321 |                 let aggregate_type: AggregateType = a_type.try_into()?;
322 |                 let aggregate = Aggregate {
323 |                     aggregate_type,
324 |                     key: "time".to_string(),
325 |                     update_interval: Duration::from_secs(a_interval),
326 |                 };
327 |                 let aggregate_subscription = AggregateSubscription {
328 |                     subscription: subscription.to_owned(),
329 |                     aggregate,
330 |                     aggregate_remainder: Default::default(),
331 |                 };
332 |                 if let Some(mut existing_sub) = self
333 |                     .aggregate_subscriptions
334 |                     .insert(peer_message, aggregate_subscription)
335 |                 {
336 |                     existing_sub.subscription.interest = Interest::Unsubscribe;
337 |                     match self.cache.try_send(existing_sub.subscription) {
338 |                         Ok(_) => debug!("Sent unsubscribe for existing aggregate subscription"),
339 |                         Err(e) => {
340 |                             error!("Could not send unsubscribe due to: {:?}", e);
341 |                             return Err("Internal server error");
342 |                         }
343 |                     }
344 |                 }
345 |             } else {
346 |                 return Err("Unable to parse `aggregate_type` or `aggregate_interval`");
347 |             }
348 |         }
349 |         Ok(())
350 |     }
351 | 
352 |     fn hb(&self, ctx: &mut <WebSocket as Actor>::Context) {
353 |         ctx.run_interval(HEARTBEAT_INTERVAL, |act, ctx| {
354 |             if Instant::now().duration_since(act.hb) > CLIENT_TIMEOUT_S {
355 |                 info!("Websocket Client heartbeat failed, disconnecting!");
356 |                 ctx.stop();
357 |                 return;
358 |             }
359 |             ctx.ping(b"");
360 |         });
361 |     }
362 | }
363 | 
364 | #[derive(Hash, Debug, Clone)]
365 | pub enum AggregateType {
366 |     Mean,
367 |     Median,
368 |     Min,
369 |     Max,
370 |     Percentile90,
371 | }
372 | 
373 | impl TryFrom<&str> for AggregateType {
374 |     type Error = &'static str;
375 | 
376 |     fn try_from(value: &str) -> Result<Self, Self::Error> {
377 |         match value.to_lowercase().as_str() {
378 |             "mean" => Ok(AggregateType::Mean),
379 |             "median" => Ok(AggregateType::Median),
380 |             "min" => Ok(AggregateType::Min),
381 |             "max" => Ok(AggregateType::Max),
382 |             "percentile90" => Ok(AggregateType::Percentile90),
383 |             _ => Err("Unable to parse AggregateType"),
384 |         }
385 |     }
386 | }
387 | 
388 | #[derive(Hash, Debug, Clone)]
389 | pub struct Aggregate {
390 |     pub aggregate_type: AggregateType,
391 |     pub key: String,
392 |     pub update_interval: Duration,
393 | }
394 | 
395 | #[derive(Debug)]
396 | struct AggregateSubscription {
397 |     subscription: Subscription,
398 |     aggregate: Aggregate,
399 |     aggregate_remainder: VecDeque<SubstrateLog>,
400 | }
401 | 
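A hedged sketch of the JSON a feed client sends over `/feed/` to start (or stop) receiving data. Field names follow `process_message()` and `handle_aggregate_subscription()` above; the `peer_id` and `msg` values are placeholders:

    use serde_json::json;

    fn main() {
        // Subscribe to one peer's message stream, with optional server-side
        // aggregation into 10-second buckets.
        let subscribe = json!({
            "peer_id": "QmExamplePeer",          // placeholder peer id
            "msg": "tracing.profiling",          // placeholder message type
            "interest": "subscribe",             // or "unsubscribe"
            "start_time": "2020-09-08T00:00:00", // parsed as chrono::NaiveDateTime
            "aggregate_type": "percentile90",    // mean|median|min|max|percentile90
            "aggregate_interval": 10             // seconds per aggregation bucket
        });
        println!("{}", subscribe);
    }

Omitting `aggregate_type` (or sending it empty) yields raw, unaggregated `PeerDataArray` frames; resubscribing with an aggregate replaces and unsubscribes any existing aggregate subscription for the same peer/message pair.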
--------------------------------------------------------------------------------
/src/web/metrics.rs:
--------------------------------------------------------------------------------
1 | // Copyright 2019 Parity Technologies (UK) Ltd.
2 | // This file is part of Substrate Analytics.
3 | 
4 | // Substrate Analytics is free software: you can redistribute it and/or modify
5 | // it under the terms of the GNU General Public License as published by
6 | // the Free Software Foundation, either version 3 of the License, or
7 | // (at your option) any later version.
8 | 
9 | // Substrate Analytics is distributed in the hope that it will be useful,
10 | // but WITHOUT ANY WARRANTY; without even the implied warranty of
11 | // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12 | // GNU General Public License for more details.
13 | 
14 | // You should have received a copy of the GNU General Public License
15 | // along with Substrate Analytics.  If not, see <http://www.gnu.org/licenses/>.
16 | 
17 | use actix_web::{http::StatusCode, HttpRequest, HttpResponse, Result as AWResult};
18 | use std::fmt;
19 | use std::sync::atomic::{AtomicU64, Ordering};
20 | use std::sync::Arc;
21 | use sysinfo::{NetworkExt, NetworksExt, System, SystemExt};
22 | 
23 | #[derive(Clone, Default)]
24 | pub struct Metrics {
25 |     ws_message_count: Arc<AtomicU64>,
26 |     ws_connected_count: Arc<AtomicU64>,
27 |     ws_dropped_count: Arc<AtomicU64>,
28 |     ws_bytes_received: Arc<AtomicU64>,
29 |     req_count: Arc<AtomicU64>,
30 |     feeds_connected: Arc<AtomicU64>,
31 |     feeds_disconnected: Arc<AtomicU64>,
32 |     system: Arc<System>,
33 | }
34 | 
35 | impl Metrics {
36 |     pub fn inc_ws_message_count(&self) {
37 |         self.ws_message_count.fetch_add(1, Ordering::Relaxed);
38 |     }
39 |     pub fn inc_ws_connected_count(&self) {
40 |         self.ws_connected_count.fetch_add(1, Ordering::Relaxed);
41 |     }
42 |     pub fn inc_ws_dropped_count(&self) {
43 |         self.ws_dropped_count.fetch_add(1, Ordering::Relaxed);
44 |     }
45 |     pub fn inc_ws_bytes_received(&self, n: u64) {
46 |         self.ws_bytes_received.fetch_add(n, Ordering::Relaxed);
47 |     }
48 |     pub fn inc_req_count(&self) {
49 |         self.req_count.fetch_add(1, Ordering::Relaxed);
50 |     }
51 |     pub fn inc_concurrent_feed_count(&self) {
52 |         self.feeds_connected.fetch_add(1, Ordering::Relaxed);
53 |     }
54 |     pub fn dec_concurrent_feed_count(&self) {
55 |         self.feeds_disconnected.fetch_add(1, Ordering::Relaxed);
56 |     }
57 | 
58 |     fn bytes_io(&self, sys: &System) -> (u64, u64) {
59 |         let mut total_sent = 0;
60 |         let mut total_rec = 0;
61 |         for (_, data) in sys.get_networks().iter() {
62 |             total_sent += data.get_total_transmitted();
63 |             total_rec += data.get_total_received();
64 |         }
65 |         (total_sent, total_rec)
66 |     }
67 | }
68 | 
69 | const WS_MESSAGE_COUNT_TEMPLATE: &str =
70 |     "# HELP substrate_message_count Number of binary and text messages received - (does not include PING/PONG messages)\n\
71 |     # TYPE substrate_message_count counter\n\
72 |     substrate_message_count ";
73 | 
74 | const WS_CONNECTED_COUNT_TEMPLATE: &str =
75 |     "# HELP nodes_connected_count Total number of WS connections made since launch.\n\
76 |     # TYPE nodes_connected_count counter\n\
77 |     nodes_connected_count ";
78 | 
79 | const WS_DROPPED_COUNT_TEMPLATE: &str =
80 |     "# HELP nodes_dropped_count Total number of WS connections dropped since launch.\n\
81 |     # TYPE nodes_dropped_count counter\n\
82 |     nodes_dropped_count ";
83 | 
84 | const CURRENT_SUBSTRATE_CONNECTIONS_TEMPLATE: &str =
85 |     "# HELP current_substrate_connections Number of WS substrate connections sending data.\n\
86 |     # TYPE current_substrate_connections gauge\n\
87 |     current_substrate_connections ";
88 | 
89 | const SUBSTRATE_BYTES_RECEIVED_TEMPLATE: &str =
90 |     "# HELP substrate_bytes_received Total bytes received in binary and text WS messages from substrate clients.\n\
91 |     # TYPE substrate_bytes_received counter\n\
92 |     substrate_bytes_received ";
93 | 
94 | const BYTES_RECEIVED_TEMPLATE: &str =
95 |     "# HELP bytes_received Total bytes received in binary and text WS messages.\n\
96 |     # TYPE bytes_received counter\n\
97 |     bytes_received ";
98 | 
99 | const BYTES_SENT_TEMPLATE: &str =
100 |     "# HELP bytes_sent Total bytes sent in binary and text WS messages.\n\
101 |     # TYPE bytes_sent counter\n\
102 |     bytes_sent ";
103 | 
104 | const REQ_COUNT_TEMPLATE: &str =
105 |     "# HELP requests Number of get requests to non WS routes, also excluding metrics route.\n\
106 |     # TYPE requests counter\n\
107 |     requests ";
108 | 
109 | const CURRENT_FEED_COUNT_TEMPLATE: &str =
110 |     "# HELP current_feed_connections Number of WS feed connections consuming data.\n\
111 |     # TYPE current_feed_connections gauge\n\
112 |     current_feed_connections ";
113 | 
114 | const FEEDS_CONNECTED_TEMPLATE: &str =
115 |     "# HELP feeds_connected Number of WS connections for live feed\n\
116 |     # TYPE feeds_connected counter\n\
117 |     feeds_connected ";
118 | 
119 | const FEEDS_DROPPED_TEMPLATE: &str =
120 |     "# HELP feeds_disconnected Number of WS disconnections for live feed\n\
121 |     # TYPE feeds_disconnected counter\n\
122 |     feeds_disconnected ";
123 | 
124 | const LOAD_AVG_ONE_TEMPLATE: &str = "# HELP load_avg_one System load average one minute\n\
125 |     # TYPE load_avg_one gauge\n\
126 |     load_avg_one ";
127 | 
128 | const LOAD_AVG_FIVE_TEMPLATE: &str = "# HELP load_avg_five System load average five minutes\n\
129 |     # TYPE load_avg_five gauge\n\
130 |     load_avg_five ";
131 | 
132 | const LOAD_AVG_FIFTEEN_TEMPLATE: &str =
133 |     "# HELP load_avg_fifteen System load average fifteen minutes\n\
134 |     # TYPE load_avg_fifteen gauge\n\
135 |     load_avg_fifteen ";
136 | 
137 | const TOTAL_MEM_TEMPLATE: &str = "# HELP total_mem Total system RAM (KiB)\n\
138 |     # TYPE total_mem gauge\n\
139 |     total_mem ";
140 | 
141 | const USED_MEM_TEMPLATE: &str = "# HELP used_mem Used system RAM (KiB)\n\
142 |     # TYPE used_mem gauge\n\
143 |     used_mem ";
144 | 
145 | const TOTAL_SWAP_TEMPLATE: &str = "# HELP total_swap Total system swap (KiB)\n\
146 |     # TYPE total_swap gauge\n\
147 |     total_swap ";
148 | 
149 | const USED_SWAP_TEMPLATE: &str = "# HELP used_swap Used swap (KiB)\n\
150 |     # TYPE used_swap gauge\n\
151 |     used_swap ";
152 | 
153 | impl fmt::Display for Metrics {
154 |     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
155 |         let sys = System::new_all();
156 |         let (total_sent, total_rec) = self.bytes_io(&sys);
157 |         let load_avg = sys.get_load_average();
158 |         write!(
159 |             f,
160 |             "{}{}\n\
161 |             {}{}\n\
162 |             {}{}\n\
163 |             {}{}\n\
164 |             {}{}\n\
165 |             {}{}\n\
166 |             {}{}\n\
167 |             {}{}\n\
168 |             {}{}\n\
169 |             {}{}\n\
170 |             {}{}\n\
171 |             {}{}\n\
172 |             {}{}\n\
173 |             {}{}\n\
174 |             {}{}\n\
175 |             {}{}\n\
176 |             {}{}\n\
177 |             {}{}\n",
178 |             WS_MESSAGE_COUNT_TEMPLATE,
179 |             self.ws_message_count.load(Ordering::Relaxed),
180 |             WS_CONNECTED_COUNT_TEMPLATE,
181 |             self.ws_connected_count.load(Ordering::Relaxed),
182 |             WS_DROPPED_COUNT_TEMPLATE,
183 |             self.ws_dropped_count.load(Ordering::Relaxed),
184 |             CURRENT_SUBSTRATE_CONNECTIONS_TEMPLATE,
185 |             self.ws_connected_count.load(Ordering::Relaxed)
186 |                 - self.ws_dropped_count.load(Ordering::Relaxed),
187 |             SUBSTRATE_BYTES_RECEIVED_TEMPLATE,
188 |             self.ws_bytes_received.load(Ordering::Relaxed),
189 |             BYTES_RECEIVED_TEMPLATE,
190 |             total_rec,
191 |             BYTES_SENT_TEMPLATE,
192 |             total_sent,
193 |             REQ_COUNT_TEMPLATE,
194 |             self.req_count.load(Ordering::Relaxed),
195 |             CURRENT_FEED_COUNT_TEMPLATE,
196 |             self.feeds_connected.load(Ordering::Relaxed)
197 |                 - self.feeds_disconnected.load(Ordering::Relaxed),
198 |             FEEDS_CONNECTED_TEMPLATE,
199 |             self.feeds_connected.load(Ordering::Relaxed),
200 |             FEEDS_DROPPED_TEMPLATE,
201 |             self.feeds_disconnected.load(Ordering::Relaxed),
202 |             LOAD_AVG_ONE_TEMPLATE,
203 |             load_avg.one,
204 |             LOAD_AVG_FIVE_TEMPLATE,
205 |             load_avg.five,
206 |             LOAD_AVG_FIFTEEN_TEMPLATE,
207 |             load_avg.fifteen,
208 |             TOTAL_MEM_TEMPLATE,
209 |             sys.get_total_memory(),
210 |             USED_MEM_TEMPLATE,
211 |             sys.get_used_memory(),
212 |             TOTAL_SWAP_TEMPLATE,
213 |             sys.get_total_swap(),
214 |             USED_SWAP_TEMPLATE,
215 |             sys.get_used_swap(),
216 |         )
217 |     }
218 | }
219 | 
220 | pub fn configure(cfg: &mut actix_web::web::ServiceConfig) {
221 |     cfg.service(actix_web::web::scope("/metrics/").route("", actix_web::web::get().to(root)));
222 | }
223 | 
224 | async fn root(
225 |     _r: HttpRequest,
226 |     metrics: actix_web::web::Data<Metrics>,
227 | ) -> AWResult<HttpResponse> {
228 |     Ok(HttpResponse::build(StatusCode::OK)
229 |         .content_type("text/plain; version=0.0.4; charset=utf-8")
230 |         .body(metrics.to_string()))
231 | }
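When scraped via `GET /metrics/`, the `Display` impl renders Prometheus text exposition built from the templates above. An excerpt of what a scrape looks like (values illustrative only):

    # HELP substrate_message_count Number of binary and text messages received - (does not include PING/PONG messages)
    # TYPE substrate_message_count counter
    substrate_message_count 128934
    # HELP nodes_connected_count Total number of WS connections made since launch.
    # TYPE nodes_connected_count counter
    nodes_connected_count 57
    # HELP load_avg_one System load average one minute
    # TYPE load_avg_one gauge
    load_avg_one 0.42

Note that the gauges `current_substrate_connections` and `current_feed_connections` are derived by subtracting the dropped/disconnected counters from the connected counters at scrape time.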
--------------------------------------------------------------------------------
/src/web/mod.rs:
--------------------------------------------------------------------------------
1 | // Copyright 2019 Parity Technologies (UK) Ltd.
2 | // This file is part of Substrate Analytics.
3 | 
4 | // Substrate Analytics is free software: you can redistribute it and/or modify
5 | // it under the terms of the GNU General Public License as published by
6 | // the Free Software Foundation, either version 3 of the License, or
7 | // (at your option) any later version.
8 | 
9 | // Substrate Analytics is distributed in the hope that it will be useful,
10 | // but WITHOUT ANY WARRANTY; without even the implied warranty of
11 | // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12 | // GNU General Public License for more details.
13 | 
14 | // You should have received a copy of the GNU General Public License
15 | // along with Substrate Analytics.  If not, see <http://www.gnu.org/licenses/>.
16 | 
17 | pub mod benchmarks;
18 | pub mod dashboard;
19 | pub mod feed;
20 | pub mod metrics;
21 | pub mod nodes;
22 | pub mod reputation;
23 | pub mod root;
24 | pub mod stats;
25 | 
26 | use crate::db::filters::Filters;
27 | 
28 | pub fn get_filters(req: &actix_web::HttpRequest) -> Filters {
29 |     match actix_web::web::Query::<Filters>::from_query(&req.query_string()) {
30 |         Ok(f) => f.clone(),
31 |         Err(_) => {
32 |             warn!("Error deserializing Filters from querystring");
33 |             Filters::default()
34 |         }
35 |     }
36 | }
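A minimal sketch (assumed; main.rs is not shown in this section) of how these per-module `configure()` functions compose into one actix-web App. The real binary also registers `Data<Addr<DbExecutor>>`, `Data<Addr<Cache>>`, `Data<Addr<LogBuffer>>` and `Data<Metrics>` before the handlers can run, and the `#[actix_web::main]` macro assumes actix-web 3; bind address is a placeholder:

    use actix_web::{App, HttpServer};

    #[actix_web::main]
    async fn main() -> std::io::Result<()> {
        HttpServer::new(|| {
            App::new()
                .configure(crate::web::metrics::configure)
                .configure(crate::web::stats::configure)
                .configure(crate::web::nodes::configure)
                .configure(crate::web::reputation::configure)
                .configure(crate::web::benchmarks::configure)
                .configure(crate::web::dashboard::configure)
                .configure(crate::web::feed::configure)
                // root mounts scope("/"), so register it last
                .configure(crate::web::root::configure)
        })
        .bind("127.0.0.1:8080")? // placeholder address
        .run()
        .await
    }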
--------------------------------------------------------------------------------
/src/web/nodes.rs:
--------------------------------------------------------------------------------
1 | // Copyright 2019 Parity Technologies (UK) Ltd.
2 | // This file is part of Substrate Analytics.
3 | 
4 | // Substrate Analytics is free software: you can redistribute it and/or modify
5 | // it under the terms of the GNU General Public License as published by
6 | // the Free Software Foundation, either version 3 of the License, or
7 | // (at your option) any later version.
8 | 
9 | // Substrate Analytics is distributed in the hope that it will be useful,
10 | // but WITHOUT ANY WARRANTY; without even the implied warranty of
11 | // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12 | // GNU General Public License for more details.
13 | 
14 | // You should have received a copy of the GNU General Public License
15 | // along with Substrate Analytics.  If not, see <http://www.gnu.org/licenses/>.
16 | 
17 | use super::get_filters;
18 | use super::metrics::Metrics;
19 | use crate::db::nodes::{LogsQuery, NodesQuery, StatsQuery};
20 | use crate::db::*;
21 | use actix::prelude::*;
22 | use actix_web::{HttpRequest, HttpResponse};
23 | 
24 | pub fn configure(cfg: &mut actix_web::web::ServiceConfig) {
25 |     cfg.service(
26 |         actix_web::web::scope("/nodes/")
27 |             // .route(
28 |             //     "/{peer_id}/peer_counts",
29 |             //     actix_web::web::get().to(peer_counts),
30 |             // )
31 |             .route("/logs/", actix_web::web::get().to(logs))
32 |             .route("/log_stats/", actix_web::web::get().to(log_stats))
33 |             .route("", actix_web::web::get().to(all_nodes)),
34 |     );
35 | }
36 | 
37 | async fn all_nodes(
38 |     req: HttpRequest,
39 |     db: actix_web::web::Data<Addr<DbExecutor>>,
40 |     metrics: actix_web::web::Data<Metrics>,
41 | ) -> Result<HttpResponse, actix_web::Error> {
42 |     metrics.inc_req_count();
43 |     let filters = get_filters(&req);
44 |     match db.send(NodesQuery(filters)).await? {
45 |         Ok(r) => Ok(HttpResponse::Ok().json(json!(r))),
46 |         Err(e) => {
47 |             error!("Could not complete all_nodes query: {}", e);
48 |             Ok(HttpResponse::InternalServerError().json(json!("Error while processing query")))
49 |         }
50 |     }
51 | }
52 | 
53 | async fn log_stats(
54 |     req: HttpRequest,
55 |     db: actix_web::web::Data<Addr<DbExecutor>>,
56 |     metrics: actix_web::web::Data<Metrics>,
57 | ) -> Result<HttpResponse, actix_web::Error> {
58 |     metrics.inc_req_count();
59 |     let filters = get_filters(&req);
60 |     match db.send(StatsQuery(filters)).await? {
61 |         Ok(r) => Ok(HttpResponse::Ok().json(json!(r))),
62 |         Err(e) => {
63 |             error!("Could not complete log_stats query: {}", e);
64 |             Ok(HttpResponse::InternalServerError().json(json!("Error while processing query")))
65 |         }
66 |     }
67 | }
68 | 
69 | //async fn peer_counts(
70 | //    req: HttpRequest,
71 | //    db: actix_web::web::Data<Addr<DbExecutor>>,
72 | //    metrics: actix_web::web::Data<Metrics>,
73 | //) -> Result<HttpResponse, actix_web::Error> {
74 | //    metrics.inc_req_count();
75 | //    let filters = get_filters(&req);
76 | //    match db.send(LogsQuery(filters)).await? {
77 | //        Ok(r) => Ok(HttpResponse::Ok().json(json!(r))),
78 | //        Err(e) => {
79 | //            error!("Could not complete query: {}", e);
80 | //            Ok(HttpResponse::InternalServerError().json(json!("Error while processing query")))
81 | //        }
82 | //    }
83 | //}
84 | 
85 | async fn logs(
86 |     req: HttpRequest,
87 |     db: actix_web::web::Data<Addr<DbExecutor>>,
88 |     metrics: actix_web::web::Data<Metrics>,
89 | ) -> Result<HttpResponse, actix_web::Error> {
90 |     metrics.inc_req_count();
91 |     let filters = get_filters(&req);
92 |     match db.send(LogsQuery(filters)).await? {
93 |         Ok(r) => Ok(HttpResponse::Ok().json(json!(r))),
94 |         Err(e) => {
95 |             error!("Could not complete logs query: {}", e);
96 |             Ok(HttpResponse::InternalServerError().json(json!("Error while processing query")))
97 |         }
98 |     }
99 | }
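`configure()` gives the `/nodes/` scope three GET endpoints, all of which accept the same querystring filters via `get_filters()`:

    // GET /nodes/           -> all_nodes (NodesQuery)
    // GET /nodes/logs/      -> logs      (LogsQuery)
    // GET /nodes/log_stats/ -> log_stats (StatsQuery)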
--------------------------------------------------------------------------------
/src/web/reputation.rs:
--------------------------------------------------------------------------------
1 | // Copyright 2019 Parity Technologies (UK) Ltd.
2 | // This file is part of Substrate Analytics.
3 | 
4 | // Substrate Analytics is free software: you can redistribute it and/or modify
5 | // it under the terms of the GNU General Public License as published by
6 | // the Free Software Foundation, either version 3 of the License, or
7 | // (at your option) any later version.
8 | 
9 | // Substrate Analytics is distributed in the hope that it will be useful,
10 | // but WITHOUT ANY WARRANTY; without even the implied warranty of
11 | // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12 | // GNU General Public License for more details.
13 | 
14 | // You should have received a copy of the GNU General Public License
15 | // along with Substrate Analytics.  If not, see <http://www.gnu.org/licenses/>.
16 | 
17 | use super::get_filters;
18 | use super::metrics::Metrics;
19 | use crate::db::{
20 |     reputation::{PeerReputationQuery, PeerReputationsQuery},
21 |     DbExecutor,
22 | };
23 | use actix::prelude::*;
24 | use actix_web::{HttpRequest, HttpResponse};
25 | 
26 | pub fn configure(cfg: &mut actix_web::web::ServiceConfig) {
27 |     cfg.service(
28 |         actix_web::web::scope("/reputation/")
29 |             .route("/logged/", actix_web::web::get().to(logged))
30 |             .route("/mock/{qty}/", actix_web::web::get().to(mock))
31 |             .route("/{peer_id}/", actix_web::web::get().to(single))
32 |             .route("", actix_web::web::get().to(all)),
33 |     );
34 | }
35 | 
36 | async fn logged(
37 |     req: HttpRequest,
38 |     db: actix_web::web::Data<Addr<DbExecutor>>,
39 |     metrics: actix_web::web::Data<Metrics>,
40 | ) -> Result<HttpResponse, actix_web::Error> {
41 |     metrics.inc_req_count();
42 |     let filters = get_filters(&req);
43 |     let res = db.send(PeerReputationsQuery::Logged(filters)).await?;
44 |     match res {
45 |         Ok(r) => Ok(HttpResponse::Ok().json(json!(r))),
46 |         Err(e) => {
47 |             error!("Could not complete stats query: {:?}", e);
48 |             Ok(HttpResponse::InternalServerError().json(json!("Error while processing query")))
49 |         }
50 |     }
51 | }
52 | 
53 | async fn single(
54 |     req: HttpRequest,
55 |     db: actix_web::web::Data<Addr<DbExecutor>>,
56 |     metrics: actix_web::web::Data<Metrics>,
57 | ) -> Result<HttpResponse, actix_web::Error> {
58 |     metrics.inc_req_count();
59 |     let peer_id = req
60 |         .match_info()
61 |         .get("peer_id")
62 |         .expect("peer_id should be available because the route matched")
63 |         .to_string();
64 |     let filters = get_filters(&req);
65 |     let res = db.send(PeerReputationQuery { peer_id, filters }).await?;
66 |     match res {
67 |         Ok(r) => Ok(HttpResponse::Ok().json(json!(r))),
68 |         Err(e) => {
69 |             error!("Could not complete single peer reputation query: {:?}", e);
70 |             Ok(HttpResponse::InternalServerError().json(json!("Error while processing query")))
71 |         }
72 |     }
73 | }
74 | 
75 | async fn all(
76 |     req: HttpRequest,
77 |     db: actix_web::web::Data<Addr<DbExecutor>>,
78 |     metrics: actix_web::web::Data<Metrics>,
79 | ) -> Result<HttpResponse, actix_web::Error> {
80 |     metrics.inc_req_count();
81 |     let filters = get_filters(&req);
82 |     let res = db.send(PeerReputationsQuery::All(filters)).await?;
83 |     match res {
84 |         Ok(r) => Ok(HttpResponse::Ok().json(json!(r))),
85 |         Err(e) => {
86 |             error!("Could not complete all peer reputation query: {:?}", e);
87 |             Ok(HttpResponse::InternalServerError().json(json!("Error while processing query")))
88 |         }
89 |     }
90 | }
91 | 
92 | async fn mock(
93 |     req: HttpRequest,
94 |     db: actix_web::web::Data<Addr<DbExecutor>>,
95 |     metrics: actix_web::web::Data<Metrics>,
96 | ) -> Result<HttpResponse, actix_web::Error> {
97 |     metrics.inc_req_count();
98 |     let qty: usize = match req
99 |         .match_info()
100 |         .get("qty")
101 |         .expect("qty should be available because the route matched")
102 |         .to_string()
103 |         .parse()
104 |     {
105 |         Ok(v) => v,
106 |         _ => std::usize::MAX,
107 |     };
108 |     let res = db.send(PeerReputationsQuery::Mock(qty)).await?;
109 |     match res {
110 |         Ok(r) => Ok(HttpResponse::Ok().json(json!(r))),
111 |         Err(e) => {
112 |             error!("Could not complete mock reputation query: {:?}", e);
113 |             Ok(HttpResponse::InternalServerError().json(json!("Error while processing query")))
114 |         }
115 |     }
116 | }
--------------------------------------------------------------------------------
/src/web/root.rs:
--------------------------------------------------------------------------------
1 | // Copyright 2019 Parity Technologies (UK) Ltd.
2 | // This file is part of Substrate Analytics.
3 | 
4 | // Substrate Analytics is free software: you can redistribute it and/or modify
5 | // it under the terms of the GNU General Public License as published by
6 | // the Free Software Foundation, either version 3 of the License, or
7 | // (at your option) any later version.
8 | 
9 | // Substrate Analytics is distributed in the hope that it will be useful,
10 | // but WITHOUT ANY WARRANTY; without even the implied warranty of
11 | // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12 | // GNU General Public License for more details.
13 | 
14 | // You should have received a copy of the GNU General Public License
15 | // along with Substrate Analytics.  If not, see <http://www.gnu.org/licenses/>.
16 | 
17 | use super::metrics::Metrics;
18 | use crate::db::{
19 |     models::{NewPeerConnection, NewSubstrateLog, PeerConnection},
20 |     DbExecutor,
21 | };
22 | use crate::{LogBuffer, CLIENT_TIMEOUT_S, HEARTBEAT_INTERVAL, WS_MAX_PAYLOAD};
23 | use actix::prelude::*;
24 | use actix_http::ws::Codec;
25 | use actix_web::{error, Error, HttpRequest, HttpResponse};
26 | use actix_web_actors::ws;
27 | use chrono::DateTime;
28 | use serde_json::Value;
29 | use std::fmt;
30 | use std::time::Instant;
31 | 
32 | #[derive(Default, Debug)]
33 | struct MessageCount {
34 |     ping: u64,
35 |     pong: u64,
36 |     text: u64,
37 |     binary: u64,
38 | }
39 | 
40 | impl fmt::Display for MessageCount {
41 |     fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
42 |         write!(
43 |             f,
44 |             "ping: {}, pong: {}, text: {}, binary {}",
45 |             self.ping, self.pong, self.text, self.binary
46 |         )
47 |     }
48 | }
49 | 
50 | struct NodeSocket {
51 |     hb: Instant,
52 |     ip: String,
53 |     db: actix_web::web::Data<Addr<DbExecutor>>,
54 |     log_buffer: actix_web::web::Data<Addr<LogBuffer>>,
55 |     metrics: actix_web::web::Data<Metrics>,
56 |     // Indicate if the logs should be saved to a different table that is not automatically purged
57 |     peer_connection: PeerConnection,
58 |     msg_count: MessageCount,
59 | }
60 | 
61 | impl Drop for NodeSocket {
62 |     fn drop(&mut self) {
63 |         self.metrics.inc_ws_dropped_count();
64 |         debug!("Dropped WS connection to ip: {}", self.ip);
65 |     }
66 | }
67 | 
68 | impl NodeSocket {
69 |     async fn new(
70 |         ip: String,
71 |         db: actix_web::web::Data<Addr<DbExecutor>>,
72 |         log_buffer: actix_web::web::Data<Addr<LogBuffer>>,
73 |         metrics: actix_web::web::Data<Metrics>,
74 |         audit: bool,
75 |     ) -> Result<Self, String> {
76 |         Ok(Self {
77 |             peer_connection: Self::create_peer_connection(&db, &ip, audit).await?,
78 |             ip,
79 |             db,
80 |             log_buffer,
81 |             metrics,
82 |             hb: Instant::now(),
83 |             msg_count: MessageCount::default(),
84 |         })
85 |     }
86 | 
87 |     async fn create_peer_connection(
88 |         db: &actix_web::web::Data<Addr<DbExecutor>>,
89 |         ip: &str,
90 |         audit: bool,
91 |     ) -> Result<PeerConnection, String> {
92 |         let res = db
93 |             .send(NewPeerConnection {
94 |                 ip_addr: String::from(ip),
95 |                 peer_id: None,
96 |                 name: None,
97 |                 chain: None,
98 |                 version: None,
99 |                 authority: None,
100 |                 startup_time: None,
101 |                 implementation: None,
102 |                 audit,
103 |             })
104 |             .await;
105 |         match res {
106 |             Ok(v) => v,
107 |             Err(e) => {
108 |                 error!("Failed to send NewPeerConnection to DB actor - {:?}", e);
109 |                 Err("Failed to send".to_string())
110 |             }
111 |         }
112 |     }
113 | 
114 |     fn hb(&self, ctx: &mut <NodeSocket as Actor>::Context) {
115 |         let ip = self.ip.clone();
116 |         ctx.run_interval(*HEARTBEAT_INTERVAL, move |act, ctx| {
117 |             if Instant::now().duration_since(act.hb) > *CLIENT_TIMEOUT_S {
118 |                 info!("Websocket heartbeat failed for: {} - DISCONNECTING", ip);
119 |                 ctx.stop();
120 |                 return;
121 |             }
122 |             ctx.ping(&[]);
123 |         });
124 |     }
125 | 
126 |     fn update_peer_id(&mut self, peer_id: &str) {
127 |         debug!("Found peerId: {}, for ip address: {}", &peer_id, &self.ip);
128 |         self.peer_connection.peer_id = Some(peer_id.to_string());
129 |         match self.db.try_send(self.peer_connection.clone()) {
130 |             Ok(_) => debug!(
131 |                 "Saved new peer connection record (ID: {:?}) for peer_id: {}",
132 |                 self.peer_connection.id, peer_id
133 |             ),
134 |             _ => error!(
135 |                 "Failed to send updated PeerConnection to DB actor for peer_connection_id: {}",
136 |                 self.peer_connection.id
137 |             ),
138 |         }
139 |     }
140 | 
141 |     fn update_peer_info(&mut self, log: &Value) {
142 |         if let Some(peer_id) = log["network_id"].as_str() {
143 |             self.peer_connection.peer_id = Some(peer_id.to_string());
144 |             debug!("Found peerId: {}, for ip address: {}", peer_id, &self.ip);
145 |         }
146 |         if let Some(name) = log["name"].as_str() {
147 |             self.peer_connection.name = Some(name.to_string());
148 |         }
149 |         if let Some(chain) = log["chain"].as_str() {
150 |             self.peer_connection.chain = Some(chain.to_string());
151 |         }
152 |         if let Some(version) = log["version"].as_str() {
153 |             self.peer_connection.version = Some(version.to_string());
154 |         }
155 |         if let Some(authority) = log["authority"].as_bool() {
156 |             self.peer_connection.authority = Some(authority);
157 |         }
158 |         if let Some(startup_time) = log["startup_time"].as_str() {
159 |             if let Ok(startup_time) = startup_time.parse::<i64>() {
160 |                 self.peer_connection.startup_time = Some(startup_time);
161 |             }
162 |         }
163 |         if let Some(implementation) = log["implementation"].as_str() {
164 |             self.peer_connection.implementation = Some(implementation.to_string());
165 |         }
166 |         match self.db.try_send(self.peer_connection.clone()) {
167 |             Ok(_) => debug!(
168 |                 "Saved new peer connection record (ID: {:?}) for peer_id: {:?}",
169 |                 self.peer_connection.id, self.peer_connection.peer_id
170 |             ),
171 |             _ => error!(
172 |                 "Failed to send updated PeerConnection to DB actor for peer_connection_id: {}",
173 |                 self.peer_connection.id
174 |             ),
175 |         }
176 |     }
177 | }
178 | 
179 | impl Actor for NodeSocket {
180 |     type Context = ws::WebsocketContext<Self>;
181 | 
182 |     // Initiate the heartbeat process on start
183 |     fn started(&mut self, ctx: &mut Self::Context) {
184 |         self.hb(ctx);
185 |     }
186 | }
187 | 
188 | // Handler for ws::Message
189 | impl StreamHandler<Result<ws::Message, ws::ProtocolError>> for NodeSocket {
190 |     fn handle(&mut self, msg: Result<ws::Message, ws::ProtocolError>, ctx: &mut Self::Context) {
191 |         let ip = self.ip.clone();
192 |         let mut logs: Option<Value> = None;
193 |         match msg {
194 |             Ok(ws::Message::Ping(msg)) => {
195 |                 self.msg_count.ping += 1;
196 |                 debug!("PING from: {}", ip);
197 |                 self.hb = Instant::now();
198 |                 ctx.pong(&msg);
199 |             }
200 |             Ok(ws::Message::Pong(_)) => {
201 |                 self.msg_count.pong += 1;
202 |                 debug!("PONG from: {} - message count: ({})", ip, self.msg_count);
203 |                 self.hb = Instant::now();
204 |             }
205 |             Ok(ws::Message::Text(text)) => {
206 |                 self.metrics
207 |                     .inc_ws_bytes_received(text.as_bytes().len() as u64);
208 |                 self.msg_count.text += 1;
209 |                 logs = match serde_json::from_str(&text) {
210 |                     Ok(a) => Some(a),
211 |                     Err(e) => {
212 |                         error!("{:?}", e);
213 |                         return;
214 |                     }
215 |                 };
216 |             }
217 |             Ok(ws::Message::Binary(bin)) => {
218 |                 self.metrics.inc_ws_bytes_received(bin.len() as u64);
219 |                 self.msg_count.binary += 1;
220 |                 logs = match serde_json::from_slice(&bin[..]) {
221 |                     Ok(a) => Some(a),
222 |                     Err(e) => {
223 |                         error!("{:?}", e);
224 |                         return;
225 |                     }
226 |                 };
227 |             }
228 |             Ok(ws::Message::Close(_)) => {
229 |                 info!(
230 |                     "Close received, disconnecting: {} - message count: ({})",
231 |                     ip, self.msg_count
232 |                 );
233 |                 ctx.stop();
234 |             }
235 |             // ws::Message::Nop => (),
236 |             _ => ctx.stop(),
237 |         }
238 |         if let Some(logs) = logs {
239 |             self.metrics.inc_ws_message_count();
240 |             if self.peer_connection.peer_id.is_none() {
241 |                 debug!("Searching for peerId for ip address: {}", &ip);
242 |                 if let Some(_msg @ "system.connected") = logs["msg"].as_str() {
243 |                     self.update_peer_info(&logs);
244 |                 // Support older versions of substrate
245 |                 } else if let Some(peer_id) = logs["state"]["peerId"].as_str() {
246 |                     self.update_peer_id(peer_id);
247 |                 } else if let Some(peer_id) = logs["network_state"]["peerId"].as_str() {
248 |                     self.update_peer_id(peer_id);
249 |                 }
250 |             }
251 |             if let Some(ts) = logs["ts"].as_str() {
252 |                 if let Ok(ts_utc) = DateTime::parse_from_rfc3339(ts) {
253 |                     self.log_buffer
254 |                         .try_send(NewSubstrateLog {
255 |                             peer_connection_id: self.peer_connection.id,
256 |                             created_at: ts_utc.naive_utc(),
257 |                             logs,
258 |                         })
259 |                         .unwrap_or_else(|e| {
260 |                             error!("Failed to send NewSubstrateLog to DB actor - {:?}", e)
261 |                         });
262 |                 } else {
263 |                     warn!("Unable to parse_from_rfc3339 for timestamp: {:?}", ts);
264 |                 }
265 |             } else {
266 |                 warn!("Unable to find timestamp in logs: {:?}", logs);
267 |             }
268 |         }
269 |     }
270 | }
271 | 
272 | pub fn configure(cfg: &mut actix_web::web::ServiceConfig) {
273 |     cfg.service(
274 |         actix_web::web::scope("/")
275 |             .route("audit", actix_web::web::get().to(ws_index_permanent))
276 |             .route("", actix_web::web::get().to(ws_index)),
277 |     );
278 | }
279 | 
280 | // Websocket handshake and start actor
281 | async fn ws_index(
282 |     r: HttpRequest,
283 |     stream: actix_web::web::Payload,
284 |     db: actix_web::web::Data<Addr<DbExecutor>>,
285 |     log_buffer: actix_web::web::Data<Addr<LogBuffer>>,
286 |     metrics: actix_web::web::Data<Metrics>,
287 | ) -> Result<HttpResponse, Error> {
288 |     establish_connection(r, stream, db, log_buffer, metrics, false).await
289 | }
290 | 
291 | // Websocket handshake and start actor
292 | async fn ws_index_permanent(
293 |     r: HttpRequest,
294 |     stream: actix_web::web::Payload,
295 |     db: actix_web::web::Data<Addr<DbExecutor>>,
296 |     log_buffer: actix_web::web::Data<Addr<LogBuffer>>,
297 |     metrics: actix_web::web::Data<Metrics>,
298 | ) -> Result<HttpResponse, Error> {
299 |     establish_connection(r, stream, db, log_buffer, metrics, true).await
300 | }
301 | 
302 | async fn establish_connection(
303 |     r: HttpRequest,
304 |     stream: actix_web::web::Payload,
305 |     db: actix_web::web::Data<Addr<DbExecutor>>,
306 |     log_buffer: actix_web::web::Data<Addr<LogBuffer>>,
307 |     metrics: actix_web::web::Data<Metrics>,
308 |     audit: bool,
309 | ) -> Result<HttpResponse, Error> {
310 |     let ip = r
311 |         .connection_info()
312 |         .remote_addr()
313 |         .unwrap_or("Unable to decode remote IP")
314 |         .to_string();
315 |     debug_headers(&r);
316 |     info!("Establishing ws connection to node: {}", ip);
317 |     match NodeSocket::new(ip.clone(), db, log_buffer, metrics.clone(), audit).await {
318 |         Ok(ns) => {
319 |             metrics.inc_ws_connected_count();
320 |             debug!(
321 |                 "Created PeerConnection record, id: {}, for ip: {}",
322 |                 ns.peer_connection.id, ip
323 |             );
324 |             let mut res = ws::handshake(&r)?;
325 |             let codec = Codec::new().max_size(*WS_MAX_PAYLOAD);
326 |             let ws_context = ws::WebsocketContext::with_codec(ns, stream, codec);
327 |             Ok(res.streaming(ws_context))
328 |         }
329 |         Err(e) => {
330 |             error!(
331 |                 "Unable to save PeerConnection, aborting WS handshake for ip: {}",
332 |                 ip
333 |             );
334 |             Err(error::ErrorInternalServerError(e))
335 |         }
336 |     }
337 | }
338 | 
339 | fn debug_headers(req: &HttpRequest) {
340 |     let head = req.head();
341 |     let headers = head.headers();
342 |     debug!(
343 |         "HTTP peer_addr (could be proxy): {:?}",
344 |         head.peer_addr
345 |             .expect("Should always have access to peer_addr from request")
346 |     );
347 |     for (k, v) in headers.iter() {
348 |         trace!("HEADER MAP: Key: {}", k);
349 |         trace!("HEADER MAP: Value: {:?}", v);
350 |     }
351 | }
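A hedged sketch of the kind of telemetry message the "/" socket accepts. Field names mirror `update_peer_info()` and the `ts` handling above; all values are illustrative placeholders:

    use serde_json::json;

    fn main() {
        let connected = json!({
            "msg": "system.connected",
            "ts": "2020-09-08T12:00:00+00:00",   // parsed with DateTime::parse_from_rfc3339
            "network_id": "QmExamplePeer",       // placeholder peer id
            "name": "node-01",
            "chain": "flaming-fir",
            "version": "2.0.0",
            "authority": false,
            "startup_time": "1599566400000",     // string, parsed to i64
            "implementation": "parity-substrate"
        });
        println!("{}", connected);
    }

Older substrate versions that don't send `system.connected` are still identified via the `state.peerId` / `network_state.peerId` fallbacks in the stream handler.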
--------------------------------------------------------------------------------
/src/web/stats.rs:
--------------------------------------------------------------------------------
1 | // Copyright 2019 Parity Technologies (UK) Ltd.
2 | // This file is part of Substrate Analytics.
3 | 
4 | // Substrate Analytics is free software: you can redistribute it and/or modify
5 | // it under the terms of the GNU General Public License as published by
6 | // the Free Software Foundation, either version 3 of the License, or
7 | // (at your option) any later version.
8 | 
9 | // Substrate Analytics is distributed in the hope that it will be useful,
10 | // but WITHOUT ANY WARRANTY; without even the implied warranty of
11 | // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12 | // GNU General Public License for more details.
13 | 
14 | // You should have received a copy of the GNU General Public License
15 | // along with Substrate Analytics.  If not, see <http://www.gnu.org/licenses/>.
16 | 
17 | use super::metrics::Metrics;
18 | use crate::db::{stats::Query, DbExecutor};
19 | use actix::prelude::*;
20 | use actix_web::{http::StatusCode, HttpResponse, Result as AWResult};
21 | 
22 | lazy_static! {
23 |     static ref VERSION_INFO: String = format!(
24 |         "{} {}\n{}",
25 |         env!("CARGO_PKG_NAME"),
26 |         env!("CARGO_PKG_VERSION"),
27 |         env!("CARGO_PKG_DESCRIPTION")
28 |     );
29 | }
30 | 
31 | pub fn configure(cfg: &mut actix_web::web::ServiceConfig) {
32 |     cfg.service(
33 |         actix_web::web::scope("/stats/")
34 |             .route("/db/", actix_web::web::get().to(send_query))
35 |             .route("/version/", actix_web::web::get().to(version)),
36 |     );
37 | }
38 | 
39 | async fn send_query(
40 |     db: actix_web::web::Data<Addr<DbExecutor>>,
41 |     metrics: actix_web::web::Data<Metrics>,
42 | ) -> Result<HttpResponse, actix_web::Error> {
43 |     metrics.inc_req_count();
44 |     let res = db.send(Query::Db).await?;
45 |     match res {
46 |         Ok(r) => Ok(HttpResponse::Ok().json(r)),
47 |         Err(e) => {
48 |             error!("Could not complete stats query: {}", e);
49 |             Ok(HttpResponse::InternalServerError().json(json!("Error while processing query")))
50 |         }
51 |     }
52 | }
53 | 
54 | async fn version(metrics: actix_web::web::Data<Metrics>) -> AWResult<HttpResponse> {
55 |     metrics.inc_req_count();
56 |     Ok(HttpResponse::build(StatusCode::OK)
57 |         .content_type("text/html; charset=utf-8")
58 |         .body(&*VERSION_INFO))
59 | }
--------------------------------------------------------------------------------
/static/benchmarks/index.html:
--------------------------------------------------------------------------------
[Static dashboard page titled "Substrate Analytics - Benchmarks". The HTML markup and inline scripts were lost when this dump was extracted (tags were stripped); only empty element residue survived, so the page is not reproduced here.]
--------------------------------------------------------------------------------
/static/profiling/index.html:
--------------------------------------------------------------------------------
[Static dashboard page titled "Substrate Analytics - Live Profiling". The HTML markup and inline scripts were stripped during extraction and are not recoverable.]
--------------------------------------------------------------------------------
/static/reputation/index.html:
--------------------------------------------------------------------------------
[Static dashboard page, also titled "Substrate Analytics - Live Profiling" in its source. The HTML markup and inline scripts were stripped during extraction and are not recoverable.]
--------------------------------------------------------------------------------