├── .cargo └── config ├── .dockerignore ├── .drone.yml ├── .env ├── .gitignore ├── Cargo.lock ├── Cargo.toml ├── Dockerfile ├── Jenkinsfile ├── LICENSE ├── README.md ├── docker-compose.yml.example ├── docker ├── drone │ ├── Dockerfile │ └── manifest.tmpl └── prod │ └── docker-compose.yml ├── flake.lock ├── flake.nix ├── relay.nix ├── scss └── index.scss ├── src ├── admin.rs ├── admin │ ├── client.rs │ └── routes.rs ├── apub.rs ├── args.rs ├── build.rs ├── collector.rs ├── config.rs ├── data.rs ├── data │ ├── actor.rs │ ├── last_online.rs │ ├── media.rs │ ├── node.rs │ └── state.rs ├── db.rs ├── error.rs ├── extractors.rs ├── future.rs ├── jobs.rs ├── jobs │ ├── apub.rs │ ├── apub │ │ ├── announce.rs │ │ ├── follow.rs │ │ ├── forward.rs │ │ ├── reject.rs │ │ └── undo.rs │ ├── contact.rs │ ├── deliver.rs │ ├── deliver_many.rs │ ├── instance.rs │ ├── nodeinfo.rs │ ├── process_listeners.rs │ └── record_last_online.rs ├── main.rs ├── middleware.rs ├── middleware │ ├── payload.rs │ ├── timings.rs │ ├── verifier.rs │ └── webfinger.rs ├── requests.rs ├── routes.rs ├── routes │ ├── actor.rs │ ├── healthz.rs │ ├── inbox.rs │ ├── index.rs │ ├── media.rs │ ├── nodeinfo.rs │ └── statics.rs ├── spawner.rs └── telegram.rs ├── systemd ├── example-relay.service ├── example-relay.service.env └── example-relay.socket └── templates ├── admin.rs.html ├── index.rs.html ├── info.rs.html └── instance.rs.html /.cargo/config: -------------------------------------------------------------------------------- 1 | [build] 2 | rustflags = ["--cfg", "tokio_unstable"] 3 | -------------------------------------------------------------------------------- /.dockerignore: -------------------------------------------------------------------------------- 1 | /target 2 | /artifacts 3 | /sled 4 | .dockerignore 5 | Dockerfile 6 | Jenkinsfile 7 | -------------------------------------------------------------------------------- /.drone.yml: -------------------------------------------------------------------------------- 1 | kind: pipeline 2 | type: docker 3 | name: clippy 4 | 5 | platform: 6 | arch: amd64 7 | 8 | clone: 9 | disable: true 10 | 11 | steps: 12 | - name: clone 13 | image: alpine/git:latest 14 | user: root 15 | commands: 16 | - git clone $DRONE_GIT_HTTP_URL . 17 | - git checkout $DRONE_COMMIT 18 | - chown -R 991:991 . 19 | 20 | - name: clippy 21 | image: asonix/rust-builder:latest-linux-amd64 22 | pull: always 23 | commands: 24 | - rustup component add clippy 25 | - cargo clippy --no-deps -- -D warnings 26 | 27 | trigger: 28 | event: 29 | - push 30 | - pull_request 31 | - tag 32 | 33 | --- 34 | 35 | kind: pipeline 36 | type: docker 37 | name: tests 38 | 39 | platform: 40 | arch: amd64 41 | 42 | clone: 43 | disable: true 44 | 45 | steps: 46 | - name: clone 47 | image: alpine/git:latest 48 | user: root 49 | commands: 50 | - git clone $DRONE_GIT_HTTP_URL . 51 | - git checkout $DRONE_COMMIT 52 | - chown -R 991:991 . 53 | 54 | - name: tests 55 | image: asonix/rust-builder:latest-linux-amd64 56 | pull: always 57 | commands: 58 | - cargo test 59 | 60 | trigger: 61 | event: 62 | - push 63 | - pull_request 64 | - tag 65 | 66 | --- 67 | 68 | kind: pipeline 69 | type: docker 70 | name: check-amd64 71 | 72 | platform: 73 | arch: amd64 74 | 75 | clone: 76 | disable: true 77 | 78 | steps: 79 | - name: clone 80 | image: alpine/git:latest 81 | user: root 82 | commands: 83 | - git clone $DRONE_GIT_HTTP_URL . 84 | - git checkout $DRONE_COMMIT 85 | - chown -R 991:991 . 
86 | 87 | - name: check 88 | image: asonix/rust-builder:latest-linux-amd64 89 | pull: always 90 | commands: 91 | - cargo check --target=$TARGET 92 | 93 | trigger: 94 | event: 95 | - push 96 | - pull_request 97 | 98 | --- 99 | 100 | kind: pipeline 101 | type: docker 102 | name: build-amd64 103 | 104 | platform: 105 | arch: amd64 106 | 107 | clone: 108 | disable: true 109 | 110 | steps: 111 | - name: clone 112 | image: alpine/git:latest 113 | user: root 114 | commands: 115 | - git clone $DRONE_GIT_HTTP_URL . 116 | - git checkout $DRONE_COMMIT 117 | - chown -R 991:991 . 118 | 119 | - name: build 120 | image: asonix/rust-builder:latest-linux-amd64 121 | pull: always 122 | commands: 123 | - cargo build --target=$TARGET --release 124 | - $TOOL-strip target/$TARGET/release/relay 125 | - cp target/$TARGET/release/relay . 126 | - cp relay relay-linux-amd64 127 | 128 | - name: push 129 | image: plugins/docker:20 130 | settings: 131 | username: asonix 132 | password: 133 | from_secret: dockerhub_token 134 | repo: asonix/relay 135 | dockerfile: docker/drone/Dockerfile 136 | auto_tag: true 137 | auto_tag_suffix: linux-amd64 138 | build_args: 139 | - REPO_ARCH=amd64 140 | 141 | - name: publish 142 | image: plugins/gitea-release:1 143 | settings: 144 | api_key: 145 | from_secret: gitea_token 146 | base_url: https://git.asonix.dog 147 | files: 148 | - relay-linux-amd64 149 | 150 | depends_on: 151 | - clippy 152 | - tests 153 | 154 | trigger: 155 | event: 156 | - tag 157 | 158 | --- 159 | 160 | kind: pipeline 161 | type: docker 162 | name: check-arm64v8 163 | 164 | platform: 165 | arch: amd64 166 | 167 | clone: 168 | disable: true 169 | 170 | steps: 171 | - name: clone 172 | image: alpine/git:latest 173 | user: root 174 | commands: 175 | - git clone $DRONE_GIT_HTTP_URL . 176 | - git checkout $DRONE_COMMIT 177 | - chown -R 991:991 . 178 | 179 | - name: check 180 | image: asonix/rust-builder:latest-linux-arm64v8 181 | pull: always 182 | commands: 183 | - cargo check --target=$TARGET 184 | 185 | trigger: 186 | event: 187 | - push 188 | - pull_request 189 | 190 | --- 191 | 192 | kind: pipeline 193 | type: docker 194 | name: build-arm64v8 195 | 196 | platform: 197 | arch: amd64 198 | 199 | clone: 200 | disable: true 201 | 202 | steps: 203 | - name: clone 204 | image: alpine/git:latest 205 | user: root 206 | commands: 207 | - git clone $DRONE_GIT_HTTP_URL . 208 | - git checkout $DRONE_COMMIT 209 | - chown -R 991:991 . 210 | 211 | - name: build 212 | image: asonix/rust-builder:latest-linux-arm64v8 213 | pull: always 214 | commands: 215 | - cargo build --target=$TARGET --release 216 | - $TOOL-strip target/$TARGET/release/relay 217 | - cp target/$TARGET/release/relay . 
218 | - cp relay relay-linux-arm64v8 219 | 220 | - name: push 221 | image: plugins/docker:20 222 | settings: 223 | username: asonix 224 | password: 225 | from_secret: dockerhub_token 226 | repo: asonix/relay 227 | dockerfile: docker/drone/Dockerfile 228 | auto_tag: true 229 | auto_tag_suffix: linux-arm64v8 230 | build_args: 231 | - REPO_ARCH=arm64v8 232 | 233 | - name: publish 234 | image: plugins/gitea-release:1 235 | settings: 236 | api_key: 237 | from_secret: gitea_token 238 | base_url: https://git.asonix.dog 239 | files: 240 | - relay-linux-arm64v8 241 | 242 | depends_on: 243 | - clippy 244 | - tests 245 | 246 | trigger: 247 | event: 248 | - tag 249 | 250 | --- 251 | 252 | kind: pipeline 253 | type: docker 254 | name: check-arm32v7 255 | 256 | platform: 257 | arch: amd64 258 | 259 | clone: 260 | disable: true 261 | 262 | steps: 263 | - name: clone 264 | image: alpine/git:latest 265 | user: root 266 | commands: 267 | - git clone $DRONE_GIT_HTTP_URL . 268 | - git checkout $DRONE_COMMIT 269 | - chown -R 991:991 . 270 | 271 | - name: check 272 | image: asonix/rust-builder:latest-linux-arm32v7 273 | pull: always 274 | commands: 275 | - cargo check --target=$TARGET 276 | 277 | trigger: 278 | event: 279 | - push 280 | - pull_request 281 | 282 | --- 283 | 284 | kind: pipeline 285 | type: docker 286 | name: build-arm32v7 287 | 288 | platform: 289 | arch: amd64 290 | 291 | clone: 292 | disable: true 293 | 294 | steps: 295 | - name: clone 296 | image: alpine/git:latest 297 | user: root 298 | commands: 299 | - git clone $DRONE_GIT_HTTP_URL . 300 | - git checkout $DRONE_COMMIT 301 | - chown -R 991:991 . 302 | 303 | - name: build 304 | image: asonix/rust-builder:latest-linux-arm32v7 305 | pull: always 306 | commands: 307 | - cargo build --target=$TARGET --release 308 | - $TOOL-strip target/$TARGET/release/relay 309 | - cp target/$TARGET/release/relay . 310 | - cp relay relay-linux-arm32v7 311 | 312 | - name: push 313 | image: plugins/docker:20 314 | settings: 315 | username: asonix 316 | password: 317 | from_secret: dockerhub_token 318 | repo: asonix/relay 319 | dockerfile: docker/drone/Dockerfile 320 | auto_tag: true 321 | auto_tag_suffix: linux-arm32v7 322 | build_args: 323 | - REPO_ARCH=arm32v7 324 | 325 | - name: publish 326 | image: plugins/gitea-release:1 327 | settings: 328 | api_key: 329 | from_secret: gitea_token 330 | base_url: https://git.asonix.dog 331 | files: 332 | - relay-linux-arm32v7 333 | 334 | depends_on: 335 | - clippy 336 | - tests 337 | 338 | trigger: 339 | event: 340 | - tag 341 | 342 | --- 343 | 344 | kind: pipeline 345 | type: docker 346 | name: manifest 347 | 348 | platform: 349 | arch: amd64 350 | 351 | clone: 352 | disable: true 353 | 354 | steps: 355 | - name: clone 356 | image: alpine/git:latest 357 | user: root 358 | commands: 359 | - git clone $DRONE_GIT_HTTP_URL . 360 | - git checkout $DRONE_COMMIT 361 | - chown -R 991:991 . 
362 | 363 | - name: manifest 364 | image: plugins/manifest:1 365 | settings: 366 | username: asonix 367 | password: 368 | from_secret: dockerhub_token 369 | dump: true 370 | auto_tag: true 371 | ignore_missing: true 372 | spec: docker/drone/manifest.tmpl 373 | 374 | 375 | depends_on: 376 | - build-amd64 377 | - build-arm64v8 378 | - build-arm32v7 379 | 380 | trigger: 381 | event: 382 | - tag 383 | 384 | --- 385 | 386 | kind: pipeline 387 | type: docker 388 | name: publish-crate 389 | 390 | platform: 391 | arch: amd64 392 | 393 | clone: 394 | disable: true 395 | 396 | steps: 397 | - name: clone 398 | image: alpine/git:latest 399 | user: root 400 | commands: 401 | - git clone $DRONE_GIT_HTTP_URL . 402 | - git checkout $DRONE_COMMIT 403 | - chown -R 991:991 . 404 | 405 | - name: publish 406 | image: asonix/rust-builder:latest-linux-amd64 407 | pull: always 408 | environment: 409 | CRATES_IO_TOKEN: 410 | from_secret: crates_io_token 411 | commands: 412 | - cargo publish --token $CRATES_IO_TOKEN 413 | 414 | depends_on: 415 | - build-amd64 416 | - build-arm64v8 417 | - build-arm32v7 418 | 419 | trigger: 420 | event: 421 | - tag 422 | -------------------------------------------------------------------------------- /.env: -------------------------------------------------------------------------------- 1 | HOSTNAME=localhost:8079 2 | PORT=8079 3 | HTTPS=false 4 | DEBUG=true 5 | RESTRICTED_MODE=true 6 | VALIDATE_SIGNATURES=false 7 | API_TOKEN=somesecretpassword 8 | FOOTER_BLURB="Contact @asonix for inquiries" 9 | LOCAL_DOMAINS="masto.asonix.dog" 10 | LOCAL_BLURB="
<p>Welcome to my cool relay where I have cool relay things happening. I hope you enjoy your stay!</p>
" 11 | # OPENTELEMETRY_URL=http://localhost:4317 12 | PROMETHEUS_ADDR=127.0.0.1 13 | PROMETHEUS_PORT=9000 14 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | /target 2 | /artifacts 3 | /sled 4 | /.direnv 5 | /.envrc 6 | /result 7 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "ap-relay" 3 | description = "A simple activitypub relay" 4 | version = "0.3.106" 5 | authors = ["asonix "] 6 | license = "AGPL-3.0" 7 | readme = "README.md" 8 | repository = "https://git.asonix.dog/asonix/ap-relay" 9 | keywords = ["activitypub", "relay"] 10 | edition = "2021" 11 | build = "src/build.rs" 12 | 13 | [[bin]] 14 | name = "relay" 15 | path = "src/main.rs" 16 | 17 | [features] 18 | console = ["dep:console-subscriber"] 19 | default = [] 20 | 21 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html 22 | 23 | [dependencies] 24 | anyhow = "1.0" 25 | actix-web = { version = "4.4.0", default-features = false, features = ["compress-brotli", "compress-gzip", "rustls-0_21"] } 26 | actix-webfinger = { version = "0.5.0", default-features = false } 27 | activitystreams = "0.7.0-alpha.25" 28 | activitystreams-ext = "0.1.0-alpha.3" 29 | ammonia = "3.1.0" 30 | async-cpupool = "0.2.0" 31 | bcrypt = "0.15" 32 | base64 = "0.21" 33 | clap = { version = "4.0.0", features = ["derive"] } 34 | config = "0.13.0" 35 | console-subscriber = { version = "0.2", optional = true } 36 | dashmap = "5.1.0" 37 | dotenv = "0.15.0" 38 | flume = "0.11.0" 39 | lru = "0.12.0" 40 | metrics = "0.22.0" 41 | metrics-exporter-prometheus = { version = "0.13.0", default-features = false, features = [ 42 | "http-listener", 43 | ] } 44 | metrics-util = "0.16.0" 45 | mime = "0.3.16" 46 | minify-html = "0.15.0" 47 | opentelemetry = "0.21" 48 | opentelemetry_sdk = { version = "0.21", features = ["rt-tokio"] } 49 | opentelemetry-otlp = "0.14" 50 | pin-project-lite = "0.2.9" 51 | # pinned to metrics-util 52 | quanta = "0.12.0" 53 | rand = "0.8" 54 | reqwest = { version = "0.11", default-features = false, features = ["rustls-tls", "stream"]} 55 | reqwest-middleware = "0.2" 56 | reqwest-tracing = "0.4.5" 57 | ring = "0.17.5" 58 | rsa = { version = "0.9" } 59 | rsa-magic-public-key = "0.8.0" 60 | rustls = "0.21.0" 61 | rustls-pemfile = "1.0.1" 62 | serde = { version = "1.0", features = ["derive"] } 63 | serde_json = "1.0" 64 | sled = "0.34.7" 65 | teloxide = { version = "0.12.0", default-features = false, features = [ 66 | "ctrlc_handler", 67 | "macros", 68 | "rustls", 69 | ] } 70 | thiserror = "1.0" 71 | time = { version = "0.3.17", features = ["serde"] } 72 | tracing = "0.1" 73 | tracing-error = "0.2" 74 | tracing-log = "0.2" 75 | tracing-opentelemetry = "0.22" 76 | tracing-subscriber = { version = "0.3", features = [ 77 | "ansi", 78 | "env-filter", 79 | "fmt", 80 | ] } 81 | tokio = { version = "1", features = ["full", "tracing"] } 82 | uuid = { version = "1", features = ["v4", "serde"] } 83 | streem = "0.2.0" 84 | 85 | [dependencies.background-jobs] 86 | version = "0.17.0" 87 | default-features = false 88 | features = ["error-logging", "metrics", "tokio"] 89 | 90 | [dependencies.http-signature-normalization-actix] 91 | version = "0.11.0" 92 | default-features = false 93 | features = ["server", "ring"] 94 | 95 | 
[dependencies.http-signature-normalization-reqwest] 96 | version = "0.11.0" 97 | default-features = false 98 | features = ["middleware", "ring"] 99 | 100 | [dependencies.tracing-actix-web] 101 | version = "0.7.9" 102 | 103 | [build-dependencies] 104 | anyhow = "1.0" 105 | dotenv = "0.15.0" 106 | ructe = { version = "0.17.0", features = ["sass", "mime03"] } 107 | toml = "0.8.0" 108 | 109 | [profile.dev.package.rsa] 110 | opt-level = 3 111 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | # syntax=docker/dockerfile:1.4 2 | FROM alpine:3.19 AS alpine 3 | ARG TARGETPLATFORM 4 | 5 | RUN \ 6 | --mount=type=cache,id=$TARGETPLATFORM-alpine,target=/var/cache/apk,sharing=locked \ 7 | set -eux; \ 8 | apk add -U libgcc; 9 | 10 | ################################################################################ 11 | 12 | FROM alpine AS alpine-dev 13 | ARG TARGETPLATFORM 14 | 15 | RUN \ 16 | --mount=type=cache,id=$TARGETPLATFORM-alpine,target=/var/cache/apk,sharing=locked \ 17 | set -eux; \ 18 | apk add -U musl-dev; 19 | 20 | ################################################################################ 21 | 22 | FROM --platform=$BUILDPLATFORM rust:1 AS builder 23 | ARG BUILDPLATFORM 24 | ARG TARGETPLATFORM 25 | 26 | RUN \ 27 | --mount=type=cache,id=$BUILDPLATFORM-debian,target=/var/cache,sharing=locked \ 28 | --mount=type=cache,id=$BUILDPLATFORM-debian,target=/var/lib/apt,sharing=locked \ 29 | set -eux; \ 30 | case "${TARGETPLATFORM}" in \ 31 | linux/i386) \ 32 | dpkgArch='i386'; \ 33 | ;; \ 34 | linux/amd64) \ 35 | dpkgArch='amd64'; \ 36 | ;; \ 37 | linux/arm64) \ 38 | dpkgArch='arm64'; \ 39 | ;; \ 40 | *) echo "unsupported architecture"; exit 1 ;; \ 41 | esac; \ 42 | dpkg --add-architecture $dpkgArch; \ 43 | apt-get update; \ 44 | apt-get install -y --no-install-recommends \ 45 | musl-dev:$dpkgArch \ 46 | musl-tools:$dpkgArch \ 47 | ; 48 | 49 | WORKDIR /opt/aode-relay 50 | 51 | RUN set -eux; \ 52 | case "${TARGETPLATFORM}" in \ 53 | linux/i386) arch='i686';; \ 54 | linux/amd64) arch='x86_64';; \ 55 | linux/arm64) arch='aarch64';; \ 56 | *) echo "unsupported architecture"; exit 1 ;; \ 57 | esac; \ 58 | rustup target add "${arch}-unknown-linux-musl"; 59 | 60 | ADD Cargo.lock Cargo.toml /opt/aode-relay/ 61 | RUN cargo fetch; 62 | 63 | ADD . 
/opt/aode-relay 64 | COPY --link --from=alpine-dev / /opt/alpine/ 65 | 66 | RUN set -eux; \ 67 | case "${TARGETPLATFORM}" in \ 68 | linux/i386) arch='i686';; \ 69 | linux/amd64) arch='x86_64';; \ 70 | linux/arm64) arch='aarch64';; \ 71 | *) echo "unsupported architecture"; exit 1 ;; \ 72 | esac; \ 73 | ln -s "target/${arch}-unknown-linux-musl/release/relay" "aode-relay"; \ 74 | export RUSTFLAGS="-C target-cpu=generic -C linker=${arch}-linux-musl-gcc -C target-feature=-crt-static -C link-self-contained=no -L /opt/alpine/lib -L /opt/alpine/usr/lib"; \ 75 | cargo build --frozen --release --target="${arch}-unknown-linux-musl"; 76 | 77 | ################################################################################ 78 | 79 | FROM alpine 80 | ARG TARGETPLATFORM 81 | 82 | RUN \ 83 | --mount=type=cache,id=$TARGETPLATFORM-alpine,target=/var/cache/apk,sharing=locked \ 84 | set -eux; \ 85 | apk add -U ca-certificates curl tini; 86 | 87 | COPY --link --from=builder /opt/aode-relay/aode-relay /usr/local/bin/aode-relay 88 | 89 | # Smoke test 90 | RUN /usr/local/bin/aode-relay --help 91 | 92 | # Some base env configuration 93 | ENV ADDR 0.0.0.0 94 | ENV PORT 8080 95 | ENV DEBUG false 96 | ENV VALIDATE_SIGNATURES true 97 | ENV HTTPS false 98 | ENV PRETTY_LOG false 99 | ENV PUBLISH_BLOCKS true 100 | ENV SLED_PATH "/var/lib/aode-relay/sled/db-0.34" 101 | ENV RUST_LOG warn 102 | 103 | VOLUME "/var/lib/aode-relay" 104 | 105 | ENTRYPOINT ["/sbin/tini", "--"] 106 | 107 | CMD ["/usr/local/bin/aode-relay"] 108 | 109 | EXPOSE 8080 110 | 111 | HEALTHCHECK CMD curl -sSf "localhost:$PORT/healthz" > /dev/null || exit 1 112 | -------------------------------------------------------------------------------- /Jenkinsfile: -------------------------------------------------------------------------------- 1 | pipeline { 2 | agent none 3 | stages { 4 | stage('Test') { 5 | agent { 6 | docker { 7 | image 'ghcr.io/cleanc-lab/rust:1.72.0-slim-bookworm' 8 | args '--privileged --net=host -v /var/run/docker.sock:/var/run/docker.sock' 9 | } 10 | } 11 | stages { 12 | stage('Build') { 13 | steps { 14 | sh 'cargo build' 15 | } 16 | } 17 | stage('Nextest') { 18 | steps { 19 | sh '/usr/local/cargo/bin/cargo-nextest nextest run' 20 | } 21 | } 22 | } 23 | } 24 | stage('Docker') { 25 | agent { 26 | docker { 27 | image 'docker:24-cli' 28 | args '--privileged -v /var/run/docker.sock:/var/run/docker.sock' 29 | } 30 | } 31 | when { 32 | anyOf { 33 | branch 'interstellar-next'; 34 | branch 'interstellar-dev'; 35 | buildingTag(); 36 | } 37 | } 38 | environment { 39 | DOCKER_REGISTRY = 'ghcr.io' 40 | GITHUB_ORG = 'interstellar-relay-community' 41 | DOCKER_IMAGE = "${env.DOCKER_REGISTRY}/${env.GITHUB_ORG}/aode-relay" 42 | GHCR_TOKEN = credentials('siliconforest-jenkins-github-pat-package-rw') 43 | } 44 | stages { 45 | stage('Prepare') { 46 | steps { 47 | script { 48 | if (env.BRANCH_NAME == 'interstellar-next') { 49 | env.DOCKER_TAG = 'latest' 50 | } else if (env.BRANCH_NAME == 'interstellar-dev') { 51 | env.DOCKER_TAG = 'develop' 52 | } else { 53 | env.DOCKER_TAG = env.TAG_NAME 54 | } 55 | } 56 | } 57 | } 58 | stage('Docker login') { 59 | steps { 60 | sh 'echo $GHCR_TOKEN_PSW | docker login ghcr.io -u $GHCR_TOKEN_USR --password-stdin' 61 | } 62 | } 63 | stage('Build') { 64 | matrix { 65 | axes { 66 | axis { 67 | name 'TARGET' 68 | values 'amd64', 'arm64' 69 | } 70 | } 71 | stages { 72 | stage('Build platform specific image') { 73 | steps { 74 | sh "docker buildx create --name container-${TARGET} --driver=docker-container" 75 | sh "docker buildx 
build --builder container-${TARGET} -t $DOCKER_IMAGE:$DOCKER_TAG-${TARGET} --platform linux/${TARGET} ." 76 | } 77 | } 78 | stage('Push platform specific image') { 79 | steps { 80 | sh "docker push $DOCKER_IMAGE:$DOCKER_TAG-${TARGET}" 81 | } 82 | } 83 | } 84 | } 85 | } 86 | stage('Docker manifest') { 87 | steps { 88 | sh "docker manifest create $DOCKER_IMAGE:$DOCKER_TAG --amend $DOCKER_IMAGE:$DOCKER_TAG-amd64 --amend $DOCKER_IMAGE:$DOCKER_TAG-arm64" 89 | } 90 | } 91 | stage('Docker push') { 92 | steps { 93 | sh "docker manifest push $DOCKER_IMAGE:$DOCKER_TAG" 94 | } 95 | } 96 | } 97 | post { 98 | always { 99 | sh 'docker logout "$DOCKER_REGISTRY"' 100 | } 101 | } 102 | } 103 | } 104 | } 105 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # AodeRelay 2 | _A simple and efficient activitypub relay_ 3 | 4 | Forked from original [Aode Relay](https://git.asonix.dog/asonix/relay), applied some customization on it. 5 | 6 | ### Installation 7 | #### Docker 8 | If running docker, you can start the relay with the following command: 9 | ``` 10 | $ sudo docker run --rm -it \ 11 | -v "$(pwd):/mnt/" \ 12 | -e ADDR=0.0.0.0 \ 13 | -e SLED_PATH=/mnt/sled/db-0.34 \ 14 | -p 8080:8080 \ 15 | asonix/relay:0.3.85 16 | ``` 17 | This will launch the relay with the database stored in "./sled/db-0.34" and listening on port 8080 18 | #### Cargo 19 | With cargo installed, the relay can be installed to your cargo bin directory with the following command 20 | ``` 21 | $ cargo install ap-relay 22 | ``` 23 | Then it can be run with this: 24 | ``` 25 | $ ADDR=0.0.0.0 relay 26 | ``` 27 | This will launch the relay with the database stored in "./sled/db-0.34" and listening on port 8080 28 | #### Source 29 | The relay can be launched directly from this git repository with the following commands: 30 | ``` 31 | $ git clone https://git.asonix.dog/asonix/relay 32 | $ ADDR=0.0.0.0 cargo run --release 33 | ``` 34 | 35 | ### Usage 36 | To simply run the server, the command is as follows 37 | ```bash 38 | $ ./relay 39 | ``` 40 | 41 | #### Administration 42 | > **NOTE:** The server _must be running_ in order to update the lists with the following commands 43 | 44 | To learn about any other tasks, the `--help` flag can be passed 45 | ```bash 46 | An activitypub relay 47 | 48 | Usage: relay [OPTIONS] 49 | 50 | Options: 51 | -b A list of domains that should be blocked 52 | -a A list of domains that should be allowed 53 | -u, --undo Undo allowing or blocking domains 54 | -h, --help Print help information 55 | ``` 56 | 57 | To add domains to the blocklist, use the `-b` flag and pass a list of domains 58 | ```bash 59 | $ ./relay -b asonix.dog blimps.xyz 60 | ``` 61 | To remove domains from the blocklist, simply pass the `-u` flag along with `-b` 62 | ```bash 63 | $ ./relay -ub asonix.dog blimps.xyz 64 | ``` 65 | The same rules apply for allowing domains, although domains are allowed with the `-a` flag 66 | ```bash 67 | $ ./relay -a asonix.dog blimps.xyz 68 | $ ./relay -ua asonix.dog blimps.xyz 69 | ``` 70 | 71 | ### Configuration 72 | By default, all these values are set to development values. These are read from the environment, or 73 | from the `.env` file in the working directory. 
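As a point of reference, the loading mechanism is the usual `dotenv` + `config` pattern (both crates appear in Cargo.toml above). The sketch below is illustrative only: `RelayConfig` and its fields are placeholder names, not the relay's actual type from `src/config.rs`.
```rust
// Minimal sketch of env-based configuration loading, assuming the `dotenv` and
// `config` crates listed in Cargo.toml. `RelayConfig` is a placeholder, not the
// relay's real config type.
#[derive(Debug, serde::Deserialize)]
struct RelayConfig {
    hostname: String,
    port: u16,
    https: bool,
    debug: bool,
}

fn load() -> Result<RelayConfig, config::ConfigError> {
    // Merge `.env` from the working directory into the process environment.
    dotenv::dotenv().ok();

    config::Config::builder()
        // Collect all environment variables; parse "8080"/"true" style strings
        // into numbers and booleans instead of leaving them as strings.
        .add_source(config::Environment::default().try_parsing(true))
        .build()?
        .try_deserialize()
}
```
The two listings that follow use exactly these variable names, first with the development defaults and then with typical production values.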
74 | ```env 75 | HOSTNAME=localhost:8080 76 | ADDR=127.0.0.1 77 | PORT=8080 78 | DEBUG=true 79 | RESTRICTED_MODE=false 80 | VALIDATE_SIGNATURES=false 81 | HTTPS=false 82 | PRETTY_LOG=true 83 | PUBLISH_BLOCKS=false 84 | SLED_PATH=./sled/db-0.34 85 | ``` 86 | To run this server in production, you'll likely want to set most of them 87 | ```env 88 | HOSTNAME=relay.my.tld 89 | ADDR=0.0.0.0 90 | PORT=8080 91 | DEBUG=false 92 | RESTRICTED_MODE=false 93 | VALIDATE_SIGNATURES=true 94 | HTTPS=true 95 | PRETTY_LOG=false 96 | PUBLISH_BLOCKS=true 97 | SLED_PATH=./sled/db-0.34 98 | RUST_LOG=warn 99 | API_TOKEN=somepasswordishtoken 100 | OPENTELEMETRY_URL=localhost:4317 101 | TELEGRAM_TOKEN=secret 102 | TELEGRAM_ADMIN_HANDLE=your_handle 103 | TLS_KEY=/path/to/key 104 | TLS_CERT=/path/to/cert 105 | FOOTER_BLURB="Contact @asonix for inquiries" 106 | LOCAL_DOMAINS=masto.asonix.dog 107 | LOCAL_BLURB="
<p>Welcome to my cool relay where I have cool relay things happening. I hope you enjoy your stay!</p>
" 108 | PROMETHEUS_ADDR=0.0.0.0 109 | PROMETHEUS_PORT=9000 110 | CLIENT_TIMEOUT=10 111 | DELIVER_CONCURRENCY=8 112 | SIGNATURE_THREADS=2 113 | ``` 114 | 115 | #### Descriptions 116 | ##### `HOSTNAME` 117 | The domain or IP address the relay is hosted on. If you launch the relay on `example.com`, that would be your HOSTNAME. The default is `localhost:8080` 118 | ##### `ADDR` 119 | The address the server binds to. By default, this is `127.0.0.1`, so for production cases it should be set to `0.0.0.0` or another public address. 120 | ##### `PORT` 121 | The port the server binds to, this is `8080` by default but can be changed if needed. 122 | ##### `DEBUG` 123 | Whether to print incoming activities to the console when requests hit the /inbox route. This defaults to `true`, but should be set to `false` in production cases. Since every activity sent to the relay is public anyway, this doesn't represent a security risk. 124 | ##### `RESTRICTED_MODE` 125 | This setting enables an 'allowlist' setup where only servers that have been explicitly enabled through the `relay -a` command can join the relay. This is `false` by default. If `RESTRICTED_MODE` is not enabled, then manually allowing domains with `relay -a` has no effect. 126 | ##### `VALIDATE_SIGNATURES` 127 | This setting enforces checking HTTP signatures on incoming activities. It defaults to `true` 128 | ##### `HTTPS` 129 | Whether the current server is running on an HTTPS port or not. This is used for generating URLs to the current running relay. By default it is set to `true` 130 | ##### `PUBLISH_BLOCKS` 131 | Whether or not to publish a list of blocked domains in the `nodeinfo` metadata for the server. It defaults to `false`. 132 | ##### `SLED_PATH` 133 | Where to store the on-disk database of connected servers. This defaults to `./sled/db-0.34`. 134 | ##### `RUST_LOG` 135 | The log level to print. Available levels are `ERROR`, `WARN`, `INFO`, `DEBUG`, and `TRACE`. You can also specify module paths to enable some logs but not others, such as `RUST_LOG=warn,tracing_actix_web=info,relay=info`. This defaults to `warn` 136 | ##### `SOURCE_REPO` 137 | The URL to the source code for the relay. This defaults to `https://git.asonix.dog/asonix/relay`, but should be changed if you're running a fork hosted elsewhere. 138 | ##### `REPOSITORY_COMMIT_BASE` 139 | The base path of the repository commit hash reference. For example, `/src/commit/` for Gitea, `/tree/` for GitLab. 140 | ##### `API_TOKEN` 141 | The Secret token used to access the admin APIs. This must be set for the commandline to function 142 | ##### `OPENTELEMETRY_URL` 143 | A URL for exporting opentelemetry spans. This is mostly useful for debugging. There is no default, since most people probably don't run an opentelemetry collector. 144 | ##### `TELEGRAM_TOKEN` 145 | A Telegram Bot Token for running the relay administration bot. There is no default. 146 | ##### `TELEGRAM_ADMIN_HANDLE` 147 | The handle of the telegram user allowed to administer the relay. There is no default. 
148 | ##### `TLS_KEY` 149 | Optional - This is specified if you are running the relay directly on the internet and have a TLS key to provide HTTPS for your relay 150 | ##### `TLS_CERT` 151 | Optional - This is specified if you are running the relay directly on the internet and have a TLS certificate chain to provide HTTPS for your relay 152 | ##### `FOOTER_BLURB` 153 | Optional - Add custom notes in the footer of the page 154 | ##### `LOCAL_DOMAINS` 155 | Optional - domains of mastodon servers run by the same admin as the relay 156 | ##### `LOCAL_BLURB` 157 | Optional - description for the relay 158 | ##### `PROMETHEUS_ADDR` 159 | Optional - Address to bind to for serving the prometheus scrape endpoint 160 | ##### `PROMETHEUS_PORT` 161 | Optional - Port to bind to for serving the prometheus scrape endpoint 162 | ##### `CLIENT_TIMEOUT` 163 | Optional - How long the relay will hold open a connection (in seconds) to a remote server during 164 | fetches and deliveries. This defaults to 10 165 | ##### `DELIVER_CONCURRENCY` 166 | Optional - How many deliver requests the relay should allow to be in-flight per thread. the default 167 | is 8 168 | ##### `SIGNATURE_THREADS` 169 | Optional - Override number of threads used for signing and verifying requests. Default is 170 | `std::thread::available_parallelism()` (It tries to detect how many cores you have). If it cannot 171 | detect the correct number of cores, it falls back to 1. 172 | ##### 'PROXY_URL' 173 | Optional - URL of an HTTP proxy to forward outbound requests through 174 | ##### 'PROXY_USERNAME' 175 | Optional - username to provide to the HTTP proxy set with `PROXY_URL` through HTTP Basic Auth 176 | ##### 'PROXY_PASSWORD' 177 | Optional - password to provide to the HTTP proxy set with `PROXY_URL` through HTTP Basic Auth 178 | 179 | ### Subscribing 180 | Mastodon admins can subscribe to this relay by adding the `/inbox` route to their relay settings. 181 | For example, if the server is `https://relay.my.tld`, the correct URL would be 182 | `https://relay.my.tld/inbox`. 183 | 184 | Pleroma admins can subscribe to this relay by adding the `/actor` route to their relay settings. For 185 | example, if the server is `https://relay.my.tld`, the correct URL would be 186 | `https://relay.my.tld/actor`. 187 | 188 | ### Supported Activities 189 | - Accept Follow {remote-actor}, this is a no-op 190 | - Reject Follow {remote-actor}, an Undo Follow is sent to {remote-actor} 191 | - Announce {anything}, {anything} is Announced to listening servers 192 | - Create {anything}, {anything} is Announced to listening servers 193 | - Follow {self-actor}, become a listener of the relay, a Follow will be sent back 194 | - Follow Public, become a listener of the relay 195 | - Undo Follow {self-actor}, stop listening on the relay, an Undo Follow will be sent back 196 | - Undo Follow Public, stop listening on the relay 197 | - Delete {anything}, the Delete {anything} is relayed verbatim to listening servers. 198 | Note that this activity will likely be rejected by the listening servers unless it has been 199 | signed with a JSON-LD signature 200 | - Update {anything}, the Update {anything} is relayed verbatim to listening servers. 201 | Note that this activity will likely be rejected by the listening servers unless it has been 202 | signed with a JSON-LD signature 203 | - Add {anything}, the Add {anything} is relayed verbatim to listening servers. 
204 | Note that this activity will likely be rejected by the listening servers unless it has been 205 | signed with a JSON-LD signature 206 | - Remove {anything}, the Remove {anything} is relayed verbatim to listening servers. 207 | Note that this activity will likely be rejected by the listening servers unless it has been 208 | signed with a JSON-LD signature 209 | 210 | ### Supported Discovery Protocols 211 | - Webfinger 212 | - NodeInfo 213 | 214 | ### Known issues 215 | Pleroma and Akkoma do not support validating JSON-LD signatures, meaning many activities such as Delete, Update, Add, and Remove will be rejected with a message similar to `WARN: Response from https://example.com/inbox, "Invalid HTTP Signature"`. This is normal and not an issue with the relay. 216 | 217 | ### Contributing 218 | Feel free to open issues for anything you find an issue with. Please note that any contributed code will be licensed under the AGPLv3. 219 | 220 | ### License 221 | Copyright © 2022 Riley Trautman 222 | 223 | AodeRelay is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. 224 | 225 | AodeRelay is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. This file is part of AodeRelay. 226 | 227 | You should have received a copy of the GNU General Public License along with AodeRelay. If not, see [http://www.gnu.org/licenses/](http://www.gnu.org/licenses/). 228 | -------------------------------------------------------------------------------- /docker-compose.yml.example: -------------------------------------------------------------------------------- 1 | version: '3.3' 2 | 3 | services: 4 | relay: 5 | #image: interstellarflights/aode-relay:edge 6 | image: ghcr.io/interstellar-relay-community/aode-relay:edge 7 | ports: 8 | - "8080:8080" 9 | - "8081:8081" 10 | restart: always 11 | volumes: 12 | - './relay:/var/lib/aode-relay' 13 | environment: 14 | - HOSTNAME=ap.example.com 15 | - DEBUG=true 16 | - RESTRICTED_MODE=true 17 | - VALIDATE_SIGNATURES=true 18 | - HTTPS=true 19 | - PRETTY_LOG=false 20 | - PUBLISH_BLOCKS=true 21 | - SOURCE_REPO=https://github.com/Interstellar-Relay-Community/aode-relay 22 | - REPOSITORY_COMMIT_BASE=/tree/ 23 | - PROMETHEUS_ADDR=0.0.0.0 24 | - PROMETHEUS_PORT=8081 25 | - API_TOKEN=[REDACTED] 26 | -------------------------------------------------------------------------------- /docker/drone/Dockerfile: -------------------------------------------------------------------------------- 1 | ARG REPO_ARCH 2 | 3 | FROM asonix/rust-runner:latest-linux-$REPO_ARCH 4 | 5 | COPY relay /usr/local/bin/relay 6 | 7 | USER app 8 | EXPOSE 8080 9 | VOLUME /mnt 10 | ENTRYPOINT ["/sbin/tini", "--"] 11 | CMD ["/usr/local/bin/relay"] 12 | -------------------------------------------------------------------------------- /docker/drone/manifest.tmpl: -------------------------------------------------------------------------------- 1 | image: asonix/relay:{{#if build.tag}}{{trimPrefix "v" build.tag}}{{else}}latest{{/if}} 2 | {{#if build.tags}} 3 | tags: 4 | {{#each build.tags}} 5 | - {{this}} 6 | {{/each}} 7 | {{/if}} 8 | manifests: 9 | - 10 | image: asonix/relay:{{#if build.tag}}{{trimPrefix "v" build.tag}}-{{/if}}linux-amd64 11 | platform: 12 | architecture: amd64 13 | os: 
linux 14 | - 15 | image: asonix/relay:{{#if build.tag}}{{trimPrefix "v" build.tag}}-{{/if}}linux-arm64v8 16 | platform: 17 | architecture: arm64 18 | os: linux 19 | variant: v8 20 | - 21 | image: asonix/relay:{{#if build.tag}}{{trimPrefix "v" build.tag}}-{{/if}}linux-arm32v7 22 | platform: 23 | architecture: arm 24 | os: linux 25 | variant: v7 26 | -------------------------------------------------------------------------------- /docker/prod/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '3.3' 2 | 3 | services: 4 | relay: 5 | image: asonix/relay:0.3.85 6 | ports: 7 | - "8079:8079" 8 | restart: always 9 | environment: 10 | - HOSTNAME=relay.my.tld 11 | - ADDR=0.0.0.0 12 | - PORT=8080 13 | - DEBUG=false 14 | - RESTRICTED_MODE=false 15 | - VALIDATE_SIGNATURES=true 16 | - HTTPS=true 17 | - SLED_PATH=/mnt/sled/db-0.34 18 | - PRETTY_LOG=false 19 | - PUBLISH_BLOCKS=true 20 | - API_TOKEN=somepasswordishtoken 21 | -------------------------------------------------------------------------------- /flake.lock: -------------------------------------------------------------------------------- 1 | { 2 | "nodes": { 3 | "flake-utils": { 4 | "inputs": { 5 | "systems": "systems" 6 | }, 7 | "locked": { 8 | "lastModified": 1701680307, 9 | "narHash": "sha256-kAuep2h5ajznlPMD9rnQyffWG8EM/C73lejGofXvdM8=", 10 | "owner": "numtide", 11 | "repo": "flake-utils", 12 | "rev": "4022d587cbbfd70fe950c1e2083a02621806a725", 13 | "type": "github" 14 | }, 15 | "original": { 16 | "owner": "numtide", 17 | "repo": "flake-utils", 18 | "type": "github" 19 | } 20 | }, 21 | "nixpkgs": { 22 | "locked": { 23 | "lastModified": 1705133751, 24 | "narHash": "sha256-rCIsyE80jgiOU78gCWN3A0wE0tR2GI5nH6MlS+HaaSQ=", 25 | "owner": "NixOS", 26 | "repo": "nixpkgs", 27 | "rev": "9b19f5e77dd906cb52dade0b7bd280339d2a1f3d", 28 | "type": "github" 29 | }, 30 | "original": { 31 | "owner": "NixOS", 32 | "ref": "nixos-unstable", 33 | "repo": "nixpkgs", 34 | "type": "github" 35 | } 36 | }, 37 | "root": { 38 | "inputs": { 39 | "flake-utils": "flake-utils", 40 | "nixpkgs": "nixpkgs" 41 | } 42 | }, 43 | "systems": { 44 | "locked": { 45 | "lastModified": 1681028828, 46 | "narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=", 47 | "owner": "nix-systems", 48 | "repo": "default", 49 | "rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e", 50 | "type": "github" 51 | }, 52 | "original": { 53 | "owner": "nix-systems", 54 | "repo": "default", 55 | "type": "github" 56 | } 57 | } 58 | }, 59 | "root": "root", 60 | "version": 7 61 | } 62 | -------------------------------------------------------------------------------- /flake.nix: -------------------------------------------------------------------------------- 1 | { 2 | description = "relay"; 3 | 4 | inputs = { 5 | nixpkgs.url = "github:NixOS/nixpkgs/nixos-unstable"; 6 | flake-utils.url = "github:numtide/flake-utils"; 7 | }; 8 | 9 | outputs = { self, nixpkgs, flake-utils }: 10 | flake-utils.lib.eachDefaultSystem (system: 11 | let 12 | pkgs = import nixpkgs { 13 | inherit system; 14 | }; 15 | in 16 | { 17 | packages = rec { 18 | relay = pkgs.callPackage ./relay.nix { }; 19 | 20 | default = relay; 21 | }; 22 | 23 | apps = rec { 24 | dev = flake-utils.lib.mkApp { drv = self.packages.${system}.pict-rs-proxy; }; 25 | default = dev; 26 | }; 27 | 28 | devShell = with pkgs; mkShell { 29 | nativeBuildInputs = [ cargo cargo-outdated cargo-zigbuild clippy gcc protobuf rust-analyzer rustc rustfmt ]; 30 | 31 | RUST_SRC_PATH = 
"${pkgs.rust.packages.stable.rustPlatform.rustLibSrc}"; 32 | }; 33 | }); 34 | } 35 | -------------------------------------------------------------------------------- /relay.nix: -------------------------------------------------------------------------------- 1 | { lib 2 | , nixosTests 3 | , rustPlatform 4 | }: 5 | 6 | rustPlatform.buildRustPackage { 7 | pname = "relay"; 8 | version = "0.3.106"; 9 | src = ./.; 10 | cargoLock.lockFile = ./Cargo.lock; 11 | 12 | RUSTFLAGS = "--cfg tokio_unstable"; 13 | 14 | nativeBuildInputs = [ ]; 15 | 16 | passthru.tests = { inherit (nixosTests) relay; }; 17 | 18 | meta = with lib; { 19 | description = "An ActivityPub relay"; 20 | homepage = "https://git.asonix.dog/asonix/relay"; 21 | license = with licenses; [ agpl3Plus ]; 22 | }; 23 | } 24 | -------------------------------------------------------------------------------- /scss/index.scss: -------------------------------------------------------------------------------- 1 | body { 2 | background-color: #333; 3 | color: #f5f5f5; 4 | font-family: sans-serif; 5 | margin: 0; 6 | position: relative; 7 | min-height: 100vh; 8 | padding-bottom: 96px; 9 | } 10 | 11 | ul { 12 | margin: 0; 13 | padding: 0; 14 | list-style: none; 15 | } 16 | 17 | body, 18 | body * { 19 | box-sizing: border-box; 20 | } 21 | 22 | header { 23 | .header-text { 24 | max-width: 700px; 25 | margin: auto; 26 | padding: 24px 0; 27 | } 28 | 29 | h1 { 30 | margin: 0px; 31 | .smaller { 32 | font-size: 14px; 33 | font-weight: 400; 34 | } 35 | } 36 | 37 | p { 38 | margin: 0; 39 | margin-top: 8px; 40 | font-style: italic; 41 | } 42 | } 43 | 44 | article { 45 | background-color: #fff; 46 | color: #333; 47 | border: 1px solid #e5e5e5; 48 | box-shadow: 0 0 3px rgba(0, 0, 0, 0.1); 49 | border-radius: 3px; 50 | margin: 32px auto 0; 51 | max-width: 700px; 52 | padding-bottom: 32px; 53 | 54 | section { 55 | border-bottom: 1px solid #e5e5e5; 56 | 57 | > h4:first-child, 58 | > p:first-child { 59 | margin-top: 0; 60 | } 61 | > p:last-child { 62 | margin-bottom: 0; 63 | } 64 | } 65 | 66 | h3 { 67 | padding: 24px; 68 | margin: 0px; 69 | border-bottom: 1px solid #e5e5e5; 70 | } 71 | 72 | .info { 73 | padding-bottom: 36px; 74 | } 75 | 76 | li { 77 | padding-top: 36px; 78 | } 79 | 80 | .padded { 81 | padding: 0 24px; 82 | } 83 | 84 | .local-explainer, 85 | .joining { 86 | padding: 24px; 87 | } 88 | 89 | a { 90 | transition: color .2s cubic-bezier(.3,0,.5,1); 91 | 92 | &, 93 | &:focus, 94 | &:active { 95 | color: #c92a60; 96 | } 97 | 98 | &:hover { 99 | color: #9d2a60; 100 | } 101 | } 102 | } 103 | 104 | pre { 105 | border: 1px solid #e5e5e5; 106 | border-radius: 3px; 107 | background-color: #f5f5f5; 108 | padding: 8px; 109 | padding-left: 32px; 110 | padding-top: 10px; 111 | position: relative; 112 | 113 | &:before { 114 | content: ' '; 115 | display: block; 116 | position: absolute; 117 | top: 0; 118 | left: 0; 119 | bottom: 0; 120 | width: 24px; 121 | background-color: #e5e5e5; 122 | } 123 | } 124 | 125 | a { 126 | &, 127 | &:focus, 128 | &:active { 129 | color: #f9a6c2; 130 | } 131 | 132 | &:hover { 133 | color: #f2739f; 134 | } 135 | } 136 | 137 | footer { 138 | background-color: #333; 139 | color: #f5f5f5; 140 | position: absolute; 141 | padding: 16px 8px; 142 | bottom: 0; 143 | left: 0; 144 | right: 0; 145 | text-align: center; 146 | 147 | p { 148 | margin: 0; 149 | } 150 | } 151 | 152 | .instance, 153 | .info { 154 | h4 { 155 | font-size: 20px; 156 | margin: 0; 157 | } 158 | 159 | .instance-info { 160 | padding: 24px; 161 | padding-bottom: 36px; 162 | 
border-top: 1px solid #e5e5e5; 163 | background-color: #f5f5f5; 164 | 165 | .instance-description { 166 | margin: 0; 167 | margin-bottom: 24px; 168 | } 169 | .instance-admin { 170 | margin: 24px 0; 171 | } 172 | 173 | .description .please-stay { 174 | h3 { 175 | padding: 0; 176 | margin: 0; 177 | border-bottom: none; 178 | } 179 | ul { 180 | list-style: disc; 181 | padding-left: 24px; 182 | 183 | li { 184 | padding: 0; 185 | } 186 | } 187 | article section { 188 | border-bottom: none; 189 | } 190 | } 191 | } 192 | 193 | a { 194 | text-decoration: none; 195 | } 196 | } 197 | 198 | .admin { 199 | margin-top: 32px; 200 | display: flex; 201 | align-items: center; 202 | background-color: #fff; 203 | border: 1px solid #e5e5e5; 204 | border-radius: 3px; 205 | box-shadow: 0px 1px 3px rgba(0, 0, 0, 0.1); 206 | 207 | .display-name { 208 | font-weight: 600; 209 | font-size: 16px; 210 | margin: 0; 211 | } 212 | 213 | .username { 214 | font-size: 14px; 215 | color: #777; 216 | margin: 0; 217 | margin-top: 8px; 218 | } 219 | } 220 | 221 | .avatar { 222 | width: 80px; 223 | height: 80px; 224 | 225 | img { 226 | width: 100%; 227 | border-radius: 40px; 228 | border: 1px solid #333; 229 | background-color: #f5f5f5; 230 | box-shadow: 0px 0px 3px rgba(0, 0, 0, 0.1); 231 | } 232 | } 233 | 234 | @media(max-width: 700px) { 235 | header .header-text { 236 | padding: 24px; 237 | } 238 | 239 | article { 240 | border-left: none; 241 | border-right: none; 242 | border-radius: 0; 243 | } 244 | } 245 | 246 | @media(max-width: 500px) { 247 | .avatar { 248 | width: 60px; 249 | height: 60px; 250 | margin: 16px 24px; 251 | 252 | img { 253 | border-radius: 30px; 254 | } 255 | } 256 | } 257 | 258 | @media(max-width: 400px) { 259 | .avatar { 260 | width: 50px; 261 | height: 50px; 262 | margin: 16px 20px; 263 | 264 | img { 265 | border-radius: 25px; 266 | } 267 | } 268 | } 269 | 270 | @media(max-width: 360px) { 271 | .admin { 272 | flex-direction: column; 273 | } 274 | 275 | .right { 276 | margin: 16px; 277 | margin-top: 0; 278 | } 279 | } 280 | -------------------------------------------------------------------------------- /src/admin.rs: -------------------------------------------------------------------------------- 1 | use activitystreams::iri_string::types::IriString; 2 | use std::collections::{BTreeMap, BTreeSet}; 3 | use time::OffsetDateTime; 4 | 5 | pub mod client; 6 | pub mod routes; 7 | 8 | #[derive(serde::Deserialize, serde::Serialize)] 9 | pub(crate) struct Domains { 10 | domains: Vec, 11 | } 12 | 13 | #[derive(serde::Deserialize, serde::Serialize)] 14 | pub(crate) struct AllowedDomains { 15 | pub(crate) allowed_domains: Vec, 16 | } 17 | 18 | #[derive(serde::Deserialize, serde::Serialize)] 19 | pub(crate) struct BlockedDomains { 20 | pub(crate) blocked_domains: Vec, 21 | } 22 | 23 | #[derive(serde::Deserialize, serde::Serialize)] 24 | pub(crate) struct ConnectedActors { 25 | pub(crate) connected_actors: Vec, 26 | } 27 | 28 | #[derive(serde::Deserialize, serde::Serialize)] 29 | pub(crate) struct LastSeen { 30 | pub(crate) last_seen: BTreeMap>, 31 | pub(crate) never: Vec, 32 | } 33 | -------------------------------------------------------------------------------- /src/admin/client.rs: -------------------------------------------------------------------------------- 1 | use crate::{ 2 | admin::{AllowedDomains, BlockedDomains, ConnectedActors, Domains, LastSeen}, 3 | collector::Snapshot, 4 | config::{AdminUrlKind, Config}, 5 | error::{Error, ErrorKind}, 6 | extractors::XApiToken, 7 | }; 8 | use 
actix_web::http::header::Header; 9 | use reqwest_middleware::ClientWithMiddleware; 10 | use serde::de::DeserializeOwned; 11 | 12 | pub(crate) async fn allow( 13 | client: &ClientWithMiddleware, 14 | config: &Config, 15 | domains: Vec, 16 | ) -> Result<(), Error> { 17 | post_domains(client, config, domains, AdminUrlKind::Allow).await 18 | } 19 | 20 | pub(crate) async fn disallow( 21 | client: &ClientWithMiddleware, 22 | config: &Config, 23 | domains: Vec, 24 | ) -> Result<(), Error> { 25 | post_domains(client, config, domains, AdminUrlKind::Disallow).await 26 | } 27 | 28 | pub(crate) async fn block( 29 | client: &ClientWithMiddleware, 30 | config: &Config, 31 | domains: Vec, 32 | ) -> Result<(), Error> { 33 | post_domains(client, config, domains, AdminUrlKind::Block).await 34 | } 35 | 36 | pub(crate) async fn unblock( 37 | client: &ClientWithMiddleware, 38 | config: &Config, 39 | domains: Vec, 40 | ) -> Result<(), Error> { 41 | post_domains(client, config, domains, AdminUrlKind::Unblock).await 42 | } 43 | 44 | pub(crate) async fn allowed( 45 | client: &ClientWithMiddleware, 46 | config: &Config, 47 | ) -> Result { 48 | get_results(client, config, AdminUrlKind::Allowed).await 49 | } 50 | 51 | pub(crate) async fn blocked( 52 | client: &ClientWithMiddleware, 53 | config: &Config, 54 | ) -> Result { 55 | get_results(client, config, AdminUrlKind::Blocked).await 56 | } 57 | 58 | pub(crate) async fn connected( 59 | client: &ClientWithMiddleware, 60 | config: &Config, 61 | ) -> Result { 62 | get_results(client, config, AdminUrlKind::Connected).await 63 | } 64 | 65 | pub(crate) async fn stats( 66 | client: &ClientWithMiddleware, 67 | config: &Config, 68 | ) -> Result { 69 | get_results(client, config, AdminUrlKind::Stats).await 70 | } 71 | 72 | pub(crate) async fn last_seen( 73 | client: &ClientWithMiddleware, 74 | config: &Config, 75 | ) -> Result { 76 | get_results(client, config, AdminUrlKind::LastSeen).await 77 | } 78 | 79 | async fn get_results( 80 | client: &ClientWithMiddleware, 81 | config: &Config, 82 | url_kind: AdminUrlKind, 83 | ) -> Result { 84 | let x_api_token = config.x_api_token().ok_or(ErrorKind::MissingApiToken)?; 85 | 86 | let iri = config.generate_admin_url(url_kind); 87 | 88 | let res = client 89 | .get(iri.as_str()) 90 | .header(XApiToken::name(), x_api_token.to_string()) 91 | .send() 92 | .await 93 | .map_err(|e| ErrorKind::SendRequest(iri.to_string(), e.to_string()))?; 94 | 95 | if !res.status().is_success() { 96 | return Err(ErrorKind::Status(iri.to_string(), res.status()).into()); 97 | } 98 | 99 | let t = res 100 | .json() 101 | .await 102 | .map_err(|e| ErrorKind::ReceiveResponse(iri.to_string(), e.to_string()))?; 103 | 104 | Ok(t) 105 | } 106 | 107 | async fn post_domains( 108 | client: &ClientWithMiddleware, 109 | config: &Config, 110 | domains: Vec, 111 | url_kind: AdminUrlKind, 112 | ) -> Result<(), Error> { 113 | let x_api_token = config.x_api_token().ok_or(ErrorKind::MissingApiToken)?; 114 | 115 | let iri = config.generate_admin_url(url_kind); 116 | 117 | let res = client 118 | .post(iri.as_str()) 119 | .header(XApiToken::name(), x_api_token.to_string()) 120 | .json(&Domains { domains }) 121 | .send() 122 | .await 123 | .map_err(|e| ErrorKind::SendRequest(iri.to_string(), e.to_string()))?; 124 | 125 | if !res.status().is_success() { 126 | tracing::warn!("Failed to allow domains"); 127 | } 128 | 129 | Ok(()) 130 | } 131 | -------------------------------------------------------------------------------- /src/admin/routes.rs: 
-------------------------------------------------------------------------------- 1 | use crate::{ 2 | admin::{AllowedDomains, BlockedDomains, ConnectedActors, Domains, LastSeen}, 3 | collector::{MemoryCollector, Snapshot}, 4 | error::Error, 5 | extractors::Admin, 6 | }; 7 | use actix_web::{ 8 | web::{self, Data, Json}, 9 | HttpResponse, 10 | }; 11 | use std::collections::{BTreeMap, BTreeSet}; 12 | use time::OffsetDateTime; 13 | 14 | pub(crate) async fn allow( 15 | admin: Admin, 16 | Json(Domains { domains }): Json, 17 | ) -> Result { 18 | admin.db_ref().add_allows(domains).await?; 19 | 20 | Ok(HttpResponse::NoContent().finish()) 21 | } 22 | 23 | pub(crate) async fn disallow( 24 | admin: Admin, 25 | Json(Domains { domains }): Json, 26 | ) -> Result { 27 | admin.db_ref().remove_allows(domains).await?; 28 | 29 | Ok(HttpResponse::NoContent().finish()) 30 | } 31 | 32 | pub(crate) async fn block( 33 | admin: Admin, 34 | Json(Domains { domains }): Json, 35 | ) -> Result { 36 | admin.db_ref().add_blocks(domains).await?; 37 | 38 | Ok(HttpResponse::NoContent().finish()) 39 | } 40 | 41 | pub(crate) async fn unblock( 42 | admin: Admin, 43 | Json(Domains { domains }): Json, 44 | ) -> Result { 45 | admin.db_ref().remove_blocks(domains).await?; 46 | 47 | Ok(HttpResponse::NoContent().finish()) 48 | } 49 | 50 | pub(crate) async fn allowed(admin: Admin) -> Result, Error> { 51 | let allowed_domains = admin.db_ref().allows().await?; 52 | 53 | Ok(Json(AllowedDomains { allowed_domains })) 54 | } 55 | 56 | pub(crate) async fn blocked(admin: Admin) -> Result, Error> { 57 | let blocked_domains = admin.db_ref().blocks().await?; 58 | 59 | Ok(Json(BlockedDomains { blocked_domains })) 60 | } 61 | 62 | pub(crate) async fn connected(admin: Admin) -> Result, Error> { 63 | let connected_actors = admin.db_ref().connected_ids().await?; 64 | 65 | Ok(Json(ConnectedActors { connected_actors })) 66 | } 67 | 68 | pub(crate) async fn stats( 69 | _admin: Admin, 70 | collector: Data, 71 | ) -> Result, Error> { 72 | Ok(Json(collector.snapshot())) 73 | } 74 | 75 | pub(crate) async fn last_seen(admin: Admin) -> Result, Error> { 76 | let nodes = admin.db_ref().last_seen().await?; 77 | 78 | let mut last_seen: BTreeMap> = BTreeMap::new(); 79 | let mut never = Vec::new(); 80 | 81 | for (domain, datetime) in nodes { 82 | if let Some(datetime) = datetime { 83 | last_seen.entry(datetime).or_default().insert(domain); 84 | } else { 85 | never.push(domain); 86 | } 87 | } 88 | 89 | Ok(Json(LastSeen { last_seen, never })) 90 | } 91 | 92 | pub(crate) async fn get_authority_cfg( 93 | _admin: Admin, 94 | state: Data, 95 | domain: web::Path, 96 | ) -> Result, Error> { 97 | if let Some(cfg) = state.get_authority_cfg(&domain).await { 98 | Ok(Json(cfg)) 99 | } else { 100 | Err(crate::error::ErrorKind::NotFound.into()) 101 | } 102 | } 103 | 104 | pub(crate) async fn get_all_authority_cfg( 105 | _admin: Admin, 106 | state: Data, 107 | ) -> Result>, Error> { 108 | let cfg = state.get_all_authority_cfg().await; 109 | 110 | Ok(Json(cfg)) 111 | } 112 | 113 | pub(crate) async fn set_authority_cfg( 114 | _admin: Admin, 115 | state: Data, 116 | domain: web::Path, 117 | Json(cfg): Json, 118 | ) -> Result { 119 | state.set_authority_cfg(&domain, cfg).await; 120 | 121 | Ok(HttpResponse::NoContent().finish()) 122 | } 123 | 124 | pub(crate) async fn clear_authority_cfg( 125 | _admin: Admin, 126 | state: Data, 127 | domain: web::Path, 128 | ) -> Result { 129 | state.clear_authority_cfg(&domain).await; 130 | 131 | Ok(HttpResponse::NoContent().finish()) 132 | } 133 | 
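How these handlers get mounted is not part of this excerpt (that wiring lives in `src/main.rs`), but a hypothetical actix-web registration would look roughly like the sketch below; the `/api/v1/admin` scope path and route names are assumptions for illustration. The `Admin` extractor in the signatures above appears to be what ties each route back to the `API_TOKEN`/`XApiToken` check used by `src/admin/client.rs`.
```rust
// Illustrative wiring only: the relay's real registration is in src/main.rs,
// which is not included in this excerpt. Scope path and route names are assumed.
use actix_web::web;

use crate::admin::routes;

fn admin_scope() -> actix_web::Scope {
    web::scope("/api/v1/admin")
        .route("/allow", web::post().to(routes::allow))
        .route("/disallow", web::post().to(routes::disallow))
        .route("/block", web::post().to(routes::block))
        .route("/unblock", web::post().to(routes::unblock))
        .route("/allowed", web::get().to(routes::allowed))
        .route("/blocked", web::get().to(routes::blocked))
        .route("/connected", web::get().to(routes::connected))
        .route("/stats", web::get().to(routes::stats))
        .route("/last_seen", web::get().to(routes::last_seen))
}
```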
-------------------------------------------------------------------------------- /src/apub.rs: -------------------------------------------------------------------------------- 1 | use activitystreams::{ 2 | activity::ActorAndObject, 3 | actor::{Actor, ApActor}, 4 | iri_string::types::IriString, 5 | unparsed::UnparsedMutExt, 6 | }; 7 | use activitystreams_ext::{Ext1, UnparsedExtension}; 8 | 9 | #[derive(Clone, serde::Deserialize, serde::Serialize)] 10 | #[serde(rename_all = "camelCase")] 11 | pub struct PublicKeyInner { 12 | pub id: IriString, 13 | pub owner: IriString, 14 | pub public_key_pem: String, 15 | } 16 | 17 | impl std::fmt::Debug for PublicKeyInner { 18 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 19 | f.debug_struct("PublicKeyInner") 20 | .field("id", &self.id.to_string()) 21 | .field("owner", &self.owner.to_string()) 22 | .field("public_key_pem", &self.public_key_pem) 23 | .finish() 24 | } 25 | } 26 | 27 | #[derive(Clone, Debug, serde::Deserialize, serde::Serialize)] 28 | #[serde(rename_all = "camelCase")] 29 | pub struct PublicKey { 30 | pub public_key: PublicKeyInner, 31 | } 32 | 33 | #[derive(Clone, Debug, Eq, Ord, PartialEq, PartialOrd, serde::Deserialize, serde::Serialize)] 34 | #[serde(rename_all = "PascalCase")] 35 | pub enum ValidTypes { 36 | Accept, 37 | Add, 38 | Announce, 39 | Create, 40 | Delete, 41 | Follow, 42 | Reject, 43 | Remove, 44 | Undo, 45 | Update, 46 | Move, 47 | } 48 | 49 | #[derive(Clone, Debug, Eq, Ord, PartialEq, PartialOrd, serde::Deserialize, serde::Serialize)] 50 | #[serde(rename_all = "PascalCase")] 51 | pub enum UndoTypes { 52 | Follow, 53 | Announce, 54 | Create, 55 | } 56 | 57 | pub type AcceptedUndoObjects = ActorAndObject; 58 | pub type AcceptedActivities = ActorAndObject; 59 | pub type AcceptedActors = Ext1>, PublicKey>; 60 | 61 | impl UnparsedExtension for PublicKey 62 | where 63 | U: UnparsedMutExt, 64 | { 65 | type Error = serde_json::Error; 66 | 67 | fn try_from_unparsed(unparsed_mut: &mut U) -> Result { 68 | Ok(PublicKey { 69 | public_key: unparsed_mut.remove("publicKey")?, 70 | }) 71 | } 72 | 73 | fn try_into_unparsed(self, unparsed_mut: &mut U) -> Result<(), Self::Error> { 74 | unparsed_mut.insert("publicKey", self.public_key)?; 75 | Ok(()) 76 | } 77 | } 78 | -------------------------------------------------------------------------------- /src/args.rs: -------------------------------------------------------------------------------- 1 | use clap::Parser; 2 | 3 | #[derive(Debug, Parser)] 4 | #[structopt(name = "relay", about = "An activitypub relay")] 5 | pub(crate) struct Args { 6 | #[arg(short, help = "A list of domains that should be blocked")] 7 | blocks: Vec, 8 | 9 | #[arg(short, help = "A list of domains that should be allowed")] 10 | allowed: Vec, 11 | 12 | #[arg(short, long, help = "Undo allowing or blocking domains")] 13 | undo: bool, 14 | 15 | #[arg(short, long, help = "List allowed and blocked domains")] 16 | list: bool, 17 | 18 | #[arg(short, long, help = "Get statistics from the server")] 19 | stats: bool, 20 | 21 | #[arg( 22 | short, 23 | long, 24 | help = "List domains by when they were last succesfully contacted" 25 | )] 26 | contacted: bool, 27 | } 28 | 29 | impl Args { 30 | pub(crate) fn any(&self) -> bool { 31 | !self.blocks.is_empty() 32 | || !self.allowed.is_empty() 33 | || self.list 34 | || self.stats 35 | || self.contacted 36 | } 37 | 38 | pub(crate) fn new() -> Self { 39 | Self::parse() 40 | } 41 | 42 | pub(crate) fn blocks(&self) -> &[String] { 43 | &self.blocks 44 | } 45 | 46 | 
pub(crate) fn allowed(&self) -> &[String] { 47 | &self.allowed 48 | } 49 | 50 | pub(crate) fn undo(&self) -> bool { 51 | self.undo 52 | } 53 | 54 | pub(crate) fn list(&self) -> bool { 55 | self.list 56 | } 57 | 58 | pub(crate) fn stats(&self) -> bool { 59 | self.stats 60 | } 61 | 62 | pub(crate) fn contacted(&self) -> bool { 63 | self.contacted 64 | } 65 | } 66 | -------------------------------------------------------------------------------- /src/build.rs: -------------------------------------------------------------------------------- 1 | use ructe::Ructe; 2 | use std::{fs::File, io::Read, path::Path, process::Command}; 3 | 4 | fn git_info() { 5 | if let Ok(output) = Command::new("git").args(["rev-parse", "HEAD"]).output() { 6 | if output.status.success() { 7 | let git_hash = String::from_utf8_lossy(&output.stdout); 8 | println!("cargo:rustc-env=GIT_HASH={git_hash}"); 9 | println!("cargo:rustc-env=GIT_SHORT_HASH={}", &git_hash[..8]) 10 | } 11 | } 12 | 13 | if let Ok(output) = Command::new("git") 14 | .args(["rev-parse", "--abbrev-ref", "HEAD"]) 15 | .output() 16 | { 17 | if output.status.success() { 18 | let git_branch = String::from_utf8_lossy(&output.stdout); 19 | println!("cargo:rustc-env=GIT_BRANCH={git_branch}"); 20 | } 21 | } 22 | } 23 | 24 | fn version_info() -> Result<(), anyhow::Error> { 25 | let cargo_toml = Path::new(&std::env::var("CARGO_MANIFEST_DIR")?).join("Cargo.toml"); 26 | 27 | let mut file = File::open(cargo_toml)?; 28 | 29 | let mut cargo_data = String::new(); 30 | file.read_to_string(&mut cargo_data)?; 31 | 32 | let data: toml::Value = toml::from_str(&cargo_data)?; 33 | 34 | if let Some(version) = data["package"]["version"].as_str() { 35 | println!("cargo:rustc-env=PKG_VERSION={version}"); 36 | } 37 | 38 | if let Some(name) = data["package"]["name"].as_str() { 39 | println!("cargo:rustc-env=PKG_NAME={name}"); 40 | } 41 | 42 | Ok(()) 43 | } 44 | 45 | fn main() -> Result<(), anyhow::Error> { 46 | dotenv::dotenv().ok(); 47 | 48 | git_info(); 49 | version_info()?; 50 | 51 | let mut ructe = Ructe::from_env()?; 52 | let mut statics = ructe.statics()?; 53 | statics.add_sass_file("scss/index.scss")?; 54 | ructe.compile_templates("templates")?; 55 | 56 | Ok(()) 57 | } 58 | -------------------------------------------------------------------------------- /src/collector.rs: -------------------------------------------------------------------------------- 1 | use metrics::{Key, Metadata, Recorder, SetRecorderError}; 2 | use metrics_util::{ 3 | registry::{AtomicStorage, GenerationalStorage, Recency, Registry}, 4 | MetricKindMask, Summary, 5 | }; 6 | use quanta::Clock; 7 | use std::{ 8 | collections::{BTreeMap, HashMap}, 9 | sync::{atomic::Ordering, Arc, RwLock}, 10 | time::Duration, 11 | }; 12 | 13 | const SECONDS: u64 = 1; 14 | const MINUTES: u64 = 60 * SECONDS; 15 | const HOURS: u64 = 60 * MINUTES; 16 | const DAYS: u64 = 24 * HOURS; 17 | 18 | type DistributionMap = BTreeMap, Summary>; 19 | 20 | #[derive(Clone)] 21 | pub struct MemoryCollector { 22 | inner: Arc, 23 | } 24 | 25 | struct Inner { 26 | descriptions: RwLock>, 27 | distributions: RwLock>, 28 | recency: Recency, 29 | registry: Registry>, 30 | } 31 | 32 | #[derive(Debug, serde::Deserialize, serde::Serialize)] 33 | struct Counter { 34 | labels: BTreeMap, 35 | value: u64, 36 | } 37 | 38 | impl std::fmt::Display for Counter { 39 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 40 | let labels = self 41 | .labels 42 | .iter() 43 | .map(|(k, v)| format!("{k}: {v}")) 44 | .collect::>() 45 | .join(", "); 46 
| 47 | write!(f, "{labels} - {}", self.value) 48 | } 49 | } 50 | 51 | #[derive(Debug, serde::Deserialize, serde::Serialize)] 52 | struct Gauge { 53 | labels: BTreeMap, 54 | value: f64, 55 | } 56 | 57 | impl std::fmt::Display for Gauge { 58 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 59 | let labels = self 60 | .labels 61 | .iter() 62 | .map(|(k, v)| format!("{k}: {v}")) 63 | .collect::>() 64 | .join(", "); 65 | 66 | write!(f, "{labels} - {}", self.value) 67 | } 68 | } 69 | 70 | #[derive(Debug, serde::Deserialize, serde::Serialize)] 71 | struct Histogram { 72 | labels: BTreeMap, 73 | value: Vec<(f64, Option)>, 74 | } 75 | 76 | impl std::fmt::Display for Histogram { 77 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 78 | let labels = self 79 | .labels 80 | .iter() 81 | .map(|(k, v)| format!("{k}: {v}")) 82 | .collect::>() 83 | .join(", "); 84 | 85 | let value = self 86 | .value 87 | .iter() 88 | .map(|(k, v)| { 89 | if let Some(v) = v { 90 | format!("{k}: {v:.6}") 91 | } else { 92 | format!("{k}: None,") 93 | } 94 | }) 95 | .collect::>() 96 | .join(", "); 97 | 98 | write!(f, "{labels} - {value}") 99 | } 100 | } 101 | 102 | #[derive(Debug, serde::Deserialize, serde::Serialize)] 103 | pub(crate) struct Snapshot { 104 | counters: HashMap>, 105 | gauges: HashMap>, 106 | histograms: HashMap>, 107 | } 108 | 109 | const PAIRS: [((&str, &str), &str); 2] = [ 110 | ( 111 | ( 112 | "background-jobs.worker.started", 113 | "background-jobs.worker.finished", 114 | ), 115 | "background-jobs.worker.running", 116 | ), 117 | ( 118 | ( 119 | "background-jobs.job.started", 120 | "background-jobs.job.finished", 121 | ), 122 | "background-jobs.job.running", 123 | ), 124 | ]; 125 | 126 | #[derive(Default)] 127 | struct MergeCounter { 128 | start: Option, 129 | finish: Option, 130 | } 131 | 132 | impl MergeCounter { 133 | fn merge(self) -> Option { 134 | match (self.start, self.finish) { 135 | (Some(start), Some(end)) => Some(Counter { 136 | labels: start.labels, 137 | value: start.value.saturating_sub(end.value), 138 | }), 139 | (Some(only), None) => Some(only), 140 | (None, Some(only)) => Some(Counter { 141 | labels: only.labels, 142 | value: 0, 143 | }), 144 | (None, None) => None, 145 | } 146 | } 147 | } 148 | 149 | impl Snapshot { 150 | pub(crate) fn present(self) { 151 | if !self.counters.is_empty() { 152 | println!("Counters"); 153 | let mut merging = HashMap::new(); 154 | for (key, counters) in self.counters { 155 | if let Some(((start, _), name)) = PAIRS 156 | .iter() 157 | .find(|((start, finish), _)| *start == key || *finish == key) 158 | { 159 | let entry = merging.entry(name).or_insert_with(HashMap::new); 160 | 161 | for counter in counters { 162 | let merge_counter = entry 163 | .entry(counter.labels.clone()) 164 | .or_insert_with(MergeCounter::default); 165 | if key == *start { 166 | merge_counter.start = Some(counter); 167 | } else { 168 | merge_counter.finish = Some(counter); 169 | } 170 | } 171 | 172 | continue; 173 | } 174 | 175 | println!("\t{key}"); 176 | for counter in counters { 177 | println!("\t\t{counter}"); 178 | } 179 | } 180 | 181 | for (key, counters) in merging { 182 | println!("\t{key}"); 183 | 184 | for (_, counter) in counters { 185 | if let Some(counter) = counter.merge() { 186 | println!("\t\t{counter}"); 187 | } 188 | } 189 | } 190 | } 191 | 192 | if !self.gauges.is_empty() { 193 | println!("Gauges"); 194 | for (key, gauges) in self.gauges { 195 | println!("\t{key}"); 196 | 197 | for gauge in gauges { 198 | 
println!("\t\t{gauge}"); 199 | } 200 | } 201 | } 202 | 203 | if !self.histograms.is_empty() { 204 | println!("Histograms"); 205 | for (key, histograms) in self.histograms { 206 | println!("\t{key}"); 207 | 208 | for histogram in histograms { 209 | println!("\t\t{histogram}"); 210 | } 211 | } 212 | } 213 | } 214 | } 215 | 216 | fn key_to_parts(key: &Key) -> (String, Vec<(String, String)>) { 217 | let labels = key 218 | .labels() 219 | .map(|label| (label.key().to_string(), label.value().to_string())) 220 | .collect(); 221 | let name = key.name().to_string(); 222 | (name, labels) 223 | } 224 | 225 | impl Inner { 226 | fn snapshot_counters(&self) -> HashMap> { 227 | let mut counters = HashMap::new(); 228 | 229 | for (key, counter) in self.registry.get_counter_handles() { 230 | let gen = counter.get_generation(); 231 | if !self.recency.should_store_counter(&key, gen, &self.registry) { 232 | continue; 233 | } 234 | 235 | let (name, labels) = key_to_parts(&key); 236 | let value = counter.get_inner().load(Ordering::Acquire); 237 | counters.entry(name).or_insert_with(Vec::new).push(Counter { 238 | labels: labels.into_iter().collect(), 239 | value, 240 | }); 241 | } 242 | 243 | counters 244 | } 245 | 246 | fn snapshot_gauges(&self) -> HashMap> { 247 | let mut gauges = HashMap::new(); 248 | 249 | for (key, gauge) in self.registry.get_gauge_handles() { 250 | let gen = gauge.get_generation(); 251 | if !self.recency.should_store_gauge(&key, gen, &self.registry) { 252 | continue; 253 | } 254 | 255 | let (name, labels) = key_to_parts(&key); 256 | let value = f64::from_bits(gauge.get_inner().load(Ordering::Acquire)); 257 | gauges.entry(name).or_insert_with(Vec::new).push(Gauge { 258 | labels: labels.into_iter().collect(), 259 | value, 260 | }) 261 | } 262 | 263 | gauges 264 | } 265 | 266 | fn snapshot_histograms(&self) -> HashMap> { 267 | for (key, histogram) in self.registry.get_histogram_handles() { 268 | let gen = histogram.get_generation(); 269 | let (name, labels) = key_to_parts(&key); 270 | 271 | if !self 272 | .recency 273 | .should_store_histogram(&key, gen, &self.registry) 274 | { 275 | let mut d = self.distributions.write().unwrap(); 276 | let delete_by_name = if let Some(by_name) = d.get_mut(&name) { 277 | by_name.remove(&labels); 278 | by_name.is_empty() 279 | } else { 280 | false 281 | }; 282 | drop(d); 283 | 284 | if delete_by_name { 285 | self.descriptions.write().unwrap().remove(&name); 286 | } 287 | 288 | continue; 289 | } 290 | 291 | let mut d = self.distributions.write().unwrap(); 292 | let outer_entry = d.entry(name.clone()).or_default(); 293 | 294 | let entry = outer_entry 295 | .entry(labels) 296 | .or_insert_with(Summary::with_defaults); 297 | 298 | histogram.get_inner().clear_with(|samples| { 299 | for sample in samples { 300 | entry.add(*sample); 301 | } 302 | }) 303 | } 304 | 305 | let d = self.distributions.read().unwrap().clone(); 306 | d.into_iter() 307 | .map(|(key, value)| { 308 | ( 309 | key, 310 | value 311 | .into_iter() 312 | .map(|(labels, summary)| Histogram { 313 | labels: labels.into_iter().collect(), 314 | value: [0.001, 0.01, 0.05, 0.1, 0.5, 0.9, 0.99, 1.0] 315 | .into_iter() 316 | .map(|q| (q, summary.quantile(q))) 317 | .collect(), 318 | }) 319 | .collect(), 320 | ) 321 | }) 322 | .collect() 323 | } 324 | 325 | fn snapshot(&self) -> Snapshot { 326 | Snapshot { 327 | counters: self.snapshot_counters(), 328 | gauges: self.snapshot_gauges(), 329 | histograms: self.snapshot_histograms(), 330 | } 331 | } 332 | } 333 | 334 | impl MemoryCollector { 335 | pub(crate) fn 
new() -> Self { 336 | MemoryCollector { 337 | inner: Arc::new(Inner { 338 | descriptions: Default::default(), 339 | distributions: Default::default(), 340 | recency: Recency::new( 341 | Clock::new(), 342 | MetricKindMask::ALL, 343 | Some(Duration::from_secs(5 * DAYS)), 344 | ), 345 | registry: Registry::new(GenerationalStorage::atomic()), 346 | }), 347 | } 348 | } 349 | 350 | pub(crate) fn snapshot(&self) -> Snapshot { 351 | self.inner.snapshot() 352 | } 353 | 354 | fn add_description_if_missing( 355 | &self, 356 | key: &metrics::KeyName, 357 | description: metrics::SharedString, 358 | ) { 359 | let mut d = self.inner.descriptions.write().unwrap(); 360 | d.entry(key.as_str().to_owned()).or_insert(description); 361 | } 362 | 363 | pub(crate) fn install(&self) -> Result<(), SetRecorderError> { 364 | metrics::set_global_recorder(self.clone()) 365 | } 366 | } 367 | 368 | impl Recorder for MemoryCollector { 369 | fn describe_counter( 370 | &self, 371 | key: metrics::KeyName, 372 | _: Option, 373 | description: metrics::SharedString, 374 | ) { 375 | self.add_description_if_missing(&key, description) 376 | } 377 | 378 | fn describe_gauge( 379 | &self, 380 | key: metrics::KeyName, 381 | _: Option, 382 | description: metrics::SharedString, 383 | ) { 384 | self.add_description_if_missing(&key, description) 385 | } 386 | 387 | fn describe_histogram( 388 | &self, 389 | key: metrics::KeyName, 390 | _: Option, 391 | description: metrics::SharedString, 392 | ) { 393 | self.add_description_if_missing(&key, description) 394 | } 395 | 396 | fn register_counter(&self, key: &Key, _: &Metadata<'_>) -> metrics::Counter { 397 | self.inner 398 | .registry 399 | .get_or_create_counter(key, |c| c.clone().into()) 400 | } 401 | 402 | fn register_gauge(&self, key: &Key, _: &Metadata<'_>) -> metrics::Gauge { 403 | self.inner 404 | .registry 405 | .get_or_create_gauge(key, |c| c.clone().into()) 406 | } 407 | 408 | fn register_histogram(&self, key: &Key, _: &Metadata<'_>) -> metrics::Histogram { 409 | self.inner 410 | .registry 411 | .get_or_create_histogram(key, |c| c.clone().into()) 412 | } 413 | } 414 | -------------------------------------------------------------------------------- /src/data.rs: -------------------------------------------------------------------------------- 1 | mod actor; 2 | mod last_online; 3 | mod media; 4 | mod node; 5 | mod state; 6 | 7 | pub(crate) use actor::ActorCache; 8 | pub(crate) use last_online::LastOnline; 9 | pub(crate) use media::MediaCache; 10 | pub(crate) use node::{Node, NodeCache, NodeConfig}; 11 | pub(crate) use state::State; 12 | -------------------------------------------------------------------------------- /src/data/actor.rs: -------------------------------------------------------------------------------- 1 | use crate::{ 2 | apub::AcceptedActors, 3 | db::{Actor, Db}, 4 | error::{Error, ErrorKind}, 5 | requests::{BreakerStrategy, Requests}, 6 | }; 7 | use activitystreams::{iri_string::types::IriString, prelude::*}; 8 | use std::time::{Duration, SystemTime}; 9 | 10 | const REFETCH_DURATION: Duration = Duration::from_secs(60 * 30); 11 | 12 | #[derive(Debug)] 13 | pub enum MaybeCached { 14 | Cached(T), 15 | Fetched(T), 16 | } 17 | 18 | impl MaybeCached { 19 | pub(crate) fn is_cached(&self) -> bool { 20 | matches!(self, MaybeCached::Cached(_)) 21 | } 22 | 23 | pub(crate) fn into_inner(self) -> T { 24 | match self { 25 | MaybeCached::Cached(t) | MaybeCached::Fetched(t) => t, 26 | } 27 | } 28 | } 29 | 30 | #[derive(Clone, Debug)] 31 | pub struct ActorCache { 32 | db: Db, 33 | } 34 
|
35 | impl ActorCache {
36 | pub(crate) fn new(db: Db) -> Self {
37 | ActorCache { db }
38 | }
39 |
40 | #[tracing::instrument(level = "debug", name = "Get Actor", skip_all, fields(id = id.to_string().as_str()))]
41 | pub(crate) async fn get(
42 | &self,
43 | id: &IriString,
44 | requests: &Requests,
45 | ) -> Result<MaybeCached<Actor>, Error> {
46 | if let Some(actor) = self.db.actor(id.clone()).await? {
47 | if actor.saved_at + REFETCH_DURATION > SystemTime::now() {
48 | return Ok(MaybeCached::Cached(actor));
49 | }
50 | }
51 |
52 | self.get_no_cache(id, requests)
53 | .await
54 | .map(MaybeCached::Fetched)
55 | }
56 |
57 | #[tracing::instrument(level = "debug", name = "Add Connection", skip(self))]
58 | pub(crate) async fn add_connection(&self, actor: Actor) -> Result<(), Error> {
59 | self.db.add_connection(actor.id.clone()).await?;
60 | self.db.save_actor(actor).await
61 | }
62 |
63 | #[tracing::instrument(level = "debug", name = "Remove Connection", skip(self))]
64 | pub(crate) async fn remove_connection(&self, actor: &Actor) -> Result<(), Error> {
65 | self.db.remove_connection(actor.id.clone()).await
66 | }
67 |
68 | #[tracing::instrument(level = "debug", name = "Fetch remote actor", skip_all, fields(id = id.to_string().as_str()))]
69 | pub(crate) async fn get_no_cache(
70 | &self,
71 | id: &IriString,
72 | requests: &Requests,
73 | ) -> Result<Actor, Error> {
74 | let accepted_actor = requests
75 | .fetch::<AcceptedActors>(id, BreakerStrategy::Require2XX)
76 | .await?;
77 |
78 | let input_authority = id.authority_components().ok_or(ErrorKind::MissingDomain)?;
79 | let accepted_actor_id = accepted_actor
80 | .id(input_authority.host(), input_authority.port())?
81 | .ok_or(ErrorKind::MissingId)?;
82 |
83 | let inbox = get_inbox(&accepted_actor)?.clone();
84 |
85 | let actor = Actor {
86 | id: accepted_actor_id.clone(),
87 | public_key: accepted_actor.ext_one.public_key.public_key_pem,
88 | public_key_id: accepted_actor.ext_one.public_key.id,
89 | inbox,
90 | saved_at: SystemTime::now(),
91 | };
92 |
93 | self.db.save_actor(actor.clone()).await?;
94 |
95 | Ok(actor)
96 | }
97 | }
98 |
99 | fn get_inbox(actor: &AcceptedActors) -> Result<&IriString, Error> {
100 | Ok(actor
101 | .endpoints()?
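   | // Prefer the actor's advertised sharedInbox endpoint, falling back to its personal inbox.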
102 | .and_then(|e| e.shared_inbox.as_ref())
103 | .unwrap_or(actor.inbox()?))
104 | }
105 |
--------------------------------------------------------------------------------
/src/data/last_online.rs:
--------------------------------------------------------------------------------
1 | use activitystreams::iri_string::types::IriStr;
2 | use std::{collections::HashMap, sync::Mutex};
3 | use time::OffsetDateTime;
4 |
5 | pub(crate) struct LastOnline {
6 | domains: Mutex<HashMap<String, OffsetDateTime>>,
7 | }
8 |
9 | impl LastOnline {
10 | pub(crate) fn mark_seen(&self, iri: &IriStr) {
11 | if let Some(authority) = iri.authority_str() {
12 | self.domains
13 | .lock()
14 | .unwrap()
15 | .insert(authority.to_string(), OffsetDateTime::now_utc());
16 | }
17 | }
18 |
19 | pub(crate) fn take(&self) -> HashMap<String, OffsetDateTime> {
20 | std::mem::take(&mut *self.domains.lock().unwrap())
21 | }
22 |
23 | pub(crate) fn empty() -> Self {
24 | Self {
25 | domains: Mutex::new(HashMap::default()),
26 | }
27 | }
28 | }
29 |
--------------------------------------------------------------------------------
/src/data/media.rs:
--------------------------------------------------------------------------------
1 | use crate::{db::Db, error::Error};
2 | use activitystreams::iri_string::types::IriString;
3 | use uuid::Uuid;
4 |
5 | #[derive(Clone, Debug)]
6 | pub struct MediaCache {
7 | db: Db,
8 | }
9 |
10 | impl MediaCache {
11 | pub(crate) fn new(db: Db) -> Self {
12 | MediaCache { db }
13 | }
14 |
15 | #[tracing::instrument(level = "debug", name = "Get media uuid", skip_all, fields(url = url.to_string().as_str()))]
16 | pub(crate) async fn get_uuid(&self, url: IriString) -> Result<Option<Uuid>, Error> {
17 | self.db.media_id(url).await
18 | }
19 |
20 | #[tracing::instrument(level = "debug", name = "Get media url", skip(self))]
21 | pub(crate) async fn get_url(&self, uuid: Uuid) -> Result<Option<IriString>, Error> {
22 | self.db.media_url(uuid).await
23 | }
24 |
25 | #[tracing::instrument(name = "Store media url", skip_all, fields(url = url.to_string().as_str()))]
26 | pub(crate) async fn store_url(&self, url: IriString) -> Result<Uuid, Error> {
27 | let uuid = Uuid::new_v4();
28 |
29 | self.db.save_url(url, uuid).await?;
30 |
31 | Ok(uuid)
32 | }
33 | }
34 |
--------------------------------------------------------------------------------
/src/data/node.rs:
--------------------------------------------------------------------------------
1 | use crate::{
2 | db::{Contact, Db, Info, Instance},
3 | error::{Error, ErrorKind},
4 | };
5 | use activitystreams::{iri, iri_string::types::IriString};
6 | use std::time::{Duration, SystemTime};
7 | use std::collections::HashSet;
8 |
9 | #[derive(Clone, Debug)]
10 | pub struct NodeCache {
11 | db: Db,
12 | }
13 |
14 | #[derive(Clone, Debug, serde::Deserialize, serde::Serialize)]
15 | pub struct NodeConfig {
16 | /// Probability in prob * 256 form.
17 | pub(crate) probability: u8,
18 | /// Enable probabilistic delivery.
19 | pub(crate) enable_probability: bool,
20 | /// Set of authorities to filter.
21 | pub(crate) authority_set: HashSet<String>,
22 | /// Apply authority_set as allowlist instead of denylist.
23 | pub(crate) is_allowlist: bool,
24 | /// Receive only mode. Silently drop all incoming activities.
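   | // (As used in jobs/apub/announce.rs: announces originating from a receive-only authority are not relayed.)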
25 | pub(crate) receive_only: bool, 26 | } 27 | 28 | #[derive(Clone, serde::Deserialize, serde::Serialize)] 29 | pub struct Node { 30 | pub(crate) base: IriString, 31 | pub(crate) info: Option, 32 | pub(crate) instance: Option, 33 | pub(crate) contact: Option, 34 | } 35 | 36 | impl std::fmt::Debug for Node { 37 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 38 | f.debug_struct("Node") 39 | .field("base", &self.base.to_string()) 40 | .field("info", &self.info) 41 | .field("instance", &self.instance) 42 | .field("contact", &self.contact) 43 | .finish() 44 | } 45 | } 46 | 47 | impl NodeCache { 48 | pub(crate) fn new(db: Db) -> Self { 49 | NodeCache { db } 50 | } 51 | 52 | #[tracing::instrument(level = "debug", name = "Get nodes", skip(self))] 53 | pub(crate) async fn nodes(&self) -> Result, Error> { 54 | let infos = self.db.connected_info().await?; 55 | let instances = self.db.connected_instance().await?; 56 | let contacts = self.db.connected_contact().await?; 57 | 58 | let vec = self 59 | .db 60 | .connected_ids() 61 | .await? 62 | .into_iter() 63 | .map(move |actor_id| { 64 | let info = infos.get(&actor_id).cloned(); 65 | let instance = instances.get(&actor_id).cloned(); 66 | let contact = contacts.get(&actor_id).cloned(); 67 | 68 | Node::new(actor_id).map(|node| node.info(info).instance(instance).contact(contact)) 69 | }) 70 | .collect::, Error>>()?; 71 | 72 | Ok(vec) 73 | } 74 | 75 | #[tracing::instrument(level = "debug", name = "Is NodeInfo Outdated", skip_all, fields(actor_id = actor_id.to_string().as_str()))] 76 | pub(crate) async fn is_nodeinfo_outdated(&self, actor_id: IriString) -> bool { 77 | self.db 78 | .info(actor_id) 79 | .await 80 | .map(|opt| opt.map(|info| info.outdated()).unwrap_or(true)) 81 | .unwrap_or(true) 82 | } 83 | 84 | #[tracing::instrument(level = "debug", name = "Is Contact Outdated", skip_all, fields(actor_id = actor_id.to_string().as_str()))] 85 | pub(crate) async fn is_contact_outdated(&self, actor_id: IriString) -> bool { 86 | self.db 87 | .contact(actor_id) 88 | .await 89 | .map(|opt| opt.map(|contact| contact.outdated()).unwrap_or(true)) 90 | .unwrap_or(true) 91 | } 92 | 93 | #[tracing::instrument(level = "debug", name = "Is Instance Outdated", skip_all, fields(actor_id = actor_id.to_string().as_str()))] 94 | pub(crate) async fn is_instance_outdated(&self, actor_id: IriString) -> bool { 95 | self.db 96 | .instance(actor_id) 97 | .await 98 | .map(|opt| opt.map(|instance| instance.outdated()).unwrap_or(true)) 99 | .unwrap_or(true) 100 | } 101 | 102 | #[tracing::instrument(level = "debug", name = "Save node info", skip_all, fields(actor_id = actor_id.to_string().as_str(), software, version, reg))] 103 | pub(crate) async fn set_info( 104 | &self, 105 | actor_id: IriString, 106 | software: String, 107 | version: String, 108 | reg: bool, 109 | ) -> Result<(), Error> { 110 | self.db 111 | .save_info( 112 | actor_id, 113 | Info { 114 | software, 115 | version, 116 | reg, 117 | updated: SystemTime::now(), 118 | }, 119 | ) 120 | .await 121 | } 122 | 123 | #[tracing::instrument( 124 | level = "debug", 125 | name = "Save instance info", 126 | skip_all, 127 | fields( 128 | actor_id = actor_id.to_string().as_str(), 129 | title, 130 | description, 131 | version, 132 | reg, 133 | requires_approval 134 | ) 135 | )] 136 | pub(crate) async fn set_instance( 137 | &self, 138 | actor_id: IriString, 139 | title: String, 140 | description: String, 141 | version: String, 142 | reg: bool, 143 | requires_approval: bool, 144 | ) -> Result<(), Error> { 145 | 
self.db 146 | .save_instance( 147 | actor_id, 148 | Instance { 149 | title, 150 | description, 151 | version, 152 | reg, 153 | requires_approval, 154 | updated: SystemTime::now(), 155 | }, 156 | ) 157 | .await 158 | } 159 | 160 | #[tracing::instrument( 161 | level = "debug", 162 | name = "Save contact info", 163 | skip_all, 164 | fields( 165 | actor_id = actor_id.to_string().as_str(), 166 | username, 167 | display_name, 168 | url = url.to_string().as_str(), 169 | avatar = avatar.to_string().as_str() 170 | ) 171 | )] 172 | pub(crate) async fn set_contact( 173 | &self, 174 | actor_id: IriString, 175 | username: String, 176 | display_name: String, 177 | url: IriString, 178 | avatar: IriString, 179 | ) -> Result<(), Error> { 180 | self.db 181 | .save_contact( 182 | actor_id, 183 | Contact { 184 | username, 185 | display_name, 186 | url, 187 | avatar, 188 | updated: SystemTime::now(), 189 | }, 190 | ) 191 | .await 192 | } 193 | } 194 | 195 | impl Node { 196 | fn new(url: IriString) -> Result { 197 | let authority = url.authority_str().ok_or(ErrorKind::MissingDomain)?; 198 | let scheme = url.scheme_str(); 199 | 200 | let base = iri!(format!("{scheme}://{authority}")); 201 | 202 | Ok(Node { 203 | base, 204 | info: None, 205 | instance: None, 206 | contact: None, 207 | }) 208 | } 209 | 210 | fn info(mut self, info: Option) -> Self { 211 | self.info = info; 212 | self 213 | } 214 | 215 | fn instance(mut self, instance: Option) -> Self { 216 | self.instance = instance; 217 | self 218 | } 219 | 220 | fn contact(mut self, contact: Option) -> Self { 221 | self.contact = contact; 222 | self 223 | } 224 | } 225 | 226 | static TEN_MINUTES: Duration = Duration::from_secs(60 * 10); 227 | 228 | impl Info { 229 | pub(crate) fn outdated(&self) -> bool { 230 | self.updated + TEN_MINUTES < SystemTime::now() 231 | } 232 | } 233 | 234 | impl Instance { 235 | pub(crate) fn outdated(&self) -> bool { 236 | self.updated + TEN_MINUTES < SystemTime::now() 237 | } 238 | } 239 | 240 | impl Contact { 241 | pub(crate) fn outdated(&self) -> bool { 242 | self.updated + TEN_MINUTES < SystemTime::now() 243 | } 244 | } 245 | -------------------------------------------------------------------------------- /src/data/state.rs: -------------------------------------------------------------------------------- 1 | use crate::{ 2 | data::NodeCache, 3 | db::Db, 4 | error::Error, 5 | requests::{Breakers, Requests}, 6 | spawner::Spawner, 7 | }; 8 | use activitystreams::iri_string::types::IriString; 9 | use actix_web::web; 10 | use lru::LruCache; 11 | use rand::thread_rng; 12 | use reqwest_middleware::ClientWithMiddleware; 13 | use rsa::{RsaPrivateKey, RsaPublicKey}; 14 | use std::sync::{Arc, RwLock}; 15 | use std::collections::HashMap; 16 | 17 | use super::LastOnline; 18 | use super::node::NodeConfig; 19 | 20 | #[derive(Clone)] 21 | pub struct State { 22 | pub(crate) requests: Requests, 23 | pub(crate) public_key: RsaPublicKey, 24 | object_cache: Arc>>, 25 | pub(crate) node_cache: NodeCache, 26 | pub(crate) node_config: Arc>>, 27 | breakers: Breakers, 28 | pub(crate) last_online: Arc, 29 | pub(crate) db: Db, 30 | } 31 | 32 | impl std::fmt::Debug for State { 33 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 34 | f.debug_struct("State") 35 | .field("node_cache", &self.node_cache) 36 | .field("breakers", &self.breakers) 37 | .field("db", &self.db) 38 | .finish() 39 | } 40 | } 41 | 42 | impl State { 43 | #[tracing::instrument( 44 | level = "debug", 45 | name = "Get inboxes for other domains", 46 | skip_all, 47 | fields( 
48 | existing_inbox = existing_inbox.to_string().as_str(), 49 | authority 50 | ) 51 | )] 52 | pub(crate) async fn inboxes_without( 53 | &self, 54 | existing_inbox: &IriString, 55 | authority: &str, 56 | ) -> Result, Error> { 57 | Ok(self 58 | .db 59 | .inboxes() 60 | .await? 61 | .iter() 62 | .filter_map(|inbox| { 63 | if let Some(authority_str) = inbox.authority_str() { 64 | if inbox != existing_inbox && authority_str != authority { 65 | return Some(inbox.clone()); 66 | } 67 | } 68 | 69 | None 70 | }) 71 | .collect()) 72 | } 73 | 74 | pub(crate) async fn set_authority_cfg(&self, authority: &str, cfg: NodeConfig) { 75 | self.node_config.write().unwrap().insert(authority.to_string(), cfg); 76 | } 77 | 78 | pub(crate) async fn clear_authority_cfg(&self, authority: &str) { 79 | self.node_config.write().unwrap().remove(authority); 80 | } 81 | 82 | pub(crate) async fn get_authority_cfg(&self, authority: &str) -> Option { 83 | self.node_config.read().unwrap().get(authority).cloned() 84 | } 85 | 86 | pub(crate) async fn get_all_authority_cfg(&self) -> HashMap { 87 | self.node_config.read().unwrap().clone() 88 | } 89 | 90 | pub(crate) fn is_cached(&self, object_id: &IriString) -> bool { 91 | self.object_cache.read().unwrap().contains(object_id) 92 | } 93 | 94 | pub(crate) fn cache(&self, object_id: IriString, actor_id: IriString) { 95 | self.object_cache.write().unwrap().put(object_id, actor_id); 96 | } 97 | 98 | pub(crate) fn is_connected(&self, iri: &IriString) -> bool { 99 | self.breakers.should_try(iri) 100 | } 101 | 102 | #[tracing::instrument(level = "debug", name = "Building state", skip_all)] 103 | pub(crate) async fn build( 104 | db: Db, 105 | key_id: String, 106 | spawner: Spawner, 107 | client: ClientWithMiddleware, 108 | node_config: HashMap, 109 | ) -> Result { 110 | let private_key = if let Ok(Some(key)) = db.private_key().await { 111 | tracing::debug!("Using existing key"); 112 | key 113 | } else { 114 | tracing::info!("Generating new keys"); 115 | let key = web::block(move || { 116 | let mut rng = thread_rng(); 117 | RsaPrivateKey::new(&mut rng, 4096) 118 | }) 119 | .await??; 120 | 121 | db.update_private_key(&key).await?; 122 | 123 | key 124 | }; 125 | 126 | let public_key = private_key.to_public_key(); 127 | 128 | let breakers = Breakers::default(); 129 | let last_online = Arc::new(LastOnline::empty()); 130 | 131 | let requests = Requests::new( 132 | key_id, 133 | private_key, 134 | breakers.clone(), 135 | last_online.clone(), 136 | spawner, 137 | client, 138 | ); 139 | 140 | let state = State { 141 | requests, 142 | public_key, 143 | object_cache: Arc::new(RwLock::new(LruCache::new( 144 | (1024 * 8).try_into().expect("nonzero"), 145 | ))), 146 | node_cache: NodeCache::new(db.clone()), 147 | node_config: Arc::new(RwLock::new(node_config)), 148 | breakers, 149 | db, 150 | last_online, 151 | }; 152 | 153 | Ok(state) 154 | } 155 | } 156 | -------------------------------------------------------------------------------- /src/error.rs: -------------------------------------------------------------------------------- 1 | use activitystreams::checked::CheckError; 2 | use actix_web::{ 3 | error::{BlockingError, ResponseError}, 4 | http::StatusCode, 5 | HttpResponse, 6 | }; 7 | use http_signature_normalization_reqwest::SignError; 8 | use std::{convert::Infallible, fmt::Debug, io}; 9 | use tokio::task::JoinError; 10 | use tracing_error::SpanTrace; 11 | 12 | pub(crate) struct Error { 13 | context: String, 14 | kind: ErrorKind, 15 | } 16 | 17 | impl Error { 18 | pub(crate) fn is_breaker(&self) 
-> bool { 19 | matches!(self.kind, ErrorKind::Breaker) 20 | } 21 | 22 | pub(crate) fn is_not_found(&self) -> bool { 23 | matches!(self.kind, ErrorKind::Status(_, StatusCode::NOT_FOUND)) 24 | } 25 | 26 | pub(crate) fn is_bad_request(&self) -> bool { 27 | matches!(self.kind, ErrorKind::Status(_, StatusCode::BAD_REQUEST)) 28 | } 29 | 30 | pub(crate) fn is_gone(&self) -> bool { 31 | matches!(self.kind, ErrorKind::Status(_, StatusCode::GONE)) 32 | } 33 | 34 | pub(crate) fn is_malformed_json(&self) -> bool { 35 | matches!(self.kind, ErrorKind::Json(_)) 36 | } 37 | } 38 | 39 | impl std::fmt::Debug for Error { 40 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 41 | writeln!(f, "{:?}", self.kind) 42 | } 43 | } 44 | 45 | impl std::fmt::Display for Error { 46 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 47 | writeln!(f, "{}", self.kind)?; 48 | std::fmt::Display::fmt(&self.context, f) 49 | } 50 | } 51 | 52 | impl std::error::Error for Error { 53 | fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { 54 | self.kind.source() 55 | } 56 | } 57 | 58 | impl From for Error 59 | where 60 | ErrorKind: From, 61 | { 62 | fn from(error: T) -> Self { 63 | Error { 64 | context: SpanTrace::capture().to_string(), 65 | kind: error.into(), 66 | } 67 | } 68 | } 69 | 70 | #[derive(Debug, thiserror::Error)] 71 | pub(crate) enum ErrorKind { 72 | #[error("Error queueing job, {0}")] 73 | Queue(anyhow::Error), 74 | 75 | #[error("Error in configuration, {0}")] 76 | Config(#[from] config::ConfigError), 77 | 78 | #[error("Couldn't parse key, {0}")] 79 | Pkcs8(#[from] rsa::pkcs8::Error), 80 | 81 | #[error("Couldn't encode public key, {0}")] 82 | Spki(#[from] rsa::pkcs8::spki::Error), 83 | 84 | #[error("Couldn't sign request")] 85 | SignRequest, 86 | 87 | #[error("Couldn't make request")] 88 | Reqwest(#[from] reqwest::Error), 89 | 90 | #[error("Couldn't build client")] 91 | ReqwestMiddleware(#[from] reqwest_middleware::Error), 92 | 93 | #[error("Couldn't parse IRI, {0}")] 94 | ParseIri(#[from] activitystreams::iri_string::validate::Error), 95 | 96 | #[error("Couldn't normalize IRI, {0}")] 97 | NormalizeIri(#[from] std::collections::TryReserveError), 98 | 99 | #[error("Couldn't perform IO, {0}")] 100 | Io(#[from] io::Error), 101 | 102 | #[error("Couldn't sign string, {0}")] 103 | Rsa(rsa::errors::Error), 104 | 105 | #[error("Couldn't use db, {0}")] 106 | Sled(#[from] sled::Error), 107 | 108 | #[error("Couldn't do the json thing, {0}")] 109 | Json(#[from] serde_json::Error), 110 | 111 | #[error("Couldn't sign request, {0}")] 112 | Sign(#[from] SignError), 113 | 114 | #[error("Couldn't sign digest")] 115 | Signature(#[from] rsa::signature::Error), 116 | 117 | #[error("Couldn't verify signature")] 118 | VerifySignature, 119 | 120 | #[error("Failed to encode key der")] 121 | DerEncode, 122 | 123 | #[error("Couldn't parse the signature header")] 124 | HeaderValidation(#[from] actix_web::http::header::InvalidHeaderValue), 125 | 126 | #[error("Couldn't decode base64")] 127 | Base64(#[from] base64::DecodeError), 128 | 129 | #[error("Actor ({0}), or Actor's server, is not subscribed")] 130 | NotSubscribed(String), 131 | 132 | #[error("Actor is not allowed, {0}")] 133 | NotAllowed(String), 134 | 135 | #[error("Cannot make decisions for foreign actor, {0}")] 136 | WrongActor(String), 137 | 138 | #[error("Actor ({0}) tried to submit another actor's ({1}) payload")] 139 | BadActor(String, String), 140 | 141 | #[error("Signature verification is required, but no signature was given")] 
142 | NoSignature(Option<String>),
143 |
144 | #[error("Wrong ActivityPub kind, {0}")]
145 | Kind(String),
146 |
147 | #[error("Too many CPUs, {0}")]
148 | CpuCount(#[from] std::num::TryFromIntError),
149 |
150 | #[error("{0}")]
151 | HostMismatch(#[from] CheckError),
152 |
153 | #[error("Couldn't flush buffer")]
154 | FlushBuffer,
155 |
156 | #[error("Invalid algorithm provided to verifier, {0}")]
157 | Algorithm(String),
158 |
159 | #[error("Object has already been relayed")]
160 | Duplicate,
161 |
162 | #[error("Couldn't send request to {0}, {1}")]
163 | SendRequest(String, String),
164 |
165 | #[error("Couldn't receive request response from {0}, {1}")]
166 | ReceiveResponse(String, String),
167 |
168 | #[error("Response from {0} has invalid status code, {1}")]
169 | Status(String, StatusCode),
170 |
171 | #[error("Expected an Object, found something else")]
172 | ObjectFormat,
173 |
174 | #[error("Expected a single object, found array")]
175 | ObjectCount,
176 |
177 | #[error("Input is missing a 'type' field")]
178 | MissingKind,
179 |
180 | #[error("Input is missing an 'id' field")]
181 | MissingId,
182 |
183 | #[error("IriString is missing a domain")]
184 | MissingDomain,
185 |
186 | #[error("URI is missing domain field")]
187 | Domain,
188 |
189 | #[error("Blocking operation was canceled")]
190 | Canceled,
191 |
192 | #[error("Not trying request due to failed breaker")]
193 | Breaker,
194 |
195 | #[error("Failed to extract fields from {0}")]
196 | Extract(&'static str),
197 |
198 | #[error("No API Token supplied")]
199 | MissingApiToken,
200 |
201 | #[error("Resource not found")]
202 | NotFound,
203 | }
204 |
205 | impl ResponseError for Error {
206 | fn status_code(&self) -> StatusCode {
207 | match self.kind {
208 | ErrorKind::NotAllowed(_) | ErrorKind::WrongActor(_) | ErrorKind::BadActor(_, _) => {
209 | StatusCode::FORBIDDEN
210 | }
211 | ErrorKind::NotSubscribed(_) => StatusCode::UNAUTHORIZED,
212 | ErrorKind::Duplicate => StatusCode::ACCEPTED,
213 | ErrorKind::NotFound => StatusCode::NOT_FOUND,
214 | ErrorKind::Kind(_)
215 | | ErrorKind::MissingKind
216 | | ErrorKind::MissingId
217 | | ErrorKind::ObjectCount
218 | | ErrorKind::NoSignature(_) => StatusCode::BAD_REQUEST,
219 | _ => StatusCode::INTERNAL_SERVER_ERROR,
220 | }
221 | }
222 |
223 | fn error_response(&self) -> HttpResponse {
224 | HttpResponse::build(self.status_code())
225 | .insert_header(("Content-Type", "application/activity+json"))
226 | .body(
227 | serde_json::to_string(&serde_json::json!({
228 | "error": self.kind.to_string(),
229 | }))
230 | .unwrap_or_else(|_| "{}".to_string()),
231 | )
232 | }
233 | }
234 |
235 | impl From<BlockingError> for ErrorKind {
236 | fn from(_: BlockingError) -> Self {
237 | ErrorKind::Canceled
238 | }
239 | }
240 |
241 | impl From<JoinError> for ErrorKind {
242 | fn from(_: JoinError) -> Self {
243 | ErrorKind::Canceled
244 | }
245 | }
246 |
247 | impl From<Infallible> for ErrorKind {
248 | fn from(i: Infallible) -> Self {
249 | match i {}
250 | }
251 | }
252 |
253 | impl From<rsa::errors::Error> for ErrorKind {
254 | fn from(e: rsa::errors::Error) -> Self {
255 | ErrorKind::Rsa(e)
256 | }
257 | }
258 |
259 | impl From<http_signature_normalization_actix::Canceled> for ErrorKind {
260 | fn from(_: http_signature_normalization_actix::Canceled) -> Self {
261 | Self::Canceled
262 | }
263 | }
264 |
265 | impl From<http_signature_normalization_reqwest::Canceled> for ErrorKind {
266 | fn from(_: http_signature_normalization_reqwest::Canceled) -> Self {
267 | Self::Canceled
268 | }
269 | }
270 |
--------------------------------------------------------------------------------
/src/extractors.rs:
-------------------------------------------------------------------------------- 1 | use actix_web::{ 2 | dev::Payload, 3 | error::ParseError, 4 | http::{ 5 | header::{from_one_raw_str, Header, HeaderName, HeaderValue, TryIntoHeaderValue}, 6 | StatusCode, 7 | }, 8 | web::Data, 9 | FromRequest, HttpMessage, HttpRequest, HttpResponse, ResponseError, 10 | }; 11 | use bcrypt::{BcryptError, DEFAULT_COST}; 12 | use http_signature_normalization_actix::{prelude::InvalidHeaderValue, Canceled, Spawn}; 13 | use std::{convert::Infallible, str::FromStr, time::Instant}; 14 | use tracing_error::SpanTrace; 15 | 16 | use crate::{db::Db, future::LocalBoxFuture, spawner::Spawner}; 17 | 18 | #[derive(Clone)] 19 | pub(crate) struct AdminConfig { 20 | hashed_api_token: String, 21 | } 22 | 23 | impl AdminConfig { 24 | pub(crate) fn build(api_token: &str) -> Result { 25 | Ok(AdminConfig { 26 | hashed_api_token: bcrypt::hash(api_token, DEFAULT_COST).map_err(Error::bcrypt_hash)?, 27 | }) 28 | } 29 | 30 | fn verify(&self, token: XApiToken) -> Result { 31 | bcrypt::verify(token.0, &self.hashed_api_token).map_err(Error::bcrypt_verify) 32 | } 33 | } 34 | 35 | pub(crate) struct Admin { 36 | db: Data, 37 | } 38 | 39 | type PrepareTuple = (Data, Data, Data, XApiToken); 40 | 41 | impl Admin { 42 | fn prepare_verify(req: &HttpRequest) -> Result { 43 | let hashed_api_token = req 44 | .app_data::>() 45 | .ok_or_else(Error::missing_config)? 46 | .clone(); 47 | 48 | let x_api_token = XApiToken::parse(req).map_err(Error::parse_header)?; 49 | 50 | let db = req 51 | .app_data::>() 52 | .ok_or_else(Error::missing_db)? 53 | .clone(); 54 | 55 | let spawner = req 56 | .app_data::>() 57 | .ok_or_else(Error::missing_spawner)? 58 | .clone(); 59 | 60 | Ok((db, hashed_api_token, spawner, x_api_token)) 61 | } 62 | 63 | #[tracing::instrument(level = "debug", skip_all)] 64 | async fn verify( 65 | hashed_api_token: Data, 66 | spawner: Data, 67 | x_api_token: XApiToken, 68 | ) -> Result<(), Error> { 69 | let span = tracing::Span::current(); 70 | if spawner 71 | .spawn_blocking(move || span.in_scope(|| hashed_api_token.verify(x_api_token))) 72 | .await 73 | .map_err(Error::canceled)?? 
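   | // The double `?` first surfaces a canceled blocking task, then any bcrypt verification failure.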
74 | { 75 | return Ok(()); 76 | } 77 | 78 | Err(Error::invalid()) 79 | } 80 | 81 | pub(crate) fn db_ref(&self) -> &Db { 82 | &self.db 83 | } 84 | } 85 | 86 | #[derive(Debug, thiserror::Error)] 87 | #[error("Failed authentication")] 88 | pub(crate) struct Error { 89 | context: String, 90 | #[source] 91 | kind: ErrorKind, 92 | } 93 | 94 | impl Error { 95 | fn invalid() -> Self { 96 | Error { 97 | context: SpanTrace::capture().to_string(), 98 | kind: ErrorKind::Invalid, 99 | } 100 | } 101 | 102 | fn missing_config() -> Self { 103 | Error { 104 | context: SpanTrace::capture().to_string(), 105 | kind: ErrorKind::MissingConfig, 106 | } 107 | } 108 | 109 | fn missing_db() -> Self { 110 | Error { 111 | context: SpanTrace::capture().to_string(), 112 | kind: ErrorKind::MissingDb, 113 | } 114 | } 115 | 116 | fn missing_spawner() -> Self { 117 | Error { 118 | context: SpanTrace::capture().to_string(), 119 | kind: ErrorKind::MissingSpawner, 120 | } 121 | } 122 | 123 | fn bcrypt_verify(e: BcryptError) -> Self { 124 | Error { 125 | context: SpanTrace::capture().to_string(), 126 | kind: ErrorKind::BCryptVerify(e), 127 | } 128 | } 129 | 130 | fn bcrypt_hash(e: BcryptError) -> Self { 131 | Error { 132 | context: SpanTrace::capture().to_string(), 133 | kind: ErrorKind::BCryptHash(e), 134 | } 135 | } 136 | 137 | fn parse_header(e: ParseError) -> Self { 138 | Error { 139 | context: SpanTrace::capture().to_string(), 140 | kind: ErrorKind::ParseHeader(e), 141 | } 142 | } 143 | 144 | fn canceled(_: Canceled) -> Self { 145 | Error { 146 | context: SpanTrace::capture().to_string(), 147 | kind: ErrorKind::Canceled, 148 | } 149 | } 150 | } 151 | 152 | #[derive(Debug, thiserror::Error)] 153 | enum ErrorKind { 154 | #[error("Invalid API Token")] 155 | Invalid, 156 | 157 | #[error("Missing Config")] 158 | MissingConfig, 159 | 160 | #[error("Missing Db")] 161 | MissingDb, 162 | 163 | #[error("Missing Spawner")] 164 | MissingSpawner, 165 | 166 | #[error("Panic in verify")] 167 | Canceled, 168 | 169 | #[error("Verifying")] 170 | BCryptVerify(#[source] BcryptError), 171 | 172 | #[error("Hashing")] 173 | BCryptHash(#[source] BcryptError), 174 | 175 | #[error("Parse Header")] 176 | ParseHeader(#[source] ParseError), 177 | } 178 | 179 | impl ResponseError for Error { 180 | fn status_code(&self) -> StatusCode { 181 | match self.kind { 182 | ErrorKind::Invalid | ErrorKind::ParseHeader(_) => StatusCode::BAD_REQUEST, 183 | _ => StatusCode::INTERNAL_SERVER_ERROR, 184 | } 185 | } 186 | 187 | fn error_response(&self) -> HttpResponse { 188 | HttpResponse::build(self.status_code()) 189 | .json(serde_json::json!({ "msg": self.kind.to_string() })) 190 | } 191 | } 192 | 193 | impl FromRequest for Admin { 194 | type Error = Error; 195 | type Future = LocalBoxFuture<'static, Result>; 196 | 197 | fn from_request(req: &HttpRequest, _: &mut Payload) -> Self::Future { 198 | let now = Instant::now(); 199 | let res = Self::prepare_verify(req); 200 | Box::pin(async move { 201 | let (db, c, s, t) = res?; 202 | Self::verify(c, s, t).await?; 203 | metrics::histogram!("relay.admin.verify") 204 | .record(now.elapsed().as_micros() as f64 / 1_000_000_f64); 205 | Ok(Admin { db }) 206 | }) 207 | } 208 | } 209 | 210 | pub(crate) struct XApiToken(String); 211 | 212 | impl XApiToken { 213 | pub(crate) fn new(token: String) -> Self { 214 | Self(token) 215 | } 216 | } 217 | 218 | impl Header for XApiToken { 219 | fn name() -> HeaderName { 220 | HeaderName::from_static("x-api-token") 221 | } 222 | 223 | fn parse(msg: &M) -> Result { 224 | 
from_one_raw_str(msg.headers().get(Self::name())) 225 | } 226 | } 227 | 228 | impl TryIntoHeaderValue for XApiToken { 229 | type Error = InvalidHeaderValue; 230 | 231 | fn try_into_value(self) -> Result { 232 | HeaderValue::from_str(&self.0) 233 | } 234 | } 235 | 236 | impl FromStr for XApiToken { 237 | type Err = Infallible; 238 | 239 | fn from_str(s: &str) -> Result { 240 | Ok(XApiToken(s.to_string())) 241 | } 242 | } 243 | 244 | impl std::fmt::Display for XApiToken { 245 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 246 | self.0.fmt(f) 247 | } 248 | } 249 | -------------------------------------------------------------------------------- /src/future.rs: -------------------------------------------------------------------------------- 1 | use std::{future::Future, pin::Pin}; 2 | 3 | pub(crate) type LocalBoxFuture<'a, T> = Pin + 'a>>; 4 | pub(crate) type BoxFuture<'a, T> = Pin + Send + 'a>>; 5 | -------------------------------------------------------------------------------- /src/jobs.rs: -------------------------------------------------------------------------------- 1 | pub mod apub; 2 | mod contact; 3 | mod deliver; 4 | mod deliver_many; 5 | mod instance; 6 | mod nodeinfo; 7 | mod process_listeners; 8 | mod record_last_online; 9 | 10 | pub(crate) use self::{ 11 | contact::QueryContact, deliver::Deliver, deliver_many::DeliverMany, instance::QueryInstance, 12 | nodeinfo::QueryNodeinfo, 13 | }; 14 | 15 | use crate::{ 16 | config::Config, 17 | data::{ActorCache, MediaCache, State}, 18 | error::{Error, ErrorKind}, 19 | jobs::{process_listeners::Listeners, record_last_online::RecordLastOnline}, 20 | }; 21 | use background_jobs::{ 22 | memory_storage::{Storage, TokioTimer}, 23 | metrics::MetricsStorage, 24 | tokio::{QueueHandle, WorkerConfig}, 25 | Job, 26 | }; 27 | use std::time::Duration; 28 | 29 | fn debug_object(activity: &serde_json::Value) -> &serde_json::Value { 30 | let mut object = &activity["object"]["type"]; 31 | 32 | if object.is_null() { 33 | object = &activity["object"]["id"]; 34 | } 35 | 36 | if object.is_null() { 37 | object = &activity["object"]; 38 | } 39 | 40 | object 41 | } 42 | 43 | pub(crate) fn create_workers( 44 | state: State, 45 | actors: ActorCache, 46 | media: MediaCache, 47 | config: Config, 48 | ) -> std::io::Result { 49 | let deliver_concurrency = config.deliver_concurrency(); 50 | 51 | let queue_handle = WorkerConfig::new( 52 | MetricsStorage::wrap(Storage::new(TokioTimer)), 53 | move |queue_handle| { 54 | JobState::new( 55 | state.clone(), 56 | actors.clone(), 57 | JobServer::new(queue_handle), 58 | media.clone(), 59 | config.clone(), 60 | ) 61 | }, 62 | ) 63 | .register::() 64 | .register::() 65 | .register::() 66 | .register::() 67 | .register::() 68 | .register::() 69 | .register::() 70 | .register::() 71 | .register::() 72 | .register::() 73 | .register::() 74 | .register::() 75 | .set_worker_count("maintenance", 2) 76 | .set_worker_count("apub", 2) 77 | .set_worker_count("deliver", deliver_concurrency) 78 | .start()?; 79 | 80 | queue_handle.every(Duration::from_secs(60 * 5), Listeners)?; 81 | queue_handle.every(Duration::from_secs(60 * 10), RecordLastOnline)?; 82 | 83 | Ok(JobServer::new(queue_handle)) 84 | } 85 | 86 | #[derive(Clone, Debug)] 87 | pub(crate) struct JobState { 88 | state: State, 89 | actors: ActorCache, 90 | config: Config, 91 | media: MediaCache, 92 | job_server: JobServer, 93 | } 94 | 95 | #[derive(Clone)] 96 | pub(crate) struct JobServer { 97 | remote: QueueHandle, 98 | } 99 | 100 | impl std::fmt::Debug for 
JobServer { 101 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 102 | f.debug_struct("JobServer") 103 | .field("queue_handle", &"QueueHandle") 104 | .finish() 105 | } 106 | } 107 | 108 | impl JobState { 109 | fn new( 110 | state: State, 111 | actors: ActorCache, 112 | job_server: JobServer, 113 | media: MediaCache, 114 | config: Config, 115 | ) -> Self { 116 | JobState { 117 | state, 118 | actors, 119 | config, 120 | media, 121 | job_server, 122 | } 123 | } 124 | } 125 | 126 | impl JobServer { 127 | fn new(remote_handle: QueueHandle) -> Self { 128 | JobServer { 129 | remote: remote_handle, 130 | } 131 | } 132 | 133 | pub(crate) async fn queue(&self, job: J) -> Result<(), Error> 134 | where 135 | J: Job, 136 | { 137 | self.remote 138 | .queue(job) 139 | .await 140 | .map_err(ErrorKind::Queue) 141 | .map_err(Into::into) 142 | } 143 | } 144 | 145 | struct Boolish { 146 | inner: bool, 147 | } 148 | 149 | impl std::ops::Deref for Boolish { 150 | type Target = bool; 151 | 152 | fn deref(&self) -> &Self::Target { 153 | &self.inner 154 | } 155 | } 156 | 157 | impl<'de> serde::Deserialize<'de> for Boolish { 158 | fn deserialize(deserializer: D) -> Result 159 | where 160 | D: serde::Deserializer<'de>, 161 | { 162 | #[derive(serde::Deserialize)] 163 | #[serde(untagged)] 164 | enum BoolThing { 165 | Bool(bool), 166 | String(String), 167 | } 168 | 169 | let thing: BoolThing = serde::Deserialize::deserialize(deserializer)?; 170 | 171 | match thing { 172 | BoolThing::Bool(inner) => Ok(Boolish { inner }), 173 | BoolThing::String(s) if s.to_lowercase() == "false" => Ok(Boolish { inner: false }), 174 | BoolThing::String(_) => Ok(Boolish { inner: true }), 175 | } 176 | } 177 | } 178 | 179 | #[cfg(test)] 180 | mod tests { 181 | use super::Boolish; 182 | 183 | #[test] 184 | fn boolish_works() { 185 | const CASES: &[(&str, bool)] = &[ 186 | ("false", false), 187 | ("\"false\"", false), 188 | ("\"FALSE\"", false), 189 | ("true", true), 190 | ("\"true\"", true), 191 | ("\"anything else\"", true), 192 | ]; 193 | 194 | for (case, output) in CASES { 195 | let b: Boolish = serde_json::from_str(case).unwrap(); 196 | assert_eq!(*b, *output); 197 | } 198 | } 199 | } 200 | -------------------------------------------------------------------------------- /src/jobs/apub.rs: -------------------------------------------------------------------------------- 1 | use crate::{ 2 | config::{Config, UrlKind}, 3 | data::State, 4 | db::Actor, 5 | error::{Error, ErrorKind}, 6 | }; 7 | use activitystreams::{ 8 | activity::{Follow as AsFollow, Undo as AsUndo}, 9 | context, 10 | iri_string::types::IriString, 11 | prelude::*, 12 | security, 13 | }; 14 | use std::convert::TryInto; 15 | 16 | mod announce; 17 | mod follow; 18 | mod forward; 19 | mod reject; 20 | mod undo; 21 | 22 | pub(crate) use self::{ 23 | announce::Announce, follow::Follow, forward::Forward, reject::Reject, undo::Undo, 24 | }; 25 | 26 | async fn get_inboxes( 27 | state: &State, 28 | actor: &Actor, 29 | object_id: &IriString, 30 | ) -> Result, Error> { 31 | let authority = object_id 32 | .authority_str() 33 | .ok_or(ErrorKind::Domain)? 34 | .to_string(); 35 | 36 | state.inboxes_without(&actor.inbox, &authority).await 37 | } 38 | 39 | fn prepare_activity( 40 | mut t: T, 41 | id: impl TryInto, 42 | to: impl TryInto, 43 | ) -> Result 44 | where 45 | T: ObjectExt + BaseExt, 46 | Error: From + From, 47 | { 48 | t.set_id(id.try_into()?) 
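   | // Every prepared activity carries an explicit id, a single `to` recipient, and the ActivityStreams + security contexts.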
49 | .set_many_tos(vec![to.try_into()?]) 50 | .set_many_contexts(vec![context(), security()]); 51 | Ok(t) 52 | } 53 | 54 | // Generate a type that says "I want to stop following you" 55 | fn generate_undo_follow( 56 | config: &Config, 57 | actor_id: &IriString, 58 | my_id: &IriString, 59 | ) -> Result { 60 | let mut follow = AsFollow::new(my_id.clone(), actor_id.clone()); 61 | 62 | follow.set_id(config.generate_url(UrlKind::Activity)); 63 | 64 | let undo = AsUndo::new(my_id.clone(), follow.into_any_base()?); 65 | 66 | prepare_activity(undo, config.generate_url(UrlKind::Actor), actor_id.clone()) 67 | } 68 | -------------------------------------------------------------------------------- /src/jobs/apub/announce.rs: -------------------------------------------------------------------------------- 1 | use crate::{ 2 | config::{Config, UrlKind}, 3 | db::Actor, 4 | error::{Error, ErrorKind}, 5 | future::BoxFuture, 6 | jobs::{ 7 | apub::{get_inboxes, prepare_activity}, 8 | DeliverMany, JobState, 9 | }, 10 | }; 11 | use activitystreams::{activity::Announce as AsAnnounce, iri_string::types::IriString}; 12 | use background_jobs::Job; 13 | 14 | #[derive(Clone, serde::Deserialize, serde::Serialize)] 15 | pub(crate) struct Announce { 16 | object_id: IriString, 17 | actor: Actor, 18 | } 19 | 20 | impl std::fmt::Debug for Announce { 21 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 22 | f.debug_struct("Announce") 23 | .field("object_id", &self.object_id.to_string()) 24 | .field("actor_id", &self.actor.id) 25 | .finish() 26 | } 27 | } 28 | 29 | impl Announce { 30 | pub fn new(object_id: IriString, actor: Actor) -> Self { 31 | Announce { object_id, actor } 32 | } 33 | 34 | #[tracing::instrument(name = "Announce", skip(state))] 35 | async fn perform(self, state: JobState) -> Result<(), Error> { 36 | let activity_id = state.config.generate_url(UrlKind::Activity); 37 | 38 | let authority = self.actor.id.authority_str().ok_or_else(|| { 39 | ErrorKind::MissingDomain 40 | })?; 41 | 42 | if let Ok(node_config) = state.state.node_config.read() { 43 | tracing::info!("Checking if {} is receive-only", authority); 44 | if let Some(cfg) = node_config.get(authority) { 45 | if cfg.receive_only { 46 | tracing::info!("{} is receive-only, skipping", authority); 47 | return Ok(()) 48 | } 49 | } 50 | } else { 51 | tracing::warn!("Failed to read node config, skipping receive-only check"); 52 | } 53 | 54 | let announce = generate_announce(&state.config, &activity_id, &self.object_id)?; 55 | let inboxes = get_inboxes(&state.state, &self.actor, &self.object_id).await?; 56 | state 57 | .job_server 58 | .queue(DeliverMany::new(inboxes, announce, authority.to_owned(), true)?) 
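   | // Fan the announce out to every connected inbox except the originating actor's own instance.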
59 | .await?; 60 | 61 | state.state.cache(self.object_id, activity_id); 62 | Ok(()) 63 | } 64 | } 65 | 66 | // Generate a type that says "Look at this object" 67 | fn generate_announce( 68 | config: &Config, 69 | activity_id: &IriString, 70 | object_id: &IriString, 71 | ) -> Result { 72 | let announce = AsAnnounce::new(config.generate_url(UrlKind::Actor), object_id.clone()); 73 | 74 | prepare_activity( 75 | announce, 76 | activity_id.clone(), 77 | config.generate_url(UrlKind::Followers), 78 | ) 79 | } 80 | 81 | impl Job for Announce { 82 | type State = JobState; 83 | type Future = BoxFuture<'static, anyhow::Result<()>>; 84 | 85 | const NAME: &'static str = "relay::jobs::apub::Announce"; 86 | const QUEUE: &'static str = "apub"; 87 | 88 | fn run(self, state: Self::State) -> Self::Future { 89 | Box::pin(async move { self.perform(state).await.map_err(Into::into) }) 90 | } 91 | } 92 | -------------------------------------------------------------------------------- /src/jobs/apub/follow.rs: -------------------------------------------------------------------------------- 1 | use crate::{ 2 | apub::AcceptedActivities, 3 | config::{Config, UrlKind}, 4 | db::Actor, 5 | error::{Error, ErrorKind}, 6 | future::BoxFuture, 7 | jobs::{apub::prepare_activity, Deliver, JobState, QueryInstance, QueryNodeinfo}, 8 | }; 9 | use activitystreams::{ 10 | activity::{Accept as AsAccept, Follow as AsFollow}, 11 | iri_string::types::IriString, 12 | prelude::*, 13 | }; 14 | use background_jobs::Job; 15 | 16 | #[derive(Clone, serde::Deserialize, serde::Serialize)] 17 | pub(crate) struct Follow { 18 | input: AcceptedActivities, 19 | actor: Actor, 20 | } 21 | 22 | impl std::fmt::Debug for Follow { 23 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 24 | f.debug_struct("Follow") 25 | .field("input", &self.input.id_unchecked()) 26 | .field("actor", &self.actor.id) 27 | .finish() 28 | } 29 | } 30 | 31 | impl Follow { 32 | pub fn new(input: AcceptedActivities, actor: Actor) -> Self { 33 | Follow { input, actor } 34 | } 35 | 36 | #[tracing::instrument(name = "Follow", skip(state))] 37 | async fn perform(self, state: JobState) -> Result<(), Error> { 38 | let my_id = state.config.generate_url(UrlKind::Actor); 39 | 40 | // if following relay directly, not just following 'public', followback 41 | if self.input.object_is(&my_id) 42 | && !state.state.db.is_connected(self.actor.id.clone()).await? 43 | { 44 | let follow = generate_follow(&state.config, &self.actor.id, &my_id)?; 45 | state 46 | .job_server 47 | .queue(Deliver::new(self.actor.inbox.clone(), follow)?) 48 | .await?; 49 | } 50 | 51 | state.actors.add_connection(self.actor.clone()).await?; 52 | 53 | let accept = generate_accept_follow( 54 | &state.config, 55 | &self.actor.id, 56 | self.input.id_unchecked().ok_or(ErrorKind::MissingId)?, 57 | &my_id, 58 | )?; 59 | 60 | state 61 | .job_server 62 | .queue(Deliver::new(self.actor.inbox, accept)?) 
63 | .await?; 64 | 65 | state 66 | .job_server 67 | .queue(QueryInstance::new(self.actor.id.clone())) 68 | .await?; 69 | 70 | state 71 | .job_server 72 | .queue(QueryNodeinfo::new(self.actor.id)) 73 | .await?; 74 | 75 | Ok(()) 76 | } 77 | } 78 | 79 | // Generate a type that says "I want to follow you" 80 | fn generate_follow( 81 | config: &Config, 82 | actor_id: &IriString, 83 | my_id: &IriString, 84 | ) -> Result { 85 | let follow = AsFollow::new(my_id.clone(), actor_id.clone()); 86 | 87 | prepare_activity( 88 | follow, 89 | config.generate_url(UrlKind::Activity), 90 | actor_id.clone(), 91 | ) 92 | } 93 | 94 | // Generate a type that says "I accept your follow request" 95 | fn generate_accept_follow( 96 | config: &Config, 97 | actor_id: &IriString, 98 | input_id: &IriString, 99 | my_id: &IriString, 100 | ) -> Result { 101 | let mut follow = AsFollow::new(actor_id.clone(), my_id.clone()); 102 | 103 | follow.set_id(input_id.clone()); 104 | 105 | let accept = AsAccept::new(my_id.clone(), follow.into_any_base()?); 106 | 107 | prepare_activity( 108 | accept, 109 | config.generate_url(UrlKind::Activity), 110 | actor_id.clone(), 111 | ) 112 | } 113 | 114 | impl Job for Follow { 115 | type State = JobState; 116 | type Future = BoxFuture<'static, anyhow::Result<()>>; 117 | 118 | const NAME: &'static str = "relay::jobs::apub::Follow"; 119 | const QUEUE: &'static str = "apub"; 120 | 121 | fn run(self, state: Self::State) -> Self::Future { 122 | Box::pin(async move { self.perform(state).await.map_err(Into::into) }) 123 | } 124 | } 125 | -------------------------------------------------------------------------------- /src/jobs/apub/forward.rs: -------------------------------------------------------------------------------- 1 | use crate::{ 2 | apub::AcceptedActivities, 3 | db::Actor, 4 | error::{Error, ErrorKind}, 5 | future::BoxFuture, 6 | jobs::{apub::get_inboxes, DeliverMany, JobState}, 7 | }; 8 | use activitystreams::prelude::*; 9 | use background_jobs::Job; 10 | 11 | #[derive(Clone, serde::Deserialize, serde::Serialize)] 12 | pub(crate) struct Forward { 13 | input: AcceptedActivities, 14 | actor: Actor, 15 | } 16 | 17 | impl std::fmt::Debug for Forward { 18 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 19 | f.debug_struct("Forward") 20 | .field("input", &self.input.id_unchecked()) 21 | .field("actor", &self.actor.id) 22 | .finish() 23 | } 24 | } 25 | 26 | impl Forward { 27 | pub fn new(input: AcceptedActivities, actor: Actor) -> Self { 28 | Forward { input, actor } 29 | } 30 | 31 | #[tracing::instrument(name = "Forward", skip(state))] 32 | async fn perform(self, state: JobState) -> Result<(), Error> { 33 | let object_id = self 34 | .input 35 | .object_unchecked() 36 | .as_single_id() 37 | .ok_or(ErrorKind::MissingId)?; 38 | 39 | let authority = self.actor.id.authority_str().ok_or_else(|| { 40 | ErrorKind::MissingDomain 41 | })?; 42 | 43 | let inboxes = get_inboxes(&state.state, &self.actor, object_id).await?; 44 | 45 | state 46 | .job_server 47 | .queue(DeliverMany::new(inboxes, self.input, authority.to_owned(), false)?) 
48 | .await?; 49 | 50 | Ok(()) 51 | } 52 | } 53 | 54 | impl Job for Forward { 55 | type State = JobState; 56 | type Future = BoxFuture<'static, anyhow::Result<()>>; 57 | 58 | const NAME: &'static str = "relay::jobs::apub::Forward"; 59 | const QUEUE: &'static str = "apub"; 60 | 61 | fn run(self, state: Self::State) -> Self::Future { 62 | Box::pin(async move { self.perform(state).await.map_err(Into::into) }) 63 | } 64 | } 65 | -------------------------------------------------------------------------------- /src/jobs/apub/reject.rs: -------------------------------------------------------------------------------- 1 | use crate::{ 2 | config::UrlKind, 3 | db::Actor, 4 | error::Error, 5 | future::BoxFuture, 6 | jobs::{apub::generate_undo_follow, Deliver, JobState}, 7 | }; 8 | use background_jobs::Job; 9 | 10 | #[derive(Clone, serde::Deserialize, serde::Serialize)] 11 | pub(crate) struct Reject(pub(crate) Actor); 12 | 13 | impl std::fmt::Debug for Reject { 14 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 15 | f.debug_struct("Reject").field("actor", &self.0.id).finish() 16 | } 17 | } 18 | 19 | impl Reject { 20 | #[tracing::instrument(name = "Reject", skip(state))] 21 | async fn perform(self, state: JobState) -> Result<(), Error> { 22 | state.actors.remove_connection(&self.0).await?; 23 | 24 | let my_id = state.config.generate_url(UrlKind::Actor); 25 | let undo = generate_undo_follow(&state.config, &self.0.id, &my_id)?; 26 | 27 | state 28 | .job_server 29 | .queue(Deliver::new(self.0.inbox, undo)?) 30 | .await?; 31 | 32 | Ok(()) 33 | } 34 | } 35 | 36 | impl Job for Reject { 37 | type State = JobState; 38 | type Future = BoxFuture<'static, anyhow::Result<()>>; 39 | 40 | const NAME: &'static str = "relay::jobs::apub::Reject"; 41 | const QUEUE: &'static str = "apub"; 42 | 43 | fn run(self, state: Self::State) -> Self::Future { 44 | Box::pin(async move { self.perform(state).await.map_err(Into::into) }) 45 | } 46 | } 47 | -------------------------------------------------------------------------------- /src/jobs/apub/undo.rs: -------------------------------------------------------------------------------- 1 | use crate::{ 2 | apub::AcceptedActivities, 3 | config::UrlKind, 4 | db::Actor, 5 | error::Error, 6 | future::BoxFuture, 7 | jobs::{apub::generate_undo_follow, Deliver, JobState}, 8 | }; 9 | use activitystreams::prelude::BaseExt; 10 | use background_jobs::Job; 11 | 12 | #[derive(Clone, serde::Deserialize, serde::Serialize)] 13 | pub(crate) struct Undo { 14 | input: AcceptedActivities, 15 | actor: Actor, 16 | } 17 | 18 | impl std::fmt::Debug for Undo { 19 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 20 | f.debug_struct("Undo") 21 | .field("input", &self.input.id_unchecked()) 22 | .field("actor", &self.actor.id) 23 | .finish() 24 | } 25 | } 26 | 27 | impl Undo { 28 | pub(crate) fn new(input: AcceptedActivities, actor: Actor) -> Self { 29 | Undo { input, actor } 30 | } 31 | 32 | #[tracing::instrument(name = "Undo", skip(state))] 33 | async fn perform(self, state: JobState) -> Result<(), Error> { 34 | let was_following = state.state.db.is_connected(self.actor.id.clone()).await?; 35 | 36 | state.actors.remove_connection(&self.actor).await?; 37 | 38 | if was_following { 39 | let my_id = state.config.generate_url(UrlKind::Actor); 40 | let undo = generate_undo_follow(&state.config, &self.actor.id, &my_id)?; 41 | state 42 | .job_server 43 | .queue(Deliver::new(self.actor.inbox, undo)?) 
44 | .await?; 45 | } 46 | 47 | Ok(()) 48 | } 49 | } 50 | 51 | impl Job for Undo { 52 | type State = JobState; 53 | type Future = BoxFuture<'static, anyhow::Result<()>>; 54 | 55 | const NAME: &'static str = "relay::jobs::apub::Undo"; 56 | const QUEUE: &'static str = "apub"; 57 | 58 | fn run(self, state: Self::State) -> Self::Future { 59 | Box::pin(async move { self.perform(state).await.map_err(Into::into) }) 60 | } 61 | } 62 | -------------------------------------------------------------------------------- /src/jobs/contact.rs: -------------------------------------------------------------------------------- 1 | use crate::{ 2 | apub::AcceptedActors, 3 | error::{Error, ErrorKind}, 4 | future::BoxFuture, 5 | jobs::JobState, 6 | requests::BreakerStrategy, 7 | }; 8 | use activitystreams::{iri_string::types::IriString, object::Image, prelude::*}; 9 | use background_jobs::Job; 10 | 11 | #[derive(Clone, serde::Deserialize, serde::Serialize)] 12 | pub(crate) struct QueryContact { 13 | actor_id: IriString, 14 | contact_id: IriString, 15 | } 16 | 17 | impl std::fmt::Debug for QueryContact { 18 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 19 | f.debug_struct("QueryContact") 20 | .field("actor_id", &self.actor_id.to_string()) 21 | .field("contact_id", &self.contact_id.to_string()) 22 | .finish() 23 | } 24 | } 25 | 26 | impl QueryContact { 27 | pub(crate) fn new(actor_id: IriString, contact_id: IriString) -> Self { 28 | QueryContact { 29 | actor_id, 30 | contact_id, 31 | } 32 | } 33 | 34 | async fn perform(self, state: JobState) -> Result<(), Error> { 35 | let contact_outdated = state 36 | .state 37 | .node_cache 38 | .is_contact_outdated(self.actor_id.clone()) 39 | .await; 40 | 41 | if !contact_outdated { 42 | return Ok(()); 43 | } 44 | 45 | let contact = match state 46 | .state 47 | .requests 48 | .fetch::(&self.contact_id, BreakerStrategy::Allow404AndBelow) 49 | .await 50 | { 51 | Ok(contact) => contact, 52 | Err(e) if e.is_breaker() => { 53 | tracing::debug!("Not retrying due to failed breaker"); 54 | return Ok(()); 55 | } 56 | Err(e) => return Err(e), 57 | }; 58 | 59 | let (username, display_name, url, avatar) = 60 | to_contact(contact).ok_or(ErrorKind::Extract("contact"))?; 61 | 62 | state 63 | .state 64 | .node_cache 65 | .set_contact(self.actor_id, username, display_name, url, avatar) 66 | .await?; 67 | 68 | Ok(()) 69 | } 70 | } 71 | 72 | fn to_contact(contact: AcceptedActors) -> Option<(String, String, IriString, IriString)> { 73 | let username = contact.preferred_username()?.to_owned(); 74 | let display_name = contact.name()?.as_one()?.as_xsd_string()?.to_owned(); 75 | 76 | let url = contact.url()?.as_single_id()?.to_owned(); 77 | let any_base = contact.icon()?.as_one()?; 78 | 79 | let avatar = Image::from_any_base(any_base.clone()) 80 | .ok()?? 81 | .url()? 82 | .as_single_id()? 
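
to_contact only needs four pieces of the fetched actor document: preferredUsername, name, url, and the icon's url. A trimmed-down, hand-written example of that shape follows (domain and values invented; compare the full HyNET fixture in the test below).

```rust
// Hand-written minimal actor document containing just the fields to_contact
// reads: preferredUsername, name, url and icon.url. The domain and values
// are invented for illustration.
fn main() {
    let contact = serde_json::json!({
        "@context": "https://www.w3.org/ns/activitystreams",
        "id": "https://social.example/users/admin",
        "type": "Person",
        "preferredUsername": "admin",            // -> username
        "name": "Example Admin",                 // -> display_name
        "url": "https://social.example/@admin",  // -> url
        "icon": {
            "type": "Image",
            "url": "https://social.example/media/avatar.png" // -> avatar
        }
    });

    println!("{contact:#}");
}
```
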
83 | .to_owned(); 84 | 85 | Some((username, display_name, url, avatar)) 86 | } 87 | 88 | impl Job for QueryContact { 89 | type State = JobState; 90 | type Future = BoxFuture<'static, anyhow::Result<()>>; 91 | 92 | const NAME: &'static str = "relay::jobs::QueryContact"; 93 | const QUEUE: &'static str = "maintenance"; 94 | 95 | fn run(self, state: Self::State) -> Self::Future { 96 | Box::pin(async move { self.perform(state).await.map_err(Into::into) }) 97 | } 98 | } 99 | 100 | #[cfg(test)] 101 | mod tests { 102 | use super::to_contact; 103 | 104 | const HYNET_ADMIN: &str = r#"{"@context":["https://www.w3.org/ns/activitystreams","https://soc.hyena.network/schemas/litepub-0.1.jsonld",{"@language":"und"}],"alsoKnownAs":[],"attachment":[{"name":"Website","type":"PropertyValue","value":"https://hyena.network/"},{"name":"Services","type":"PropertyValue","value":"Pleroma, Invidious, SearX, XMPP"},{"name":"CW","type":"PropertyValue","value":"all long posts"}],"capabilities":{"acceptsChatMessages":true},"discoverable":true,"endpoints":{"oauthAuthorizationEndpoint":"https://soc.hyena.network/oauth/authorize","oauthRegistrationEndpoint":"https://soc.hyena.network/api/v1/apps","oauthTokenEndpoint":"https://soc.hyena.network/oauth/token","sharedInbox":"https://soc.hyena.network/inbox","uploadMedia":"https://soc.hyena.network/api/ap/upload_media"},"followers":"https://soc.hyena.network/users/HyNET/followers","following":"https://soc.hyena.network/users/HyNET/following","icon":{"type":"Image","url":"https://soc.hyena.network/media/ab149b1e0196ffdbecc6830c7f6f1a14dd8d8408ec7db0f1e8ad9d40e600ea73.gif"},"id":"https://soc.hyena.network/users/HyNET","image":{"type":"Image","url":"https://soc.hyena.network/media/12ba78d3015e13aa65ac4e106e574dd7bf959614585f10ce85de40e0148da677.png"},"inbox":"https://soc.hyena.network/users/HyNET/inbox","manuallyApprovesFollowers":false,"name":"HyNET Announcement System :glider:","outbox":"https://soc.hyena.network/users/HyNET/outbox","preferredUsername":"HyNET","publicKey":{"id":"https://soc.hyena.network/users/HyNET#main-key","owner":"https://soc.hyena.network/users/HyNET","publicKeyPem":"-----BEGIN PUBLIC KEY-----\nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAyF74womumWRhR7RW4Q6a\n2+Av/Ue8QHiKwjQARJEakbKnKgkI5FRFVVOfMiYVJp/juNt4GLgK15panBqJa9Yt\nWACiHQjBd2yVI5tIHiae0uBj5SdUVuduoycVLG0lpJsg12p8m/vL1oaeLqehTqa6\nsYplQh1GCLet0cUdn/66Cj2pAPD3V7Bz3VnG+oyXIsGQbBB8RHnWhFH8b0qQOyur\nJRAB8aye6QAL2sQbfISM2lycWzNeIHkqsUb7FdqdhQ+Ze0rETRGDkOO2Qvpg0hQm\n6owMsHnHA/DzyOHLy6Yf+I3OUlBC/P1SSAKwORsifFDXL322AEqoDi5ZpwzG9m5z\nAQIDAQAB\n-----END PUBLIC KEY-----\n\n"},"summary":"Ran by @mel :adm1::adm2:
For direct help with the service, send @mel a message.","tag":[{"icon":{"type":"Image","url":"https://soc.hyena.network/emoji/Signs/adm1.png"},"id":"https://soc.hyena.network/emoji/Signs/adm1.png","name":":adm1:","type":"Emoji","updated":"1970-01-01T00:00:00Z"},{"icon":{"type":"Image","url":"https://soc.hyena.network/emoji/Signs/adm2.png"},"id":"https://soc.hyena.network/emoji/Signs/adm2.png","name":":adm2:","type":"Emoji","updated":"1970-01-01T00:00:00Z"},{"icon":{"type":"Image","url":"https://soc.hyena.network/emoji/misc/glider.png"},"id":"https://soc.hyena.network/emoji/misc/glider.png","name":":glider:","type":"Emoji","updated":"1970-01-01T00:00:00Z"}],"type":"Service","url":"https://soc.hyena.network/users/HyNET"}"#; 105 | 106 | #[test] 107 | fn parse_hynet() { 108 | let actor = serde_json::from_str(HYNET_ADMIN).unwrap(); 109 | to_contact(actor).unwrap(); 110 | } 111 | } 112 | -------------------------------------------------------------------------------- /src/jobs/deliver.rs: -------------------------------------------------------------------------------- 1 | use crate::{ 2 | error::Error, 3 | future::BoxFuture, 4 | jobs::{debug_object, JobState}, 5 | requests::BreakerStrategy, 6 | }; 7 | use activitystreams::iri_string::types::IriString; 8 | use background_jobs::{Backoff, Job}; 9 | 10 | #[derive(Clone, serde::Deserialize, serde::Serialize)] 11 | pub(crate) struct Deliver { 12 | to: IriString, 13 | data: serde_json::Value, 14 | } 15 | 16 | impl std::fmt::Debug for Deliver { 17 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 18 | f.debug_struct("Deliver") 19 | .field("to", &self.to.to_string()) 20 | .field("activity", &self.data["type"]) 21 | .field("object", debug_object(&self.data)) 22 | .finish() 23 | } 24 | } 25 | 26 | impl Deliver { 27 | pub(crate) fn new<T>(to: IriString, data: T) -> Result<Self, Error> 28 | where 29 | T: serde::ser::Serialize, 30 | { 31 | Ok(Deliver { 32 | to, 33 | data: serde_json::to_value(data)?, 34 | }) 35 | } 36 | 37 | #[tracing::instrument(name = "Deliver", skip(state))] 38 | async fn perform(self, state: JobState) -> Result<(), Error> { 39 | if let Err(e) = state 40 | .state 41 | .requests 42 | .deliver(&self.to, &self.data, BreakerStrategy::Allow401AndBelow) 43 | .await 44 | { 45 | if e.is_breaker() { 46 | tracing::debug!("Not retrying due to failed breaker"); 47 | return Ok(()); 48 | } 49 | if e.is_bad_request() { 50 | tracing::debug!("Server didn't understand the activity"); 51 | return Ok(()); 52 | } 53 | return Err(e); 54 | } 55 | Ok(()) 56 | } 57 | } 58 | 59 | impl Job for Deliver { 60 | type State = JobState; 61 | type Future = BoxFuture<'static, anyhow::Result<()>>; 62 | 63 | const NAME: &'static str = "relay::jobs::Deliver"; 64 | const QUEUE: &'static str = "deliver"; 65 | const BACKOFF: Backoff = Backoff::Exponential(8); 66 | 67 | // TEMPORARY PATCH 68 | const MAX_RETRIES: background_jobs::MaxRetries = background_jobs::MaxRetries::Count(3); 69 | 70 | fn run(self, state: Self::State) -> Self::Future { 71 | Box::pin(async move { self.perform(state).await.map_err(Into::into) }) 72 | } 73 | } 74 | -------------------------------------------------------------------------------- /src/jobs/deliver_many.rs: -------------------------------------------------------------------------------- 1 | use crate::{ 2 | error::Error, 3 | future::BoxFuture, 4 | jobs::{debug_object, Deliver, JobState}, 5 | }; 6 | use activitystreams::iri_string::types::IriString; 7 | use background_jobs::Job; 8 | use rand::Rng; 9 | 10 | use crate::data::NodeConfig; 11 | 12 | 
#[derive(Clone, serde::Deserialize, serde::Serialize)] 13 | pub(crate) struct DeliverMany { 14 | to: Vec<IriString>, 15 | filterable: bool, 16 | data: serde_json::Value, 17 | actor_authority: String, 18 | } 19 | 20 | impl std::fmt::Debug for DeliverMany { 21 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 22 | f.debug_struct("DeliverMany") 23 | .field("activity", &self.data["type"]) 24 | .field("object", debug_object(&self.data)) 25 | .finish() 26 | } 27 | } 28 | 29 | impl DeliverMany { 30 | pub(crate) fn new<T>(to: Vec<IriString>, data: T, actor_authority: String, filterable: bool) -> Result<Self, Error> 31 | where 32 | T: serde::ser::Serialize, 33 | { 34 | Ok(DeliverMany { 35 | to, 36 | filterable, 37 | data: serde_json::to_value(data)?, 38 | actor_authority, 39 | }) 40 | } 41 | 42 | fn apply_filter(dice: u8, authority: &str, config: &NodeConfig) -> bool { 43 | if config.enable_probability && config.probability < dice { 44 | return false; 45 | } 46 | 47 | let has_authority = config.authority_set.contains(authority); 48 | 49 | if config.is_allowlist { 50 | has_authority 51 | } else { 52 | !has_authority 53 | } 54 | } 55 | 56 | #[tracing::instrument(name = "Deliver many", skip(state))] 57 | async fn perform(self, state: JobState) -> Result<(), Error> { 58 | let dice = rand::thread_rng().gen::<u8>(); 59 | 60 | for inbox in self.to { 61 | if self.filterable { 62 | // Every inbox should have an authority, but fall back to an empty string just in case 63 | let inbox_authority = inbox.authority_str().unwrap_or(""); 64 | 65 | let node_config = match state.state.node_config.read() { 66 | Ok(node_config) => node_config, 67 | Err(e) => { 68 | tracing::error!("Failed to acquire read lock for node config: {}", e); 69 | continue; 70 | } 71 | }; 72 | 73 | if let Some(cfg) = node_config.get(inbox_authority) { 74 | if !Self::apply_filter(dice, &self.actor_authority, cfg) { 75 | tracing::info!("Skipping egress to {} due to configured criteria", inbox_authority); 76 | continue; 77 | } 78 | } 79 | } 80 | 81 | state 82 | .job_server 83 | .queue(Deliver::new(inbox, self.data.clone())?) 84 | .await?; 85 | } 86 | 87 | Ok(()) 88 | } 89 | } 90 | 91 | impl Job for DeliverMany { 92 | type State = JobState; 93 | type Future = BoxFuture<'static, anyhow::Result<()>>; 94 | 95 | const NAME: &'static str = "relay::jobs::DeliverMany"; 96 | const QUEUE: &'static str = "deliver"; 97 | 98 | fn run(self, state: Self::State) -> Self::Future { 99 | Box::pin(async move { self.perform(state).await.map_err(Into::into) }) 100 | } 101 | } 102 | -------------------------------------------------------------------------------- /src/jobs/process_listeners.rs: -------------------------------------------------------------------------------- 1 | use crate::{ 2 | error::Error, 3 | future::BoxFuture, 4 | jobs::{instance::QueryInstance, nodeinfo::QueryNodeinfo, JobState}, 5 | }; 6 | use background_jobs::Job; 7 | 8 | #[derive(Clone, Debug, serde::Deserialize, serde::Serialize)] 9 | pub(crate) struct Listeners; 10 | 11 | impl Listeners { 12 | #[tracing::instrument(name = "Spawn query instances", skip(state))] 13 | async fn perform(self, state: JobState) -> Result<(), Error> { 14 | for actor_id in state.state.db.connected_ids().await?
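
The filter applied by DeliverMany above combines probabilistic sampling with an allowlist or blocklist of authorities. A standalone sketch of the same rule, with MiniNodeConfig standing in for crate::data::NodeConfig:

```rust
// Standalone sketch of the filtering rule in DeliverMany::apply_filter.
// MiniNodeConfig is an invented stand-in for crate::data::NodeConfig.
use std::collections::HashSet;

struct MiniNodeConfig {
    enable_probability: bool,
    probability: u8,
    is_allowlist: bool,
    authority_set: HashSet<String>,
}

fn apply_filter(dice: u8, authority: &str, config: &MiniNodeConfig) -> bool {
    // Probabilistic sampling: drop the delivery when the roll exceeds the
    // configured probability.
    if config.enable_probability && config.probability < dice {
        return false;
    }

    let has_authority = config.authority_set.contains(authority);

    // Allowlist keeps only listed authorities; blocklist drops them.
    if config.is_allowlist {
        has_authority
    } else {
        !has_authority
    }
}

fn main() {
    let config = MiniNodeConfig {
        enable_probability: false,
        probability: 0,
        is_allowlist: false,
        authority_set: ["spam.example".to_string()].into_iter().collect(),
    };

    assert!(apply_filter(0, "social.example", &config)); // delivered
    assert!(!apply_filter(0, "spam.example", &config));  // filtered out
    println!("filter behaves as expected");
}
```
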
{ 15 | state 16 | .job_server 17 | .queue(QueryInstance::new(actor_id.clone())) 18 | .await?; 19 | state.job_server.queue(QueryNodeinfo::new(actor_id)).await?; 20 | } 21 | 22 | Ok(()) 23 | } 24 | } 25 | 26 | impl Job for Listeners { 27 | type State = JobState; 28 | type Future = BoxFuture<'static, anyhow::Result<()>>; 29 | 30 | const NAME: &'static str = "relay::jobs::Listeners"; 31 | const QUEUE: &'static str = "maintenance"; 32 | 33 | fn run(self, state: Self::State) -> Self::Future { 34 | Box::pin(async move { self.perform(state).await.map_err(Into::into) }) 35 | } 36 | } 37 | -------------------------------------------------------------------------------- /src/jobs/record_last_online.rs: -------------------------------------------------------------------------------- 1 | use crate::{error::Error, future::BoxFuture, jobs::JobState}; 2 | use background_jobs::{Backoff, Job}; 3 | 4 | #[derive(Clone, Debug, serde::Deserialize, serde::Serialize)] 5 | pub(crate) struct RecordLastOnline; 6 | 7 | impl RecordLastOnline { 8 | #[tracing::instrument(skip(state))] 9 | async fn perform(self, state: JobState) -> Result<(), Error> { 10 | let nodes = state.state.last_online.take(); 11 | 12 | state.state.db.mark_last_seen(nodes).await 13 | } 14 | } 15 | 16 | impl Job for RecordLastOnline { 17 | type State = JobState; 18 | type Future = BoxFuture<'static, anyhow::Result<()>>; 19 | 20 | const NAME: &'static str = "relay::jobs::RecordLastOnline"; 21 | const QUEUE: &'static str = "maintenance"; 22 | const BACKOFF: Backoff = Backoff::Linear(1); 23 | 24 | fn run(self, state: Self::State) -> Self::Future { 25 | Box::pin(async move { self.perform(state).await.map_err(Into::into) }) 26 | } 27 | } 28 | -------------------------------------------------------------------------------- /src/middleware.rs: -------------------------------------------------------------------------------- 1 | mod payload; 2 | mod timings; 3 | mod verifier; 4 | mod webfinger; 5 | 6 | pub(crate) use payload::DebugPayload; 7 | pub(crate) use timings::Timings; 8 | pub(crate) use verifier::MyVerify; 9 | pub(crate) use webfinger::RelayResolver; 10 | -------------------------------------------------------------------------------- /src/middleware/payload.rs: -------------------------------------------------------------------------------- 1 | use actix_web::{ 2 | dev::{Payload, Service, ServiceRequest, Transform}, 3 | http::Method, 4 | web::BytesMut, 5 | HttpMessage, 6 | }; 7 | use std::{ 8 | future::{ready, Ready}, 9 | task::{Context, Poll}, 10 | }; 11 | use streem::IntoStreamer; 12 | 13 | #[derive(Clone, Debug)] 14 | pub(crate) struct DebugPayload(pub bool); 15 | 16 | #[doc(hidden)] 17 | #[derive(Clone, Debug)] 18 | pub(crate) struct DebugPayloadMiddleware(bool, S); 19 | 20 | impl Transform for DebugPayload 21 | where 22 | S: Service, 23 | S::Future: 'static, 24 | S::Error: 'static, 25 | { 26 | type Response = S::Response; 27 | type Error = S::Error; 28 | type InitError = (); 29 | type Transform = DebugPayloadMiddleware; 30 | type Future = Ready>; 31 | 32 | fn new_transform(&self, service: S) -> Self::Future { 33 | ready(Ok(DebugPayloadMiddleware(self.0, service))) 34 | } 35 | } 36 | 37 | impl Service for DebugPayloadMiddleware 38 | where 39 | S: Service, 40 | S::Future: 'static, 41 | S::Error: 'static, 42 | { 43 | type Response = S::Response; 44 | type Error = S::Error; 45 | type Future = S::Future; 46 | 47 | fn poll_ready(&self, cx: &mut Context<'_>) -> Poll> { 48 | self.1.poll_ready(cx) 49 | } 50 | 51 | fn call(&self, mut req: ServiceRequest) 
-> Self::Future { 52 | if self.0 && req.method() == Method::POST { 53 | let mut pl = req.take_payload().into_streamer(); 54 | 55 | req.set_payload(Payload::Stream { 56 | payload: Box::pin(streem::try_from_fn(|yielder| async move { 57 | let mut buf = BytesMut::new(); 58 | 59 | while let Some(bytes) = pl.try_next().await? { 60 | buf.extend(bytes); 61 | } 62 | 63 | let bytes = buf.freeze(); 64 | tracing::info!("{}", String::from_utf8_lossy(&bytes)); 65 | 66 | yielder.yield_ok(bytes).await; 67 | 68 | Ok(()) 69 | })), 70 | }); 71 | 72 | self.1.call(req) 73 | } else { 74 | self.1.call(req) 75 | } 76 | } 77 | } 78 | -------------------------------------------------------------------------------- /src/middleware/timings.rs: -------------------------------------------------------------------------------- 1 | use actix_web::{ 2 | body::MessageBody, 3 | dev::{Service, ServiceRequest, ServiceResponse, Transform}, 4 | http::StatusCode, 5 | }; 6 | use std::{ 7 | future::{ready, Future, Ready}, 8 | time::Instant, 9 | }; 10 | 11 | pub(crate) struct Timings; 12 | pub(crate) struct TimingsMiddleware(S); 13 | 14 | struct LogOnDrop { 15 | begin: Instant, 16 | path: String, 17 | method: String, 18 | arm: bool, 19 | } 20 | 21 | pin_project_lite::pin_project! { 22 | pub(crate) struct TimingsFuture { 23 | #[pin] 24 | future: F, 25 | 26 | log_on_drop: Option, 27 | } 28 | } 29 | 30 | pin_project_lite::pin_project! { 31 | pub(crate) struct TimingsBody { 32 | #[pin] 33 | body: B, 34 | 35 | log_on_drop: LogOnDrop, 36 | } 37 | } 38 | 39 | impl Drop for LogOnDrop { 40 | fn drop(&mut self) { 41 | if self.arm { 42 | let duration = self.begin.elapsed(); 43 | metrics::histogram!("relay.request.complete", "path" => self.path.clone(), "method" => self.method.clone()).record(duration); 44 | } 45 | } 46 | } 47 | 48 | impl Transform for Timings 49 | where 50 | S: Service, Error = actix_web::Error>, 51 | S::Future: 'static, 52 | { 53 | type Response = ServiceResponse>; 54 | type Error = S::Error; 55 | type InitError = (); 56 | type Transform = TimingsMiddleware; 57 | type Future = Ready>; 58 | 59 | fn new_transform(&self, service: S) -> Self::Future { 60 | ready(Ok(TimingsMiddleware(service))) 61 | } 62 | } 63 | 64 | impl Service for TimingsMiddleware 65 | where 66 | S: Service, Error = actix_web::Error>, 67 | S::Future: 'static, 68 | { 69 | type Response = ServiceResponse>; 70 | type Error = S::Error; 71 | type Future = TimingsFuture; 72 | 73 | fn poll_ready( 74 | &self, 75 | ctx: &mut core::task::Context<'_>, 76 | ) -> std::task::Poll> { 77 | self.0.poll_ready(ctx) 78 | } 79 | 80 | fn call(&self, req: ServiceRequest) -> Self::Future { 81 | let log_on_drop = LogOnDrop { 82 | begin: Instant::now(), 83 | path: req.path().to_string(), 84 | method: req.method().to_string(), 85 | arm: false, 86 | }; 87 | 88 | let future = self.0.call(req); 89 | 90 | TimingsFuture { 91 | future, 92 | log_on_drop: Some(log_on_drop), 93 | } 94 | } 95 | } 96 | 97 | impl Future for TimingsFuture 98 | where 99 | F: Future, actix_web::Error>>, 100 | { 101 | type Output = Result>, actix_web::Error>; 102 | 103 | fn poll( 104 | self: std::pin::Pin<&mut Self>, 105 | cx: &mut std::task::Context<'_>, 106 | ) -> std::task::Poll { 107 | let this = self.project(); 108 | 109 | let res = std::task::ready!(this.future.poll(cx)); 110 | 111 | let mut log_on_drop = this 112 | .log_on_drop 113 | .take() 114 | .expect("TimingsFuture polled after completion"); 115 | 116 | let status = match &res { 117 | Ok(res) => res.status(), 118 | Err(e) => 
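
The core trick in the Timings middleware is the LogOnDrop guard: the histogram is recorded when the wrapped response body is dropped, which is when the request is actually finished. A standalone sketch of that Drop-guard idea, printing instead of emitting metrics:

```rust
use std::time::Instant;

// Standalone sketch of the LogOnDrop idea: start a timer when the request
// arrives and report the elapsed time from Drop, i.e. when the wrapped
// response body is finally released.
struct LogOnDrop {
    begin: Instant,
    path: String,
    method: String,
    arm: bool,
}

impl Drop for LogOnDrop {
    fn drop(&mut self) {
        // Only report once the middleware has decided the request is worth
        // recording (mirroring the `arm` flag above).
        if self.arm {
            let duration = self.begin.elapsed();
            println!("{} {} took {duration:?}", self.method, self.path);
        }
    }
}

fn main() {
    let mut guard = LogOnDrop {
        begin: Instant::now(),
        path: "/inbox".to_string(),
        method: "POST".to_string(),
        arm: false,
    };

    // ... handle the request ...
    guard.arm = true; // response status was acceptable

    // `guard` is dropped here and prints the elapsed time.
}
```
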
e.as_response_error().status_code(), 119 | }; 120 | 121 | log_on_drop.arm = 122 | status != StatusCode::NOT_FOUND && status != StatusCode::METHOD_NOT_ALLOWED; 123 | 124 | let res = res.map(|r| r.map_body(|_, body| TimingsBody { body, log_on_drop })); 125 | 126 | std::task::Poll::Ready(res) 127 | } 128 | } 129 | 130 | impl MessageBody for TimingsBody { 131 | type Error = B::Error; 132 | 133 | fn size(&self) -> actix_web::body::BodySize { 134 | self.body.size() 135 | } 136 | 137 | fn poll_next( 138 | self: std::pin::Pin<&mut Self>, 139 | cx: &mut std::task::Context<'_>, 140 | ) -> std::task::Poll>> { 141 | self.project().body.poll_next(cx) 142 | } 143 | } 144 | -------------------------------------------------------------------------------- /src/middleware/verifier.rs: -------------------------------------------------------------------------------- 1 | use crate::{ 2 | apub::AcceptedActors, 3 | data::{ActorCache, State}, 4 | error::{Error, ErrorKind}, 5 | requests::{BreakerStrategy, Requests}, 6 | spawner::Spawner, 7 | }; 8 | use activitystreams::{base::BaseExt, iri, iri_string::types::IriString}; 9 | use base64::{engine::general_purpose::STANDARD, Engine}; 10 | use http_signature_normalization_actix::{prelude::*, verify::DeprecatedAlgorithm, Spawn}; 11 | use rsa::{pkcs1::EncodeRsaPublicKey, pkcs8::DecodePublicKey, RsaPublicKey}; 12 | use std::{future::Future, pin::Pin}; 13 | 14 | #[derive(Clone, Debug)] 15 | pub(crate) struct MyVerify(pub Requests, pub ActorCache, pub State, pub Spawner); 16 | 17 | impl MyVerify { 18 | #[tracing::instrument("Verify request", skip(self, signature, signing_string))] 19 | async fn verify( 20 | &self, 21 | algorithm: Option, 22 | key_id: String, 23 | signature: String, 24 | signing_string: String, 25 | ) -> Result { 26 | let public_key_id = iri!(key_id); 27 | 28 | // receiving an activity from a domain indicates it is probably online 29 | self.0.reset_breaker(&public_key_id); 30 | 31 | let actor_id = if let Some(mut actor_id) = self 32 | .2 33 | .db 34 | .actor_id_from_public_key_id(public_key_id.clone()) 35 | .await? 36 | { 37 | if !self.2.db.is_allowed(actor_id.clone()).await? { 38 | return Err(ErrorKind::NotAllowed(key_id).into()); 39 | } 40 | 41 | actor_id.set_fragment(None); 42 | let actor = self.1.get(&actor_id, &self.0).await?; 43 | let was_cached = actor.is_cached(); 44 | let actor = actor.into_inner(); 45 | 46 | match algorithm { 47 | Some(Algorithm::Hs2019) => (), 48 | Some(Algorithm::Deprecated(DeprecatedAlgorithm::RsaSha256)) => (), 49 | Some(other) => { 50 | return Err(ErrorKind::Algorithm(other.to_string()).into()); 51 | } 52 | None => (), 53 | }; 54 | 55 | let res = do_verify( 56 | &self.3, 57 | &actor.public_key, 58 | signature.clone(), 59 | signing_string.clone(), 60 | ) 61 | .await; 62 | 63 | if let Err(e) = res { 64 | if !was_cached { 65 | return Err(e); 66 | } 67 | } else { 68 | return Ok(true); 69 | } 70 | 71 | actor_id 72 | } else { 73 | match self 74 | .0 75 | .fetch::(&public_key_id, BreakerStrategy::Require2XX) 76 | .await 77 | { 78 | Ok(res) => res.actor_id().ok_or(ErrorKind::MissingId), 79 | Err(e) => { 80 | if e.is_gone() { 81 | tracing::warn!("Actor gone: {public_key_id}"); 82 | return Ok(false); 83 | } else { 84 | return Err(e); 85 | } 86 | } 87 | }? 
88 | }; 89 | 90 | // Previously we verified the sig from an actor's local cache 91 | // 92 | // Now we make sure we fetch an updated actor 93 | let actor = self.1.get_no_cache(&actor_id, &self.0).await?; 94 | 95 | do_verify(&self.3, &actor.public_key, signature, signing_string).await?; 96 | 97 | Ok(true) 98 | } 99 | } 100 | 101 | #[derive(serde::Deserialize)] 102 | #[serde(untagged)] 103 | #[serde(rename_all = "camelCase")] 104 | enum PublicKeyResponse { 105 | PublicKey { 106 | #[allow(dead_code)] 107 | id: IriString, 108 | owner: IriString, 109 | #[allow(dead_code)] 110 | public_key_pem: String, 111 | }, 112 | Actor(Box), 113 | } 114 | 115 | impl PublicKeyResponse { 116 | fn actor_id(&self) -> Option { 117 | match self { 118 | PublicKeyResponse::PublicKey { owner, .. } => Some(owner.clone()), 119 | PublicKeyResponse::Actor(actor) => actor.id_unchecked().cloned(), 120 | } 121 | } 122 | } 123 | 124 | #[tracing::instrument("Verify signature")] 125 | async fn do_verify( 126 | spawner: &Spawner, 127 | public_key: &str, 128 | signature: String, 129 | signing_string: String, 130 | ) -> Result<(), Error> { 131 | let public_key = RsaPublicKey::from_public_key_pem(public_key.trim())?; 132 | let public_key_der = public_key 133 | .to_pkcs1_der() 134 | .map_err(|_| ErrorKind::DerEncode)?; 135 | let public_key = ring::signature::UnparsedPublicKey::new( 136 | &ring::signature::RSA_PKCS1_2048_8192_SHA256, 137 | public_key_der, 138 | ); 139 | 140 | let span = tracing::Span::current(); 141 | spawner 142 | .spawn_blocking(move || { 143 | span.in_scope(|| { 144 | let decoded = STANDARD.decode(signature)?; 145 | 146 | public_key 147 | .verify(signing_string.as_bytes(), decoded.as_slice()) 148 | .map_err(|_| ErrorKind::VerifySignature)?; 149 | 150 | Ok(()) as Result<(), Error> 151 | }) 152 | }) 153 | .await??; 154 | 155 | Ok(()) 156 | } 157 | 158 | impl SignatureVerify for MyVerify { 159 | type Error = Error; 160 | type Future = Pin>>>; 161 | 162 | fn signature_verify( 163 | &mut self, 164 | algorithm: Option, 165 | key_id: String, 166 | signature: String, 167 | signing_string: String, 168 | ) -> Self::Future { 169 | let this = self.clone(); 170 | 171 | Box::pin(async move { 172 | this.verify(algorithm, key_id, signature, signing_string) 173 | .await 174 | }) 175 | } 176 | } 177 | 178 | #[cfg(test)] 179 | mod tests { 180 | use crate::apub::AcceptedActors; 181 | use rsa::{pkcs8::DecodePublicKey, RsaPublicKey}; 182 | 183 | const ASONIX_DOG_ACTOR: &str = 
r#"{"@context":["https://www.w3.org/ns/activitystreams","https://w3id.org/security/v1",{"manuallyApprovesFollowers":"as:manuallyApprovesFollowers","toot":"http://joinmastodon.org/ns#","featured":{"@id":"toot:featured","@type":"@id"},"featuredTags":{"@id":"toot:featuredTags","@type":"@id"},"alsoKnownAs":{"@id":"as:alsoKnownAs","@type":"@id"},"movedTo":{"@id":"as:movedTo","@type":"@id"},"schema":"http://schema.org#","PropertyValue":"schema:PropertyValue","value":"schema:value","discoverable":"toot:discoverable","Device":"toot:Device","Ed25519Signature":"toot:Ed25519Signature","Ed25519Key":"toot:Ed25519Key","Curve25519Key":"toot:Curve25519Key","EncryptedMessage":"toot:EncryptedMessage","publicKeyBase64":"toot:publicKeyBase64","deviceId":"toot:deviceId","claim":{"@type":"@id","@id":"toot:claim"},"fingerprintKey":{"@type":"@id","@id":"toot:fingerprintKey"},"identityKey":{"@type":"@id","@id":"toot:identityKey"},"devices":{"@type":"@id","@id":"toot:devices"},"messageFranking":"toot:messageFranking","messageType":"toot:messageType","cipherText":"toot:cipherText","suspended":"toot:suspended"}],"id":"https://masto.asonix.dog/actor","type":"Application","inbox":"https://masto.asonix.dog/actor/inbox","outbox":"https://masto.asonix.dog/actor/outbox","preferredUsername":"masto.asonix.dog","url":"https://masto.asonix.dog/about/more?instance_actor=true","manuallyApprovesFollowers":true,"publicKey":{"id":"https://masto.asonix.dog/actor#main-key","owner":"https://masto.asonix.dog/actor","publicKeyPem":"-----BEGIN PUBLIC KEY-----\nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAx8zXS0QNg9YGUBsxAOBH\nJaxIn7i6t+Z4UOpSFDVa2kP0NvQgIJsq3wzRqvaiuncRWpkyFk1fTakiRGD32xnY\nt+juuAaIBlU8eswKyANFqhcLAvFHmT3rA1848M4/YM19djvlL/PR9T53tPNHU+el\nS9MlsG3o6Zsj8YaUJtCI8RgEuJoROLHUb/V9a3oMQ7CfuIoSvF3VEz3/dRT09RW6\n0wQX7yhka9WlKuayWLWmTcB9lAIX6neBk+qKc8VSEsO7mHkzB8mRgVcS2uYZl1eA\nD8/jTT+SlpeFNDZk0Oh35GNFoOxh9qjRw3NGxu7jJCVBehDODzasOv4xDxKAhONa\njQIDAQAB\n-----END PUBLIC KEY-----\n"},"endpoints":{"sharedInbox":"https://masto.asonix.dog/inbox"}}"#; 184 | const KARJALAZET_RELAY: &str = r#"{"@context":["https://www.w3.org/ns/activitystreams","https://pleroma.karjalazet.se/schemas/litepub-0.1.jsonld",{"@language":"und"}],"alsoKnownAs":[],"attachment":[],"capabilities":{},"discoverable":false,"endpoints":{"oauthAuthorizationEndpoint":"https://pleroma.karjalazet.se/oauth/authorize","oauthRegistrationEndpoint":"https://pleroma.karjalazet.se/api/v1/apps","oauthTokenEndpoint":"https://pleroma.karjalazet.se/oauth/token","sharedInbox":"https://pleroma.karjalazet.se/inbox","uploadMedia":"https://pleroma.karjalazet.se/api/ap/upload_media"},"featured":"https://pleroma.karjalazet.se/relay/collections/featured","followers":"https://pleroma.karjalazet.se/relay/followers","following":"https://pleroma.karjalazet.se/relay/following","id":"https://pleroma.karjalazet.se/relay","inbox":"https://pleroma.karjalazet.se/relay/inbox","manuallyApprovesFollowers":false,"name":null,"outbox":"https://pleroma.karjalazet.se/relay/outbox","preferredUsername":"relay","publicKey":{"id":"https://pleroma.karjalazet.se/relay#main-key","owner":"https://pleroma.karjalazet.se/relay","publicKeyPem":"-----BEGIN PUBLIC 
KEY-----\nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAucoyCht6QpEzUPdQWP/J\nJYxObSH3MCcXBnG4d0OX78QshloeAHhl78EZ5c8I0ePmIjDg2NFK3/pG0EvSrHe2\nIZHnHaN5emgCb2ifNya5W572yfQXo1tUQy+ZXtbTUA7BWbr4LuCvd+HUavMwbx72\neraSZTiQj//ObwpbXFoZO5I/+e5avGmVnfmr/y2cG95hqFDtI3438RgZyBjY5kJM\nY1MLWoY9itGSfYmBtxRj3umlC2bPuBB+hHUJi6TvP7NO6zuUZ66m4ETyuBDi8iP6\ngnUp3Q4+1/I3nDUmhjt7OXckUcX3r5M4UHD3VVUFG0aZw6WWMEAxlyFf/07fCkhR\nBwIDAQAB\n-----END PUBLIC KEY-----\n\n"},"summary":"","tag":[],"type":"Person","url":"https://pleroma.karjalazet.se/relay"}"#; 185 | const ASONIX_DOG_KEY: &str = "-----BEGIN PUBLIC KEY-----\nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAx8zXS0QNg9YGUBsxAOBH\nJaxIn7i6t+Z4UOpSFDVa2kP0NvQgIJsq3wzRqvaiuncRWpkyFk1fTakiRGD32xnY\nt+juuAaIBlU8eswKyANFqhcLAvFHmT3rA1848M4/YM19djvlL/PR9T53tPNHU+el\nS9MlsG3o6Zsj8YaUJtCI8RgEuJoROLHUb/V9a3oMQ7CfuIoSvF3VEz3/dRT09RW6\n0wQX7yhka9WlKuayWLWmTcB9lAIX6neBk+qKc8VSEsO7mHkzB8mRgVcS2uYZl1eA\nD8/jTT+SlpeFNDZk0Oh35GNFoOxh9qjRw3NGxu7jJCVBehDODzasOv4xDxKAhONa\njQIDAQAB\n-----END PUBLIC KEY-----\n"; 186 | const KARJALAZET_KEY: &str = "-----BEGIN PUBLIC KEY-----\nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAucoyCht6QpEzUPdQWP/J\nJYxObSH3MCcXBnG4d0OX78QshloeAHhl78EZ5c8I0ePmIjDg2NFK3/pG0EvSrHe2\nIZHnHaN5emgCb2ifNya5W572yfQXo1tUQy+ZXtbTUA7BWbr4LuCvd+HUavMwbx72\neraSZTiQj//ObwpbXFoZO5I/+e5avGmVnfmr/y2cG95hqFDtI3438RgZyBjY5kJM\nY1MLWoY9itGSfYmBtxRj3umlC2bPuBB+hHUJi6TvP7NO6zuUZ66m4ETyuBDi8iP6\ngnUp3Q4+1/I3nDUmhjt7OXckUcX3r5M4UHD3VVUFG0aZw6WWMEAxlyFf/07fCkhR\nBwIDAQAB\n-----END PUBLIC KEY-----\n\n"; 187 | 188 | #[test] 189 | fn handles_masto_keys() { 190 | println!("{ASONIX_DOG_KEY}"); 191 | let _ = RsaPublicKey::from_public_key_pem(ASONIX_DOG_KEY.trim()).unwrap(); 192 | } 193 | 194 | #[test] 195 | fn handles_pleromo_keys() { 196 | println!("{KARJALAZET_KEY}"); 197 | let _ = RsaPublicKey::from_public_key_pem(KARJALAZET_KEY.trim()).unwrap(); 198 | } 199 | 200 | #[test] 201 | fn handles_pleromo_relay_format() { 202 | let _: AcceptedActors = serde_json::from_str(KARJALAZET_RELAY).unwrap(); 203 | } 204 | 205 | #[test] 206 | fn handles_masto_relay_format() { 207 | let _: AcceptedActors = serde_json::from_str(ASONIX_DOG_ACTOR).unwrap(); 208 | } 209 | } 210 | -------------------------------------------------------------------------------- /src/middleware/webfinger.rs: -------------------------------------------------------------------------------- 1 | use crate::{ 2 | config::{Config, UrlKind}, 3 | data::State, 4 | future::LocalBoxFuture, 5 | }; 6 | use actix_web::web::Data; 7 | use actix_webfinger::{Resolver, Webfinger}; 8 | use rsa_magic_public_key::AsMagicPublicKey; 9 | 10 | pub(crate) struct RelayResolver; 11 | 12 | #[derive(Clone, Debug, thiserror::Error)] 13 | #[error("Error resolving webfinger data")] 14 | pub(crate) struct RelayError; 15 | 16 | impl Resolver for RelayResolver { 17 | type State = (Data, Data); 18 | type Error = RelayError; 19 | 20 | fn find( 21 | scheme: Option<&str>, 22 | account: &str, 23 | domain: &str, 24 | (state, config): Self::State, 25 | ) -> LocalBoxFuture<'static, Result, Self::Error>> { 26 | let domain = domain.to_owned(); 27 | let account = account.to_owned(); 28 | let scheme = scheme.map(|scheme| scheme.to_owned()); 29 | 30 | let fut = async move { 31 | if let Some(scheme) = scheme { 32 | if scheme != "acct:" { 33 | return Ok(None); 34 | } 35 | } 36 | 37 | if domain != config.hostname() { 38 | return Ok(None); 39 | } 40 | 41 | if account != "relay" { 42 | return Ok(None); 43 | } 44 | 45 | let mut wf = 
Webfinger::new(config.generate_resource().as_str()); 46 | wf.add_alias(config.generate_url(UrlKind::Actor).as_str()) 47 | .add_activitypub(config.generate_url(UrlKind::Actor).as_str()) 48 | .add_magic_public_key(&state.public_key.as_magic_public_key()); 49 | 50 | Ok(Some(wf)) 51 | }; 52 | 53 | Box::pin(fut) 54 | } 55 | } 56 | 57 | impl actix_web::error::ResponseError for RelayError {} 58 | -------------------------------------------------------------------------------- /src/requests.rs: -------------------------------------------------------------------------------- 1 | use crate::{ 2 | data::LastOnline, 3 | error::{Error, ErrorKind}, 4 | spawner::Spawner, 5 | }; 6 | use activitystreams::iri_string::types::IriString; 7 | use actix_web::http::header::Date; 8 | use base64::{engine::general_purpose::STANDARD, Engine}; 9 | use dashmap::DashMap; 10 | use http_signature_normalization_reqwest::{digest::ring::Sha256, prelude::*}; 11 | use reqwest_middleware::ClientWithMiddleware; 12 | use ring::{ 13 | rand::SystemRandom, 14 | signature::{RsaKeyPair, RSA_PKCS1_SHA256}, 15 | }; 16 | use rsa::{pkcs1::EncodeRsaPrivateKey, RsaPrivateKey}; 17 | use std::{ 18 | sync::Arc, 19 | time::{Duration, SystemTime}, 20 | }; 21 | 22 | const ONE_SECOND: u64 = 1; 23 | const ONE_MINUTE: u64 = 60 * ONE_SECOND; 24 | const ONE_HOUR: u64 = 60 * ONE_MINUTE; 25 | const ONE_DAY: u64 = 24 * ONE_HOUR; 26 | 27 | #[derive(Debug)] 28 | pub(crate) enum BreakerStrategy { 29 | // Requires a successful response 30 | Require2XX, 31 | // Allows HTTP 2xx-401 32 | Allow401AndBelow, 33 | // Allows HTTP 2xx-404 34 | Allow404AndBelow, 35 | } 36 | 37 | #[derive(Clone)] 38 | pub(crate) struct Breakers { 39 | inner: Arc>, 40 | } 41 | 42 | impl std::fmt::Debug for Breakers { 43 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 44 | f.debug_struct("Breakers").finish() 45 | } 46 | } 47 | 48 | impl Breakers { 49 | pub(crate) fn should_try(&self, url: &IriString) -> bool { 50 | if let Some(authority) = url.authority_str() { 51 | if let Some(breaker) = self.inner.get(authority) { 52 | breaker.should_try() 53 | } else { 54 | true 55 | } 56 | } else { 57 | false 58 | } 59 | } 60 | 61 | fn fail(&self, url: &IriString) { 62 | if let Some(authority) = url.authority_str() { 63 | let should_write = { 64 | if let Some(mut breaker) = self.inner.get_mut(authority) { 65 | breaker.fail(); 66 | if !breaker.should_try() { 67 | tracing::warn!("Failed breaker for {authority}"); 68 | } 69 | false 70 | } else { 71 | true 72 | } 73 | }; 74 | 75 | if should_write { 76 | let mut breaker = self.inner.entry(authority.to_owned()).or_default(); 77 | breaker.fail(); 78 | } 79 | } 80 | } 81 | 82 | fn succeed(&self, url: &IriString) { 83 | if let Some(authority) = url.authority_str() { 84 | let should_write = { 85 | if let Some(mut breaker) = self.inner.get_mut(authority) { 86 | breaker.succeed(); 87 | false 88 | } else { 89 | true 90 | } 91 | }; 92 | 93 | if should_write { 94 | let mut breaker = self.inner.entry(authority.to_owned()).or_default(); 95 | breaker.succeed(); 96 | } 97 | } 98 | } 99 | } 100 | 101 | impl Default for Breakers { 102 | fn default() -> Self { 103 | Breakers { 104 | inner: Arc::new(DashMap::new()), 105 | } 106 | } 107 | } 108 | 109 | #[derive(Debug)] 110 | struct Breaker { 111 | failures: usize, 112 | last_attempt: SystemTime, 113 | last_success: SystemTime, 114 | } 115 | 116 | impl Breaker { 117 | const FAILURE_WAIT: Duration = Duration::from_secs(ONE_DAY); 118 | const FAILURE_THRESHOLD: usize = 10; 119 | 120 | fn should_try(&self) 
-> bool { 121 | self.failures < Self::FAILURE_THRESHOLD 122 | || self.last_attempt + Self::FAILURE_WAIT < SystemTime::now() 123 | } 124 | 125 | fn fail(&mut self) { 126 | self.failures += 1; 127 | self.last_attempt = SystemTime::now(); 128 | } 129 | 130 | fn succeed(&mut self) { 131 | self.failures = 0; 132 | self.last_attempt = SystemTime::now(); 133 | self.last_success = SystemTime::now(); 134 | } 135 | } 136 | 137 | impl Default for Breaker { 138 | fn default() -> Self { 139 | let now = SystemTime::now(); 140 | 141 | Breaker { 142 | failures: 0, 143 | last_attempt: now, 144 | last_success: now, 145 | } 146 | } 147 | } 148 | 149 | #[derive(Clone)] 150 | pub(crate) struct Requests { 151 | client: ClientWithMiddleware, 152 | key_id: String, 153 | private_key: Arc, 154 | rng: SystemRandom, 155 | config: Config, 156 | breakers: Breakers, 157 | last_online: Arc, 158 | } 159 | 160 | impl std::fmt::Debug for Requests { 161 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 162 | f.debug_struct("Requests") 163 | .field("key_id", &self.key_id) 164 | .field("config", &self.config) 165 | .field("breakers", &self.breakers) 166 | .finish() 167 | } 168 | } 169 | 170 | impl Requests { 171 | #[allow(clippy::too_many_arguments)] 172 | pub(crate) fn new( 173 | key_id: String, 174 | private_key: RsaPrivateKey, 175 | breakers: Breakers, 176 | last_online: Arc, 177 | spawner: Spawner, 178 | client: ClientWithMiddleware, 179 | ) -> Self { 180 | let private_key_der = private_key.to_pkcs1_der().expect("Can encode der"); 181 | let private_key = ring::signature::RsaKeyPair::from_der(private_key_der.as_bytes()) 182 | .expect("Key is valid"); 183 | Requests { 184 | client, 185 | key_id, 186 | private_key: Arc::new(private_key), 187 | rng: SystemRandom::new(), 188 | config: Config::new_with_spawner(spawner).mastodon_compat(), 189 | breakers, 190 | last_online, 191 | } 192 | } 193 | 194 | pub(crate) fn spawner(mut self, spawner: Spawner) -> Self { 195 | self.config = self.config.set_spawner(spawner); 196 | self 197 | } 198 | 199 | pub(crate) fn reset_breaker(&self, iri: &IriString) { 200 | self.breakers.succeed(iri); 201 | } 202 | 203 | async fn check_response( 204 | &self, 205 | parsed_url: &IriString, 206 | strategy: BreakerStrategy, 207 | res: Result, 208 | ) -> Result { 209 | if res.is_err() { 210 | self.breakers.fail(&parsed_url); 211 | } 212 | 213 | let res = res?; 214 | 215 | let status = res.status(); 216 | 217 | let success = match strategy { 218 | BreakerStrategy::Require2XX => status.is_success(), 219 | BreakerStrategy::Allow401AndBelow => (200..=401).contains(&status.as_u16()), 220 | BreakerStrategy::Allow404AndBelow => (200..=404).contains(&status.as_u16()), 221 | }; 222 | 223 | if !success { 224 | self.breakers.fail(&parsed_url); 225 | 226 | if let Ok(s) = res.text().await { 227 | if !s.is_empty() { 228 | tracing::debug!("Response from {parsed_url}, {s}"); 229 | } 230 | } 231 | 232 | return Err(ErrorKind::Status(parsed_url.to_string(), status).into()); 233 | } 234 | 235 | // only actually succeed a breaker on 2xx response 236 | if status.is_success() { 237 | self.last_online.mark_seen(&parsed_url); 238 | self.breakers.succeed(&parsed_url); 239 | } 240 | 241 | Ok(res) 242 | } 243 | 244 | #[tracing::instrument(name = "Fetch Json", skip(self), fields(signing_string))] 245 | pub(crate) async fn fetch_json( 246 | &self, 247 | url: &IriString, 248 | strategy: BreakerStrategy, 249 | ) -> Result 250 | where 251 | T: serde::de::DeserializeOwned, 252 | { 253 | self.do_fetch(url, 
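
The breaker above is a plain counter: ten failures without a success open it, and it only admits another attempt after the one-day wait (any success closes it again). A standalone sketch of that state machine using the same constants, trimmed to the fields that drive should_try:

```rust
use std::time::{Duration, SystemTime};

// Standalone sketch of the Breaker state machine, using the same
// FAILURE_THRESHOLD and FAILURE_WAIT values as above.
struct Breaker {
    failures: usize,
    last_attempt: SystemTime,
}

impl Breaker {
    const FAILURE_WAIT: Duration = Duration::from_secs(60 * 60 * 24);
    const FAILURE_THRESHOLD: usize = 10;

    fn should_try(&self) -> bool {
        self.failures < Self::FAILURE_THRESHOLD
            || self.last_attempt + Self::FAILURE_WAIT < SystemTime::now()
    }

    fn fail(&mut self) {
        self.failures += 1;
        self.last_attempt = SystemTime::now();
    }

    fn succeed(&mut self) {
        self.failures = 0;
        self.last_attempt = SystemTime::now();
    }
}

fn main() {
    let mut breaker = Breaker { failures: 0, last_attempt: SystemTime::now() };

    for _ in 0..10 {
        breaker.fail();
    }
    assert!(!breaker.should_try()); // open: 10 recent failures

    breaker.succeed();
    assert!(breaker.should_try()); // closed again after a success
    println!("breaker behaves as expected");
}
```

Related to this, check_response above turns the BreakerStrategy passed by each fetch and deliver helper into a range of acceptable HTTP status codes. A sketch of that mapping:

```rust
// Standalone sketch of how check_response interprets BreakerStrategy:
// which HTTP status codes count as "success" for breaker bookkeeping.
#[derive(Debug)]
enum BreakerStrategy {
    Require2XX,
    Allow401AndBelow,
    Allow404AndBelow,
}

fn is_acceptable(strategy: &BreakerStrategy, status: u16) -> bool {
    match strategy {
        BreakerStrategy::Require2XX => (200..=299).contains(&status),
        BreakerStrategy::Allow401AndBelow => (200..=401).contains(&status),
        BreakerStrategy::Allow404AndBelow => (200..=404).contains(&status),
    }
}

fn main() {
    // Deliveries tolerate auth failures without tripping the breaker...
    assert!(is_acceptable(&BreakerStrategy::Allow401AndBelow, 401));
    // ...and some fetches also tolerate 404s.
    assert!(is_acceptable(&BreakerStrategy::Allow404AndBelow, 404));
    // A strict fetch requires a real 2xx.
    assert!(!is_acceptable(&BreakerStrategy::Require2XX, 404));
    println!("strategy mapping behaves as expected");
}
```
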
"application/json", strategy).await 254 | } 255 | 256 | #[tracing::instrument(name = "Fetch Json", skip(self), fields(signing_string))] 257 | pub(crate) async fn fetch_json_msky( 258 | &self, 259 | url: &IriString, 260 | strategy: BreakerStrategy, 261 | ) -> Result 262 | where 263 | T: serde::de::DeserializeOwned, 264 | { 265 | let body = self 266 | .do_deliver( 267 | url, 268 | &serde_json::json!({}), 269 | "application/json", 270 | "application/json", 271 | strategy, 272 | ) 273 | .await? 274 | .bytes() 275 | .await?; 276 | 277 | Ok(serde_json::from_slice(&body)?) 278 | } 279 | 280 | #[tracing::instrument(name = "Fetch Activity+Json", skip(self), fields(signing_string))] 281 | pub(crate) async fn fetch( 282 | &self, 283 | url: &IriString, 284 | strategy: BreakerStrategy, 285 | ) -> Result 286 | where 287 | T: serde::de::DeserializeOwned, 288 | { 289 | self.do_fetch(url, "application/activity+json", strategy) 290 | .await 291 | } 292 | 293 | async fn do_fetch( 294 | &self, 295 | url: &IriString, 296 | accept: &str, 297 | strategy: BreakerStrategy, 298 | ) -> Result 299 | where 300 | T: serde::de::DeserializeOwned, 301 | { 302 | let body = self 303 | .do_fetch_response(url, accept, strategy) 304 | .await? 305 | .bytes() 306 | .await?; 307 | 308 | Ok(serde_json::from_slice(&body)?) 309 | } 310 | 311 | #[tracing::instrument(name = "Fetch response", skip(self), fields(signing_string))] 312 | pub(crate) async fn fetch_response( 313 | &self, 314 | url: &IriString, 315 | strategy: BreakerStrategy, 316 | ) -> Result { 317 | self.do_fetch_response(url, "*/*", strategy).await 318 | } 319 | 320 | pub(crate) async fn do_fetch_response( 321 | &self, 322 | url: &IriString, 323 | accept: &str, 324 | strategy: BreakerStrategy, 325 | ) -> Result { 326 | if !self.breakers.should_try(url) { 327 | return Err(ErrorKind::Breaker.into()); 328 | } 329 | 330 | let signer = self.signer(); 331 | let span = tracing::Span::current(); 332 | 333 | let request = self 334 | .client 335 | .get(url.as_str()) 336 | .header("Accept", accept) 337 | .header("Date", Date(SystemTime::now().into()).to_string()) 338 | .signature(&self.config, self.key_id.clone(), move |signing_string| { 339 | span.record("signing_string", signing_string); 340 | span.in_scope(|| signer.sign(signing_string)) 341 | }) 342 | .await?; 343 | 344 | let res = self.client.execute(request).await; 345 | 346 | let res = self.check_response(url, strategy, res).await?; 347 | 348 | Ok(res) 349 | } 350 | 351 | #[tracing::instrument( 352 | "Deliver to Inbox", 353 | skip_all, 354 | fields(inbox = inbox.to_string().as_str(), signing_string) 355 | )] 356 | pub(crate) async fn deliver( 357 | &self, 358 | inbox: &IriString, 359 | item: &T, 360 | strategy: BreakerStrategy, 361 | ) -> Result<(), Error> 362 | where 363 | T: serde::ser::Serialize + std::fmt::Debug, 364 | { 365 | self.do_deliver( 366 | inbox, 367 | item, 368 | "application/activity+json", 369 | "application/activity+json", 370 | strategy, 371 | ) 372 | .await?; 373 | Ok(()) 374 | } 375 | 376 | async fn do_deliver( 377 | &self, 378 | inbox: &IriString, 379 | item: &T, 380 | content_type: &str, 381 | accept: &str, 382 | strategy: BreakerStrategy, 383 | ) -> Result 384 | where 385 | T: serde::ser::Serialize + std::fmt::Debug, 386 | { 387 | if !self.breakers.should_try(&inbox) { 388 | return Err(ErrorKind::Breaker.into()); 389 | } 390 | 391 | let signer = self.signer(); 392 | let span = tracing::Span::current(); 393 | let item_string = serde_json::to_string(item)?; 394 | 395 | let request = self 396 | .client 
397 | .post(inbox.as_str()) 398 | .header("Accept", accept) 399 | .header("Content-Type", content_type) 400 | .header("Date", Date(SystemTime::now().into()).to_string()) 401 | .signature_with_digest( 402 | self.config.clone(), 403 | self.key_id.clone(), 404 | Sha256::new(), 405 | item_string, 406 | move |signing_string| { 407 | span.record("signing_string", signing_string); 408 | span.in_scope(|| signer.sign(signing_string)) 409 | }, 410 | ) 411 | .await?; 412 | 413 | let res = self.client.execute(request).await; 414 | 415 | let res = self.check_response(inbox, strategy, res).await?; 416 | 417 | Ok(res) 418 | } 419 | 420 | fn signer(&self) -> Signer { 421 | Signer { 422 | private_key: self.private_key.clone(), 423 | rng: self.rng.clone(), 424 | } 425 | } 426 | } 427 | 428 | struct Signer { 429 | private_key: Arc, 430 | rng: SystemRandom, 431 | } 432 | 433 | impl Signer { 434 | fn sign(&self, signing_string: &str) -> Result { 435 | let mut signature = vec![0; self.private_key.public().modulus_len()]; 436 | 437 | self.private_key 438 | .sign( 439 | &RSA_PKCS1_SHA256, 440 | &self.rng, 441 | signing_string.as_bytes(), 442 | &mut signature, 443 | ) 444 | .map_err(|_| ErrorKind::SignRequest)?; 445 | 446 | Ok(STANDARD.encode(&signature)) 447 | } 448 | } 449 | -------------------------------------------------------------------------------- /src/routes.rs: -------------------------------------------------------------------------------- 1 | mod actor; 2 | mod healthz; 3 | mod inbox; 4 | mod index; 5 | mod media; 6 | mod nodeinfo; 7 | mod statics; 8 | 9 | pub(crate) use self::{ 10 | actor::route as actor, 11 | healthz::route as healthz, 12 | inbox::route as inbox, 13 | index::route as index, 14 | media::route as media, 15 | nodeinfo::{route as nodeinfo, well_known as nodeinfo_meta}, 16 | statics::route as statics, 17 | }; 18 | 19 | use actix_web::HttpResponse; 20 | use serde::ser::Serialize; 21 | 22 | static CONTENT_TYPE: &str = "application/activity+json"; 23 | 24 | fn ok(item: T) -> HttpResponse 25 | where 26 | T: Serialize, 27 | { 28 | HttpResponse::Ok().content_type(CONTENT_TYPE).json(&item) 29 | } 30 | 31 | fn accepted(item: T) -> HttpResponse 32 | where 33 | T: Serialize, 34 | { 35 | HttpResponse::Accepted() 36 | .content_type(CONTENT_TYPE) 37 | .json(&item) 38 | } 39 | -------------------------------------------------------------------------------- /src/routes/actor.rs: -------------------------------------------------------------------------------- 1 | use crate::{ 2 | apub::{PublicKey, PublicKeyInner}, 3 | config::{Config, UrlKind}, 4 | data::State, 5 | error::Error, 6 | routes::ok, 7 | }; 8 | use activitystreams::{ 9 | actor::{ApActor, Application, Endpoints}, 10 | context, 11 | prelude::*, 12 | security, 13 | }; 14 | use activitystreams_ext::Ext1; 15 | use actix_web::{web, Responder}; 16 | use rsa::pkcs8::EncodePublicKey; 17 | 18 | #[tracing::instrument(name = "Actor", skip(config, state))] 19 | pub(crate) async fn route( 20 | state: web::Data, 21 | config: web::Data, 22 | ) -> Result { 23 | let mut application = Ext1::new( 24 | ApActor::new(config.generate_url(UrlKind::Inbox), Application::new()), 25 | PublicKey { 26 | public_key: PublicKeyInner { 27 | id: config.generate_url(UrlKind::MainKey), 28 | owner: config.generate_url(UrlKind::Actor), 29 | public_key_pem: state 30 | .public_key 31 | .to_public_key_pem(rsa::pkcs8::LineEnding::default())?, 32 | }, 33 | }, 34 | ); 35 | 36 | application 37 | .set_id(config.generate_url(UrlKind::Actor)) 38 | .set_summary("AodeRelay bot") 39 | 
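
Rendered, the application actor assembled by this route serializes roughly like the sketch below. It is hand-written for illustration: relay.example, the concrete URL paths behind each UrlKind, and the key PEM are placeholders, not the crate's literal output.

```rust
// Hand-assembled sketch of the actor document served by this route.
// relay.example, the paths, and the key material are placeholders.
fn main() {
    let actor = serde_json::json!({
        "@context": [
            "https://www.w3.org/ns/activitystreams",
            "https://w3id.org/security/v1"
        ],
        "id": "https://relay.example/actor",
        "type": "Application",
        "name": "AodeRelay",
        "summary": "AodeRelay bot",
        "preferredUsername": "relay",
        "url": "https://relay.example/actor",
        "inbox": "https://relay.example/inbox",
        "outbox": "https://relay.example/outbox",
        "followers": "https://relay.example/followers",
        "following": "https://relay.example/following",
        "endpoints": { "sharedInbox": "https://relay.example/inbox" },
        "publicKey": {
            "id": "https://relay.example/actor#main-key",
            "owner": "https://relay.example/actor",
            "publicKeyPem": "-----BEGIN PUBLIC KEY-----\n...\n-----END PUBLIC KEY-----\n"
        }
    });

    println!("{actor:#}");
}
```
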
.set_name("AodeRelay") 40 | .set_url(config.generate_url(UrlKind::Actor)) 41 | .set_many_contexts(vec![context(), security()]) 42 | .set_preferred_username("relay") 43 | .set_outbox(config.generate_url(UrlKind::Outbox)) 44 | .set_followers(config.generate_url(UrlKind::Followers)) 45 | .set_following(config.generate_url(UrlKind::Following)) 46 | .set_endpoints(Endpoints { 47 | shared_inbox: Some(config.generate_url(UrlKind::Inbox)), 48 | ..Default::default() 49 | }); 50 | 51 | Ok(ok(application)) 52 | } 53 | -------------------------------------------------------------------------------- /src/routes/healthz.rs: -------------------------------------------------------------------------------- 1 | use crate::{data::State, error::Error}; 2 | use actix_web::{web, HttpResponse}; 3 | 4 | pub(crate) async fn route(state: web::Data) -> Result { 5 | state.db.check_health().await?; 6 | Ok(HttpResponse::Ok().finish()) 7 | } 8 | -------------------------------------------------------------------------------- /src/routes/inbox.rs: -------------------------------------------------------------------------------- 1 | use crate::{ 2 | apub::{AcceptedActivities, AcceptedUndoObjects, UndoTypes, ValidTypes}, 3 | config::{Config, UrlKind}, 4 | data::{ActorCache, State}, 5 | db::Actor, 6 | error::{Error, ErrorKind}, 7 | jobs::apub::{Announce, Follow, Forward, Reject, Undo}, 8 | jobs::JobServer, 9 | requests::Requests, 10 | routes::accepted, 11 | }; 12 | use activitystreams::{ 13 | activity, base::AnyBase, iri_string::types::IriString, prelude::*, primitives::OneOrMany, 14 | public, 15 | }; 16 | use actix_web::{web, HttpResponse}; 17 | use http_signature_normalization_actix::prelude::{DigestVerified, SignatureVerified}; 18 | 19 | #[tracing::instrument(name = "Inbox", skip_all, fields(id = tracing::field::debug(&input.id_unchecked()), kind = tracing::field::debug(&input.kind())))] 20 | #[allow(clippy::too_many_arguments)] 21 | pub(crate) async fn route( 22 | state: web::Data, 23 | actors: web::Data, 24 | config: web::Data, 25 | client: web::Data, 26 | jobs: web::Data, 27 | input: web::Json, 28 | digest_verified: Option, 29 | signature_verified: Option, 30 | ) -> Result { 31 | let input = input.into_inner(); 32 | 33 | let kind = input.kind().ok_or(ErrorKind::MissingKind)?; 34 | 35 | if digest_verified.is_some() && signature_verified.is_none() && *kind == ValidTypes::Delete { 36 | return Ok(accepted(serde_json::json!({}))); 37 | } else if config.validate_signatures() 38 | && (digest_verified.is_none() || signature_verified.is_none()) 39 | { 40 | return Err(ErrorKind::NoSignature(None).into()); 41 | } 42 | 43 | let actor_id = if input.id_unchecked().is_some() { 44 | input.actor()?.as_single_id().ok_or(ErrorKind::MissingId)? 45 | } else { 46 | input 47 | .actor_unchecked() 48 | .as_single_id() 49 | .ok_or(ErrorKind::MissingId)? 50 | }; 51 | 52 | let actor = actors.get(actor_id, &client).await?.into_inner(); 53 | 54 | if let Some(verified) = signature_verified { 55 | if actor.public_key_id.as_str() != verified.key_id() { 56 | tracing::error!("Actor signed with wrong key"); 57 | return Err(ErrorKind::BadActor( 58 | actor.public_key_id.to_string(), 59 | verified.key_id().to_owned(), 60 | ) 61 | .into()); 62 | } 63 | } else if config.validate_signatures() { 64 | tracing::error!("This case should never be reachable, since I handle signature checks earlier in the flow. 
If you see this in a log it means I did it wrong"); 65 | return Err(ErrorKind::NoSignature(Some(actor.public_key_id.to_string())).into()); 66 | } 67 | 68 | let is_allowed = state.db.is_allowed(actor.id.clone()).await?; 69 | let is_connected = state.db.is_connected(actor.id.clone()).await?; 70 | 71 | if !is_allowed { 72 | return Err(ErrorKind::NotAllowed(actor.id.to_string()).into()); 73 | } 74 | 75 | if !is_connected && !valid_without_listener(&input)? { 76 | return Err(ErrorKind::NotSubscribed(actor.id.to_string()).into()); 77 | } 78 | 79 | match kind { 80 | ValidTypes::Accept => handle_accept(&config, input).await?, 81 | ValidTypes::Reject => handle_reject(&config, &jobs, input, actor).await?, 82 | ValidTypes::Announce | ValidTypes::Create => { 83 | handle_announce(&state, &jobs, input, actor).await? 84 | } 85 | ValidTypes::Follow => handle_follow(&config, &jobs, input, actor).await?, 86 | ValidTypes::Add | ValidTypes::Delete | ValidTypes::Remove | ValidTypes::Update => { 87 | handle_forward(&jobs, input, actor).await? 88 | } 89 | ValidTypes::Undo => handle_undo(&config, &jobs, input, actor, is_connected).await?, 90 | ValidTypes::Move => { 91 | tracing::warn!("Move activity received, ignoring"); 92 | } 93 | }; 94 | 95 | Ok(accepted(serde_json::json!({}))) 96 | } 97 | 98 | fn valid_without_listener(input: &AcceptedActivities) -> Result { 99 | match input.kind() { 100 | Some(ValidTypes::Follow) => Ok(true), 101 | Some(ValidTypes::Undo) => Ok(single_object(input.object_unchecked())?.is_kind("Follow")), 102 | _ => Ok(false), 103 | } 104 | } 105 | 106 | fn kind_str(base: &AnyBase) -> Result<&str, Error> { 107 | base.kind_str() 108 | .ok_or(ErrorKind::MissingKind) 109 | .map_err(Into::into) 110 | } 111 | 112 | fn id_string(id: Option<&IriString>) -> Result { 113 | id.map(|s| s.to_string()) 114 | .ok_or(ErrorKind::MissingId) 115 | .map_err(Into::into) 116 | } 117 | 118 | fn single_object(o: &OneOrMany) -> Result<&AnyBase, Error> { 119 | o.as_one().ok_or(ErrorKind::ObjectCount).map_err(Into::into) 120 | } 121 | 122 | async fn handle_accept(config: &Config, input: AcceptedActivities) -> Result<(), Error> { 123 | let base = single_object(input.object_unchecked())?.clone(); 124 | let follow = if let Some(follow) = activity::Follow::from_any_base(base)? { 125 | follow 126 | } else { 127 | return Err(ErrorKind::Kind( 128 | kind_str(single_object(input.object_unchecked())?)?.to_owned(), 129 | ) 130 | .into()); 131 | }; 132 | 133 | if !follow.actor_is(&config.generate_url(UrlKind::Actor)) { 134 | return Err(ErrorKind::WrongActor(id_string(follow.actor()?.as_single_id())?).into()); 135 | } 136 | 137 | Ok(()) 138 | } 139 | 140 | async fn handle_reject( 141 | config: &Config, 142 | jobs: &JobServer, 143 | input: AcceptedActivities, 144 | actor: Actor, 145 | ) -> Result<(), Error> { 146 | let base = single_object(input.object_unchecked())?.clone(); 147 | let follow = if let Some(follow) = activity::Follow::from_any_base(base)? 
{ 148 | follow 149 | } else { 150 | return Err(ErrorKind::Kind( 151 | kind_str(single_object(input.object_unchecked())?)?.to_owned(), 152 | ) 153 | .into()); 154 | }; 155 | 156 | if !follow.actor_is(&config.generate_url(UrlKind::Actor)) { 157 | return Err(ErrorKind::WrongActor(id_string(follow.actor()?.as_single_id())?).into()); 158 | } 159 | 160 | jobs.queue(Reject(actor)).await?; 161 | 162 | Ok(()) 163 | } 164 | 165 | async fn handle_undo( 166 | config: &Config, 167 | jobs: &JobServer, 168 | input: AcceptedActivities, 169 | actor: Actor, 170 | is_listener: bool, 171 | ) -> Result<(), Error> { 172 | let any_base = single_object(input.object_unchecked())?.clone(); 173 | let undone_object = 174 | AcceptedUndoObjects::from_any_base(any_base)?.ok_or(ErrorKind::ObjectFormat)?; 175 | 176 | if !undone_object.is_kind(&UndoTypes::Follow) { 177 | if is_listener { 178 | jobs.queue(Forward::new(input, actor)).await?; 179 | return Ok(()); 180 | } else { 181 | return Err(ErrorKind::NotSubscribed(actor.id.to_string()).into()); 182 | } 183 | } 184 | 185 | let my_id: IriString = config.generate_url(UrlKind::Actor); 186 | 187 | if !undone_object.object_is(&my_id) && !undone_object.object_is(&public()) { 188 | return Err(ErrorKind::WrongActor(id_string( 189 | undone_object.object_unchecked().as_single_id(), 190 | )?) 191 | .into()); 192 | } 193 | 194 | if !is_listener { 195 | return Ok(()); 196 | } 197 | 198 | jobs.queue(Undo::new(input, actor)).await?; 199 | Ok(()) 200 | } 201 | 202 | async fn handle_forward( 203 | jobs: &JobServer, 204 | input: AcceptedActivities, 205 | actor: Actor, 206 | ) -> Result<(), Error> { 207 | jobs.queue(Forward::new(input, actor)).await?; 208 | 209 | Ok(()) 210 | } 211 | 212 | async fn handle_announce( 213 | state: &State, 214 | jobs: &JobServer, 215 | input: AcceptedActivities, 216 | actor: Actor, 217 | ) -> Result<(), Error> { 218 | let object_id = input 219 | .object_unchecked() 220 | .as_single_id() 221 | .ok_or(ErrorKind::MissingId)?; 222 | 223 | if state.is_cached(object_id) { 224 | return Ok(()); 225 | //return Err(ErrorKind::Duplicate.into()); 226 | } 227 | 228 | jobs.queue(Announce::new(object_id.to_owned(), actor)) 229 | .await?; 230 | 231 | Ok(()) 232 | } 233 | 234 | async fn handle_follow( 235 | config: &Config, 236 | jobs: &JobServer, 237 | input: AcceptedActivities, 238 | actor: Actor, 239 | ) -> Result<(), Error> { 240 | let my_id: IriString = config.generate_url(UrlKind::Actor); 241 | 242 | if !input.object_is(&my_id) && !input.object_is(&public()) { 243 | return Err( 244 | ErrorKind::WrongActor(id_string(input.object_unchecked().as_single_id())?).into(), 245 | ); 246 | } 247 | 248 | jobs.queue(Follow::new(input, actor)).await?; 249 | 250 | Ok(()) 251 | } 252 | -------------------------------------------------------------------------------- /src/routes/index.rs: -------------------------------------------------------------------------------- 1 | use crate::{ 2 | config::Config, 3 | data::{Node, State}, 4 | error::{Error, ErrorKind}, 5 | }; 6 | use actix_web::{web, HttpResponse}; 7 | use rand::{seq::SliceRandom, thread_rng}; 8 | use std::io::BufWriter; 9 | 10 | const MINIFY_CONFIG: minify_html::Cfg = minify_html::Cfg { 11 | do_not_minify_doctype: true, 12 | ensure_spec_compliant_unquoted_attribute_values: true, 13 | keep_closing_tags: true, 14 | keep_html_and_head_opening_tags: false, 15 | keep_spaces_between_attributes: true, 16 | keep_comments: false, 17 | keep_input_type_text_attr: true, 18 | keep_ssi_comments: false, 19 | preserve_brace_template_syntax: 
false, 20 | preserve_chevron_percent_template_syntax: false, 21 | minify_css: true, 22 | minify_js: true, 23 | remove_bangs: true, 24 | remove_processing_instructions: true, 25 | }; 26 | 27 | fn open_reg(node: &Node) -> bool { 28 | node.instance 29 | .as_ref() 30 | .map(|i| i.reg) 31 | .or_else(|| node.info.as_ref().map(|i| i.reg)) 32 | .unwrap_or(false) 33 | } 34 | 35 | #[tracing::instrument(name = "Index", skip(config, state))] 36 | pub(crate) async fn route( 37 | state: web::Data, 38 | config: web::Data, 39 | ) -> Result { 40 | let all_nodes = state.node_cache.nodes().await?; 41 | 42 | let mut nodes = Vec::new(); 43 | let mut local = Vec::new(); 44 | 45 | for node in all_nodes { 46 | if !state.is_connected(&node.base) { 47 | continue; 48 | } 49 | 50 | if node 51 | .base 52 | .authority_str() 53 | .map(|authority| { 54 | config 55 | .local_domains() 56 | .iter() 57 | .any(|domain| domain.as_str() == authority) 58 | }) 59 | .unwrap_or(false) 60 | { 61 | local.push(node); 62 | } else { 63 | nodes.push(node); 64 | } 65 | } 66 | 67 | nodes.sort_by(|lhs, rhs| match (open_reg(lhs), open_reg(rhs)) { 68 | (true, true) | (false, false) => std::cmp::Ordering::Equal, 69 | (true, false) => std::cmp::Ordering::Less, 70 | (false, true) => std::cmp::Ordering::Greater, 71 | }); 72 | 73 | if let Some((i, _)) = nodes.iter().enumerate().find(|(_, node)| !open_reg(node)) { 74 | nodes[..i].shuffle(&mut thread_rng()); 75 | nodes[i..].shuffle(&mut thread_rng()); 76 | } else { 77 | nodes.shuffle(&mut thread_rng()); 78 | } 79 | 80 | let mut buf = BufWriter::new(Vec::new()); 81 | 82 | crate::templates::index_html(&mut buf, &local, &nodes, &config)?; 83 | let html = buf.into_inner().map_err(|e| { 84 | tracing::error!("Error rendering template, {}", e.error()); 85 | ErrorKind::FlushBuffer 86 | })?; 87 | 88 | let html = minify_html::minify(&html, &MINIFY_CONFIG); 89 | 90 | Ok(HttpResponse::Ok().content_type("text/html").body(html)) 91 | } 92 | -------------------------------------------------------------------------------- /src/routes/media.rs: -------------------------------------------------------------------------------- 1 | use crate::{ 2 | data::MediaCache, 3 | error::Error, 4 | requests::{BreakerStrategy, Requests}, 5 | }; 6 | use actix_web::{body::BodyStream, web, HttpResponse}; 7 | use uuid::Uuid; 8 | 9 | #[tracing::instrument(name = "Media", skip(media, requests))] 10 | pub(crate) async fn route( 11 | media: web::Data, 12 | requests: web::Data, 13 | uuid: web::Path, 14 | ) -> Result { 15 | let uuid = uuid.into_inner(); 16 | 17 | if let Some(url) = media.get_url(uuid).await? 
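
The index route above sorts open-registration instances ahead of closed ones and then shuffles within each group, so the listing stays fair across page loads. A standalone sketch of that two-step ordering, with a plain bool standing in for open_reg(node):

```rust
use rand::seq::SliceRandom;
use rand::thread_rng;

// Standalone sketch of the ordering used on the index page: sort
// open-registration nodes ahead of closed ones, then shuffle inside each
// group so no instance is permanently pinned to the top.
struct Node {
    domain: &'static str,
    open_reg: bool,
}

fn main() {
    let mut nodes = vec![
        Node { domain: "a.example", open_reg: false },
        Node { domain: "b.example", open_reg: true },
        Node { domain: "c.example", open_reg: true },
        Node { domain: "d.example", open_reg: false },
    ];

    // true sorts before false, i.e. open registrations first.
    nodes.sort_by(|lhs, rhs| rhs.open_reg.cmp(&lhs.open_reg));

    // Find the boundary between the two groups and shuffle each half
    // independently, mirroring the logic in route() above.
    if let Some(i) = nodes.iter().position(|node| !node.open_reg) {
        nodes[..i].shuffle(&mut thread_rng());
        nodes[i..].shuffle(&mut thread_rng());
    } else {
        nodes.shuffle(&mut thread_rng());
    }

    for node in &nodes {
        println!("{} (open_reg: {})", node.domain, node.open_reg);
    }
}
```
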
{ 18 | let res = requests 19 | .fetch_response(&url, BreakerStrategy::Allow404AndBelow) 20 | .await?; 21 | 22 | let mut response = HttpResponse::build(res.status()); 23 | 24 | for (name, value) in res.headers().iter().filter(|(h, _)| *h != "connection") { 25 | response.insert_header((name.clone(), value.clone())); 26 | } 27 | 28 | return Ok(response.body(BodyStream::new(res.bytes_stream()))); 29 | } 30 | 31 | Ok(HttpResponse::NotFound().finish()) 32 | } 33 | -------------------------------------------------------------------------------- /src/routes/nodeinfo.rs: -------------------------------------------------------------------------------- 1 | use crate::{ 2 | config::{Config, UrlKind}, 3 | data::State, 4 | }; 5 | use actix_web::{web, Responder}; 6 | use actix_webfinger::Link; 7 | 8 | #[tracing::instrument(name = "Well Known NodeInfo", skip(config))] 9 | pub(crate) async fn well_known(config: web::Data<Config>) -> impl Responder { 10 | web::Json(Links { 11 | links: vec![Link { 12 | rel: "http://nodeinfo.diaspora.software/ns/schema/2.0".to_owned(), 13 | href: Some(config.generate_url(UrlKind::NodeInfo).to_string()), 14 | template: None, 15 | kind: None, 16 | }], 17 | }) 18 | .customize() 19 | .insert_header(("Content-Type", "application/jrd+json")) 20 | } 21 | 22 | #[derive(serde::Serialize)] 23 | struct Links { 24 | links: Vec<Link>, 25 | } 26 | 27 | #[tracing::instrument(name = "NodeInfo", skip_all)] 28 | pub(crate) async fn route( 29 | config: web::Data<Config>, 30 | state: web::Data<State>, 31 | ) -> web::Json<NodeInfo> { 32 | let inboxes = state.db.inboxes().await; 33 | 34 | let blocks = if config.publish_blocks() { 35 | Some(state.db.blocks().await.unwrap_or_default()) 36 | } else { 37 | None 38 | }; 39 | 40 | let peers = inboxes 41 | .unwrap_or_default() 42 | .iter() 43 | .filter_map(|listener| listener.authority_str()) 44 | .map(|s| s.to_owned()) 45 | .collect(); 46 | 47 | let open_registrations = !config.restricted_mode(); 48 | 49 | web::Json(NodeInfo { 50 | version: NodeInfoVersion, 51 | software: Software { 52 | name: Config::software_name().to_lowercase(), 53 | version: Config::software_version(), 54 | }, 55 | protocols: vec![Protocol::ActivityPub], 56 | services: Services { 57 | inbound: vec![], 58 | outbound: vec![], 59 | }, 60 | open_registrations, 61 | usage: Usage { 62 | users: Users { 63 | total: 1, 64 | active_halfyear: 1, 65 | active_month: 1, 66 | }, 67 | local_posts: 0, 68 | local_comments: 0, 69 | }, 70 | metadata: Metadata { peers, blocks }, 71 | }) 72 | } 73 | 74 | #[derive(Clone, Debug, serde::Serialize)] 75 | #[serde(rename_all = "camelCase")] 76 | pub struct NodeInfo { 77 | version: NodeInfoVersion, 78 | software: Software, 79 | protocols: Vec<Protocol>, 80 | services: Services, 81 | open_registrations: bool, 82 | usage: Usage, 83 | metadata: Metadata, 84 | } 85 | 86 | #[derive(Clone, Debug, Default)] 87 | pub struct NodeInfoVersion; 88 | 89 | #[derive(Clone, Debug, Default, serde::Serialize)] 90 | pub struct Software { 91 | name: String, 92 | version: String, 93 | } 94 | 95 | #[derive(Clone, Debug, serde::Serialize)] 96 | #[serde(rename_all = "lowercase")] 97 | pub enum Protocol { 98 | ActivityPub, 99 | } 100 | 101 | #[derive(Clone, Debug, serde::Serialize)] 102 | pub struct Services { 103 | inbound: Vec<Service>, 104 | outbound: Vec<Service>, 105 | } 106 | 107 | #[derive(Clone, Debug, serde::Serialize)] 108 | #[serde(rename_all = "lowercase")] 109 | pub enum Service {} 110 | 111 | #[derive(Clone, Debug, Default, serde::Serialize)] 112 | #[serde(rename_all = "camelCase")] 113 | pub struct Usage { 114 | users: Users, 115 |
local_posts: u64, 116 | local_comments: u64, 117 | } 118 | 119 | #[derive(Clone, Debug, Default, serde::Serialize)] 120 | pub struct Metadata { 121 | peers: Vec<String>, 122 | 123 | #[serde(skip_serializing_if = "Option::is_none")] 124 | blocks: Option<Vec<String>>, 125 | } 126 | 127 | #[derive(Clone, Debug, Default, serde::Serialize)] 128 | #[serde(rename_all = "camelCase")] 129 | pub struct Users { 130 | total: u64, 131 | active_halfyear: u64, 132 | active_month: u64, 133 | } 134 | 135 | impl serde::ser::Serialize for NodeInfoVersion { 136 | fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> 137 | where 138 | S: serde::ser::Serializer, 139 | { 140 | serializer.serialize_str("2.0") 141 | } 142 | } 143 | -------------------------------------------------------------------------------- /src/routes/statics.rs: -------------------------------------------------------------------------------- 1 | use crate::templates::statics::StaticFile; 2 | use actix_web::{ 3 | http::header::{CacheControl, CacheDirective, ContentType}, 4 | web, HttpResponse, 5 | }; 6 | 7 | #[allow(clippy::async_yields_async)] 8 | #[tracing::instrument(name = "Statics")] 9 | pub(crate) async fn route(filename: web::Path<String>) -> HttpResponse { 10 | if let Some(data) = StaticFile::get(&filename.into_inner()) { 11 | HttpResponse::Ok() 12 | .insert_header(CacheControl(vec![ 13 | CacheDirective::Public, 14 | CacheDirective::MaxAge(60 * 60 * 24), 15 | CacheDirective::Extension("immutable".to_owned(), None), 16 | ])) 17 | .insert_header(ContentType(data.mime.clone())) 18 | .body(data.content) 19 | } else { 20 | HttpResponse::NotFound() 21 | .reason("No such static file.") 22 | .finish() 23 | } 24 | } 25 | -------------------------------------------------------------------------------- /src/spawner.rs: -------------------------------------------------------------------------------- 1 | use async_cpupool::CpuPool; 2 | use http_signature_normalization_actix::{Canceled, Spawn}; 3 | use std::time::Duration; 4 | 5 | #[derive(Clone)] 6 | pub(crate) struct Spawner { 7 | pool: CpuPool, 8 | } 9 | 10 | impl Spawner { 11 | pub(crate) fn build(name: &'static str, threads: u16) -> anyhow::Result<Self> { 12 | let pool = CpuPool::configure() 13 | .name(name) 14 | .max_threads(threads) 15 | .build()?; 16 | 17 | Ok(Spawner { pool }) 18 | } 19 | 20 | pub(crate) async fn close(self) { 21 | self.pool.close().await; 22 | } 23 | } 24 | 25 | impl std::fmt::Debug for Spawner { 26 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 27 | f.debug_struct("Spawner").finish() 28 | } 29 | } 30 | 31 | async fn timer<Fut>(fut: Fut) -> Fut::Output 32 | where 33 | Fut: std::future::Future, 34 | { 35 | let id = uuid::Uuid::new_v4(); 36 | 37 | metrics::counter!("relay.spawner.wait-timer.start").increment(1); 38 | 39 | let mut interval = tokio::time::interval(Duration::from_secs(5)); 40 | 41 | // pass the first tick (instant) 42 | interval.tick().await; 43 | 44 | let mut fut = std::pin::pin!(fut); 45 | 46 | let mut counter = 0; 47 | loop { 48 | tokio::select!
{ 49 | out = &mut fut => { 50 | metrics::counter!("relay.spawner.wait-timer.end").increment(1); 51 | return out; 52 | } 53 | _ = interval.tick() => { 54 | counter += 1; 55 | metrics::counter!("relay.spawner.wait-timer.pending").increment(1); 56 | tracing::warn!("Blocking operation {id} is taking a long time, {} seconds", counter * 5); 57 | } 58 | } 59 | } 60 | } 61 | 62 | impl Spawn for Spawner { 63 | type Future<T> = std::pin::Pin<Box<dyn std::future::Future<Output = Result<T, Canceled>>>>; 64 | 65 | fn spawn_blocking<Func, Out>(&self, func: Func) -> Self::Future<Out> 66 | where 67 | Func: FnOnce() -> Out + Send + 'static, 68 | Out: Send + 'static, 69 | { 70 | let pool = self.pool.clone(); 71 | 72 | Box::pin(async move { timer(pool.spawn(func)).await.map_err(|_| Canceled) }) 73 | } 74 | } 75 | 76 | impl http_signature_normalization_reqwest::Spawn for Spawner { 77 | type Future<T> = std::pin::Pin<Box<dyn std::future::Future<Output = Result<T, http_signature_normalization_reqwest::Canceled>> + Send>> where T: Send; 78 | 79 | fn spawn_blocking<Func, Out>(&self, func: Func) -> Self::Future<Out> 80 | where 81 | Func: FnOnce() -> Out + Send + 'static, 82 | Out: Send + 'static, 83 | { 84 | let pool = self.pool.clone(); 85 | 86 | Box::pin(async move { 87 | timer(pool.spawn(func)) 88 | .await 89 | .map_err(|_| http_signature_normalization_reqwest::Canceled) 90 | }) 91 | } 92 | } 93 | -------------------------------------------------------------------------------- /src/telegram.rs: -------------------------------------------------------------------------------- 1 | use crate::db::Db; 2 | use std::sync::Arc; 3 | use teloxide::{ 4 | dispatching::{Dispatcher, UpdateFilterExt}, 5 | requests::{Requester, ResponseResult}, 6 | types::{Message, Update}, 7 | utils::command::BotCommands, 8 | Bot, 9 | }; 10 | 11 | #[derive(BotCommands, Clone, Debug)] 12 | #[command( 13 | rename_rule = "lowercase", 14 | description = "These commands are for administering AodeRelay" 15 | )] 16 | enum Command { 17 | #[command(description = "Display this text.")] 18 | Start, 19 | 20 | #[command(description = "Display this text.")] 21 | Help, 22 | 23 | #[command(description = "Block a domain from the relay.")] 24 | Block { domain: String }, 25 | 26 | #[command(description = "Unblock a domain from the relay.")] 27 | Unblock { domain: String }, 28 | 29 | #[command(description = "Allow a domain to connect to the relay (for RESTRICTED_MODE)")] 30 | Allow { domain: String }, 31 | 32 | #[command(description = "Disallow a domain to connect to the relay (for RESTRICTED_MODE)")] 33 | Disallow { domain: String }, 34 | 35 | #[command(description = "List blocked domains")] 36 | ListBlocks, 37 | 38 | #[command(description = "List allowed domains")] 39 | ListAllowed, 40 | 41 | #[command(description = "List connected domains")] 42 | ListConnected, 43 | } 44 | 45 | pub(crate) fn start(admin_handle: String, db: Db, token: &str) { 46 | let bot = Bot::new(token); 47 | let admin_handle = Arc::new(admin_handle); 48 | 49 | tokio::spawn(async move { 50 | let command_handler = teloxide::filter_command::<Command, _>().endpoint( 51 | move |bot: Bot, msg: Message, cmd: Command| { 52 | let admin_handle = admin_handle.clone(); 53 | let db = db.clone(); 54 | 55 | async move { 56 | if !is_admin(&admin_handle, &msg) { 57 | bot.send_message(msg.chat.id, "You are not authorized") 58 | .await?; 59 | return Ok(()); 60 | } 61 | 62 | answer(bot, msg, cmd, db).await 63 | } 64 | }, 65 | ); 66 | 67 | let message_handler = Update::filter_message().branch(command_handler); 68 | 69 | Dispatcher::builder(bot, message_handler) 70 | .build() 71 | .dispatch() 72 | .await; 73 | }); 74 | } 75 | 76 | fn is_admin(admin_handle: &str, message: &Message) -> bool { 77 | message 78 |
.from() 79 | .and_then(|user| user.username.as_deref()) 80 | .map(|username| username == admin_handle) 81 | .unwrap_or(false) 82 | } 83 | 84 | #[tracing::instrument(skip(bot, msg, db))] 85 | async fn answer(bot: Bot, msg: Message, cmd: Command, db: Db) -> ResponseResult<()> { 86 | match cmd { 87 | Command::Help | Command::Start => { 88 | bot.send_message(msg.chat.id, Command::descriptions().to_string()) 89 | .await?; 90 | } 91 | Command::Block { domain } if db.add_blocks(vec![domain.clone()]).await.is_ok() => { 92 | bot.send_message(msg.chat.id, format!("{domain} has been blocked")) 93 | .await?; 94 | } 95 | Command::Unblock { domain } if db.remove_blocks(vec![domain.clone()]).await.is_ok() => { 96 | bot.send_message(msg.chat.id, format!("{domain} has been unblocked")) 97 | .await?; 98 | } 99 | Command::Allow { domain } if db.add_allows(vec![domain.clone()]).await.is_ok() => { 100 | bot.send_message(msg.chat.id, format!("{domain} has been allowed")) 101 | .await?; 102 | } 103 | Command::Disallow { domain } if db.remove_allows(vec![domain.clone()]).await.is_ok() => { 104 | bot.send_message(msg.chat.id, format!("{domain} has been disallowed")) 105 | .await?; 106 | } 107 | Command::ListAllowed => { 108 | if let Ok(allowed) = db.allows().await { 109 | bot.send_message(msg.chat.id, allowed.join("\n")).await?; 110 | } 111 | } 112 | Command::ListBlocks => { 113 | if let Ok(blocks) = db.blocks().await { 114 | bot.send_message(msg.chat.id, blocks.join("\n")).await?; 115 | } 116 | } 117 | Command::ListConnected => { 118 | if let Ok(connected) = db.connected_ids().await { 119 | bot.send_message(msg.chat.id, connected.join("\n")).await?; 120 | } 121 | } 122 | _ => { 123 | bot.send_message(msg.chat.id, "Internal server error") 124 | .await?; 125 | } 126 | } 127 | 128 | Ok(()) 129 | } 130 | -------------------------------------------------------------------------------- /systemd/example-relay.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Activitypub Relay 3 | Documentation=https://git.asonix.dog/asonix/relay 4 | Wants=network.target 5 | After=network.target 6 | 7 | [Install] 8 | WantedBy=multi-user.target 9 | 10 | [Service] 11 | Type=simple 12 | EnvironmentFile=/etc/systemd/system/example-relay.service.env 13 | ExecStart=/path/to/relay 14 | Restart=always 15 | 16 | -------------------------------------------------------------------------------- /systemd/example-relay.service.env: -------------------------------------------------------------------------------- 1 | HOSTNAME='relay.example.com' 2 | ADDR='0.0.0.0' 3 | PORT='8080' 4 | RESTRICTED_MODE='true' 5 | VALIDATE_SIGNATURES='true' 6 | HTTPS='true' 7 | PRETTY_LOG='false' 8 | PUBLISH_BLOCKS='true' 9 | DEBUG='false' 10 | SLED_PATH='/opt/sled' 11 | TELEGRAM_ADMIN_HANDLE='myhandle' 12 | RUST_BACKTRACE='full' 13 | FOOTER_BLURB='Contact @example for inquiries.' 14 | LOCAL_DOMAINS='masto.example.com' 15 | LOCAL_BLURB='

<p>An ActivityPub relay for servers. Currently running somewhere. Let me know if you want to join!</p>

' 16 | OPENTELEMETRY_URL='http://otel.example.com:4317' 17 | API_TOKEN='blahblahblahblahblahblahblah' 18 | TELEGRAM_TOKEN='blahblahblahblahblahblahblah' 19 | 20 | -------------------------------------------------------------------------------- /systemd/example-relay.socket: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Activitypub Relay Socket 3 | Before=multi-user.target 4 | After=network.target 5 | 6 | [Socket] 7 | Service=example-relay.service 8 | ListenStream=8080 9 | 10 | [Install] 11 | WantedBy=sockets.target 12 | -------------------------------------------------------------------------------- /templates/admin.rs.html: -------------------------------------------------------------------------------- 1 | @use crate::db::Contact; 2 | @use activitystreams::iri_string::types::IriString; 3 | 4 | @(contact: &Contact, base: &IriString) 5 | 6 |
7 |
8 |
9 | @contact.display_name's avatar 10 |
11 |
12 |
13 |

@contact.display_name

14 |

15 | @@@contact.username@if let Some(authority) = base.authority_str() {@@@authority} 16 |

17 |
18 |
19 | -------------------------------------------------------------------------------- /templates/index.rs.html: -------------------------------------------------------------------------------- 1 | @use crate::{ 2 | config::{Config, UrlKind}, 3 | data::Node, 4 | templates::{info_html, instance_html, statics::index_css}, 5 | }; 6 | 7 | @(local: &[Node], nodes: &[Node], config: &Config) 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | @config.hostname() | ActivityPub Relay 16 | 17 | 18 | 19 | 20 |
21 |
22 |

@Config::software_name()@Config::software_version()

23 |

on @config.hostname()

24 |
25 |
26 |
27 | @if !local.is_empty() || config.local_blurb().is_some() { 28 |
29 |

About

30 |
31 | @if let Some(blurb) = config.local_blurb() { 32 | @blurb 33 | } else { 34 |

These domains are run by the same admins as this relay.

35 | } 36 |
37 | @if !local.is_empty() { 38 |
    39 | @for node in local { 40 | @if let Some(inst) = node.instance.as_ref() { 41 |
  • 42 | @:instance_html(inst, node.info.as_ref().map(|info| { info.software.as_ref() }), node.contact.as_ref(), 43 | &node.base) 44 |
  • 45 | } else { 46 | @if let Some(inf) = node.info.as_ref() { 47 |
  • 48 | @:info_html(inf, &node.base) 49 |
  • 50 | } 51 | } 52 | } 53 |
54 | } 55 |
56 | } 57 |
58 |

Joining

59 |
60 | @if config.restricted_mode() { 61 |

62 | This relay is Restricted 63 |

64 |

65 | This relay is currently in Restricted Mode, which means servers need to be approved ahead of time by the relay 66 | administrator. Please contact the admin before attempting to join. 67 |

68 | } else { 69 |

70 | If you are the admin of a server that supports activitypub relays, you can add 71 | this relay to your server. 72 |

73 | } 74 |

Mastodon

75 |

76 | Mastodon admins can add this relay by adding 77 |

@config.generate_url(UrlKind::Inbox)
in their relay settings. 78 |

79 |

Pleroma

80 |

81 | Pleroma admins can add this relay by adding 82 |

@config.generate_url(UrlKind::Actor)
to their relay settings. 83 |

84 |

Others

85 |

86 | Consult the documentation for your server. It's likely that it follows either 87 | Mastodon or Pleroma's relay formatting. 88 |

89 |
90 |
91 | @if !nodes.is_empty() { 92 |
93 |

@nodes.len() Connected Servers

94 |
    95 | @for node in nodes { 96 | @if let Some(inst) = node.instance.as_ref() { 97 |
  • 98 | @:instance_html(inst, node.info.as_ref().map(|info| { info.software.as_ref() }), node.contact.as_ref(), 99 | &node.base) 100 |
  • 101 | } else { 102 | @if let Some(inf) = node.info.as_ref() { 103 |
  • 104 | @:info_html(inf, &node.base) 105 |
  • 106 | } 107 | } 108 | } 109 |
110 |
111 | } 112 |
113 |
114 | @if let Some(blurb) = config.footer_blurb() { 115 |
@blurb
116 | } 117 |

118 | The source code for this project can be found at 119 | @config.source_code() 120 |

121 |
122 | 123 | 124 | 125 | -------------------------------------------------------------------------------- /templates/info.rs.html: -------------------------------------------------------------------------------- 1 | @use crate::db::Info; 2 | @use activitystreams::iri_string::types::IriString; 3 | 4 | @(info: &Info, base: &IriString) 5 | 6 |
7 | @if let Some(authority) = base.authority_str() { 8 |

@authority

9 | } 10 |

11 | Running @info.software, version @info.version. 12 | @if info.reg { 13 | Registration is open 14 | } 15 |

16 |
17 | -------------------------------------------------------------------------------- /templates/instance.rs.html: -------------------------------------------------------------------------------- 1 | @use crate::{db::{Contact, Instance}, templates::admin_html}; 2 | @use activitystreams::iri_string::types::IriString; 3 | 4 | @(instance: &Instance, software: Option<&str>, contact: Option<&Contact>, base: &IriString) 5 | 6 |
7 |

@instance.title

8 |

9 | @if let Some(software) = software { 10 | Running @software, version @instance.version. 11 | } 12 | @if instance.reg { 13 |
Registration is open. 14 | @if instance.requires_approval { 15 | Accounts must be approved by an admin. 16 | } 17 | } else{ 18 | Registration is closed 19 | } 20 |

21 | @if !instance.description.trim().is_empty() || contact.is_some() { 22 |
23 | @if !instance.description.trim().is_empty() { 24 |
@instance.title's description:
25 |
26 |
27 | @Html(instance.description.trim()) 28 |
29 |
30 | } 31 | @if let Some(contact) = contact { 32 |
@instance.title's admin:
33 | @:admin_html(contact, base) 34 | } 35 |
36 | } 37 |
38 | --------------------------------------------------------------------------------