├── .cargo └── config.toml ├── .deepsource.toml ├── .devcontainer ├── Dockerfile └── devcontainer.json ├── .dockerignore ├── .env ├── .gitattributes ├── .github └── workflows │ ├── build.yml │ ├── codeql-analysis.yml │ └── docker-publish.yml ├── .gitignore ├── Cargo.lock ├── Cargo.toml ├── Cross.toml ├── Dockerfile ├── Dockerfile.cross ├── LICENSE ├── Makefile ├── README.md ├── cmd └── nova │ └── nova.go ├── docker-compose.yaml ├── exes ├── cache │ ├── Cargo.toml │ ├── README.md │ └── src │ │ ├── config.rs │ │ ├── main.rs │ │ └── managers │ │ ├── automoderation.rs │ │ ├── bans.rs │ │ ├── channels.rs │ │ ├── guild_schedules.rs │ │ ├── guilds.rs │ │ ├── integrations.rs │ │ ├── invites.rs │ │ ├── members.rs │ │ ├── messages.rs │ │ ├── mod.rs │ │ ├── reactions.rs │ │ ├── roles.rs │ │ ├── stage_instances.rs │ │ └── threads.rs ├── gateway │ ├── Cargo.toml │ └── src │ │ ├── config.rs │ │ ├── lib.rs │ │ └── main.rs ├── ratelimit │ ├── Cargo.toml │ ├── benches │ │ └── bucket.rs │ └── src │ │ ├── buckets │ │ ├── async_queue.rs │ │ ├── atomic_instant.rs │ │ ├── bucket.rs │ │ ├── mod.rs │ │ ├── noop_lock.rs │ │ └── redis_lock.rs │ │ ├── config.rs │ │ ├── grpc.rs │ │ ├── lib.rs │ │ └── main.rs ├── rest │ ├── Cargo.toml │ └── src │ │ ├── config.rs │ │ ├── handler.rs │ │ ├── lib.rs │ │ ├── main.rs │ │ └── ratelimit_client │ │ ├── mod.rs │ │ └── remote_hashring.rs └── webhook │ ├── Cargo.toml │ └── src │ ├── config.rs │ ├── handler │ ├── make_service.rs │ ├── mod.rs │ ├── signature.rs │ └── tests │ │ ├── handler.rs │ │ ├── mod.rs │ │ └── signature.rs │ ├── lib.rs │ └── main.rs ├── go.mod ├── go.sum ├── internal └── pkg │ └── all-in-one │ ├── all-in-one.go │ ├── all_in_one.h │ ├── error_handler.h │ └── handler.go ├── libs ├── all_in_one │ ├── Cargo.toml │ ├── build.rs │ └── src │ │ ├── errors.rs │ │ ├── ffi.rs │ │ ├── lib.rs │ │ └── utils.rs ├── leash │ ├── Cargo.toml │ └── src │ │ └── lib.rs ├── proto │ ├── Cargo.toml │ ├── build.rs │ └── src │ │ └── lib.rs └── shared │ ├── Cargo.toml │ └── src │ ├── config.rs │ ├── lib.rs │ ├── nats.rs │ ├── opentelemetry.rs │ ├── payloads.rs │ └── redis.rs ├── proto └── nova │ └── ratelimit │ └── ratelimiter.proto └── shell.nix /.cargo/config.toml: -------------------------------------------------------------------------------- 1 | [net] 2 | git-fetch-with-cli = true 3 | -------------------------------------------------------------------------------- /.deepsource.toml: -------------------------------------------------------------------------------- 1 | version = 1 2 | 3 | [[analyzers]] 4 | name = "rust" 5 | enabled = true 6 | 7 | [[analyzers]] 8 | name = "go" 9 | enabled = true 10 | 11 | [analyzers.meta] 12 | import_root = "github.com/discordnova/nova" 13 | dependencies_vendored = false -------------------------------------------------------------------------------- /.devcontainer/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM mcr.microsoft.com/vscode/devcontainers/base:0-focal 2 | ARG NONROOT_USER=vscode 3 | 4 | # Install required programs for the container 5 | RUN apt update -y && apt install libssl-dev pkg-config apt-transport-https curl sudo gnupg python build-essential ca-certificates lsb-release -y && \ 6 | # Add docker repository gpg keys 7 | curl -fsSL https://download.docker.com/linux/ubuntu/gpg | gpg --dearmor -o /usr/share/keyrings/docker-archive-keyring.gpg && \ 8 | # Add docker repository apt source 9 | echo "deb [arch=amd64 signed-by=/usr/share/keyrings/docker-archive-keyring.gpg] 
https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" | tee /etc/apt/sources.list.d/docker.list && \
10 | # Install docker
11 | apt update -y && apt install docker-ce-cli -y
12 | 
13 | # Add the user to the sudo group
14 | RUN adduser $NONROOT_USER sudo
15 | # Allow the use of sudo without a password
16 | RUN echo '%sudo ALL=(ALL) NOPASSWD:ALL' >> /etc/sudoers
17 | 
18 | # Startup script, configures docker for the container's non-root user
19 | RUN echo "#!/bin/sh\n\
20 | . ~/.cargo/env\n\
21 | SOCKET_GID=\$(stat -c '%g' /var/run/docker.sock) \n\
22 | if [ \"\${SOCKET_GID}\" != '0' ]; then\n\
23 | if [ \"\$(cat /etc/group | grep :\${SOCKET_GID}:)\" = '' ]; then sudo groupadd --gid \${SOCKET_GID} docker-host; fi \n\
24 | if [ \"\$(id ${NONROOT_USER} | grep -E \"groups=.*(=|,)\${SOCKET_GID}\(\")\" = '' ]; then sudo usermod -aG \${SOCKET_GID} ${NONROOT_USER}; fi\n\
25 | fi\n\
26 | exec \"\$@\"" > /usr/local/share/docker-init.sh \
27 | && chmod +x /usr/local/share/docker-init.sh
28 | 
29 | USER $NONROOT_USER
30 | 
31 | # Install the go & rust toolchains
32 | RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- --default-toolchain nightly -y && \
33 | curl https://raw.githubusercontent.com/canha/golang-tools-install-script/master/goinstall.sh | bash
34 | 
35 | ENTRYPOINT [ "/usr/local/share/docker-init.sh" ]
36 | # Required for vscode to start the server inside the container
37 | CMD [ "sleep", "infinity" ]
38 | 
-------------------------------------------------------------------------------- /.devcontainer/devcontainer.json: --------------------------------------------------------------------------------
1 | // For format details, see https://aka.ms/devcontainer.json. For config options, see the README at:
2 | // https://github.com/microsoft/vscode-dev-containers/tree/v0.191.1/containers/debian
3 | {
4 |     "name": "Ubuntu",
5 |     "build": {
6 |         "dockerfile": "Dockerfile",
7 |         // Update 'VARIANT' to pick a Debian version: bullseye, buster, stretch
8 |         "args": { "VARIANT": "focal" }
9 |     },
10 | 
11 |     // Set *default* container specific settings.json values on container create.
12 |     "settings": {},
13 | 
14 |     // Add the IDs of extensions you want installed when the container is created.
15 |     "extensions": [
16 |         "matklad.rust-analyzer",
17 |         "vadimcn.vscode-lldb",
18 |         "ms-azuretools.vscode-docker",
19 |         "golang.go",
20 |         "ms-kubernetes-tools.vscode-kubernetes-tools",
21 |         "zxh404.vscode-proto3",
22 |         "phgn.vscode-starlark",
23 |         "redhat.vscode-yaml"
24 |     ],
25 | 
26 |     // Use 'forwardPorts' to make a list of ports inside the container available locally.
27 |     // "forwardPorts": [],
28 | 
29 |     // Uncomment to use the Docker CLI from inside the container. See https://aka.ms/vscode-remote/samples/docker-from-docker.
30 |     "mounts": ["source=/var/run/docker.sock,target=/var/run/docker.sock,type=bind"],
31 | 
32 |     // Uncomment when using a ptrace-based debugger like C++, Go, and Rust
33 |     // "runArgs": [ "--cap-add=SYS_PTRACE", "--security-opt", "seccomp=unconfined" ],
34 | 
35 |     // Comment out to connect as root instead. More info: https://aka.ms/vscode-remote/containers/non-root.
36 | "remoteUser": "vscode", 37 | "overrideCommand": false, 38 | "runArgs": ["--init", "--network=host"] 39 | } -------------------------------------------------------------------------------- /.dockerignore: -------------------------------------------------------------------------------- 1 | target/ 2 | docs/ 3 | bin/ -------------------------------------------------------------------------------- /.env: -------------------------------------------------------------------------------- 1 | TAG=amd64 2 | -------------------------------------------------------------------------------- /.gitattributes: -------------------------------------------------------------------------------- 1 | * text=auto eol=lf -------------------------------------------------------------------------------- /.github/workflows/build.yml: -------------------------------------------------------------------------------- 1 | name: Rust 2 | 3 | on: 4 | push: 5 | branches: 6 | - main 7 | release: 8 | workflow_dispatch: 9 | pull_request: 10 | 11 | jobs: 12 | lint: 13 | name: Lint 14 | runs-on: ubuntu-latest 15 | steps: 16 | - name: Setup | Checkout 17 | uses: actions/checkout@v2 18 | - name: Install Protoc 19 | uses: arduino/setup-protoc@v1 20 | with: 21 | repo-token: ${{ secrets.GITHUB_TOKEN }} 22 | - name: Setup | Rust 23 | uses: ATiltedTree/setup-rust@v1 24 | with: 25 | rust-version: stable 26 | components: clippy 27 | - name: Build | Lint 28 | run: cargo clippy 29 | - uses: actions/cache@v3 30 | with: 31 | path: | 32 | ~/.cargo/bin/ 33 | ~/.cargo/registry/index/ 34 | ~/.cargo/registry/cache/ 35 | ~/.cargo/git/db/ 36 | target/ 37 | key: lint 38 | 39 | build_macos: 40 | name: 'Build for MacOS' 41 | runs-on: macos-latest 42 | 43 | steps: 44 | - uses: actions/checkout@v2 45 | - uses: actions/cache@v3 46 | with: 47 | path: | 48 | ~/.cargo/bin/ 49 | ~/.cargo/registry/index/ 50 | ~/.cargo/registry/cache/ 51 | ~/.cargo/git/db/ 52 | target/ 53 | key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }} 54 | - uses: arduino/setup-protoc@v1 55 | with: 56 | repo-token: ${{ secrets.GITHUB_TOKEN }} 57 | - uses: ATiltedTree/setup-rust@v1 58 | with: 59 | rust-version: stable 60 | - name: Build all 61 | run: | 62 | export CGO_LDFLAGS="-framework Security -framework CoreFoundation" 63 | make all 64 | - uses: actions/upload-artifact@v3 65 | with: 66 | name: macos 67 | path: build/* 68 | 69 | build_linux: 70 | name: 'Build for Linux' 71 | runs-on: ubuntu-latest 72 | strategy: 73 | fail-fast: false 74 | matrix: 75 | arch: 76 | - aarch64-unknown-linux-gnu 77 | - aarch64-unknown-linux-musl 78 | - armv7-unknown-linux-gnueabi 79 | - armv7-unknown-linux-gnueabihf 80 | - armv7-unknown-linux-musleabi 81 | - armv7-unknown-linux-musleabihf 82 | - x86_64-unknown-linux-gnu 83 | - x86_64-unknown-linux-musl 84 | - x86_64-pc-windows-gnu 85 | steps: 86 | - uses: actions/checkout@v2 87 | - uses: ATiltedTree/setup-rust@v1 88 | with: 89 | rust-version: stable 90 | - uses: actions/cache@v3 91 | with: 92 | path: | 93 | ~/.cargo/bin/ 94 | ~/.cargo/registry/index/ 95 | ~/.cargo/registry/cache/ 96 | ~/.cargo/git/db/ 97 | target/ 98 | key: ${{ runner.os }}-cargo-${{ matrix.arch }} 99 | - name: Build all 100 | run: | 101 | cargo install cross --force 102 | export CROSS_CONTAINER_ENGINE_NO_BUILDKIT=1 103 | cross build --release --target ${{ matrix.arch }} 104 | mkdir -p ./build 105 | cp target/${{ matrix.arch }}/release/* ./build/ || true 106 | rm ./build/*.{d,rlib} 107 | - uses: actions/upload-artifact@v3 108 | with: 109 | name: release-${{ matrix.arch }} 110 | 
path: build/* 111 | -------------------------------------------------------------------------------- /.github/workflows/codeql-analysis.yml: -------------------------------------------------------------------------------- 1 | # For most projects, this workflow file will not need changing; you simply need 2 | # to commit it to your repository. 3 | # 4 | # You may wish to alter this file to override the set of languages analyzed, 5 | # or to provide custom queries or build logic. 6 | # 7 | # ******** NOTE ******** 8 | # We have attempted to detect the languages in your repository. Please check 9 | # the `language` matrix defined below to confirm you have the correct set of 10 | # supported CodeQL languages. 11 | # 12 | name: "CodeQL" 13 | 14 | on: 15 | push: 16 | branches: [ main ] 17 | pull_request: 18 | # The branches below must be a subset of the branches above 19 | branches: [ main ] 20 | schedule: 21 | - cron: '31 9 * * 6' 22 | 23 | jobs: 24 | analyze: 25 | name: Analyze 26 | runs-on: ubuntu-latest 27 | permissions: 28 | actions: read 29 | contents: read 30 | security-events: write 31 | 32 | strategy: 33 | fail-fast: false 34 | matrix: 35 | language: [ 'go' ] 36 | # CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python' ] 37 | # Learn more: 38 | # https://docs.github.com/en/free-pro-team@latest/github/finding-security-vulnerabilities-and-errors-in-your-code/configuring-code-scanning#changing-the-languages-that-are-analyzed 39 | 40 | steps: 41 | - name: Checkout repository 42 | uses: actions/checkout@v2 43 | 44 | # Initializes the CodeQL tools for scanning. 45 | - name: Initialize CodeQL 46 | uses: github/codeql-action/init@v1 47 | with: 48 | languages: ${{ matrix.language }} 49 | # If you wish to specify custom queries, you can do so here or in a config file. 50 | # By default, queries listed here will override any specified in a config file. 51 | # Prefix the list here with "+" to use these queries and those in the config file. 52 | # queries: ./path/to/local/query, your-org/your-repo/queries@main 53 | 54 | # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). 
55 | # If this step fails, then you should remove it and run the build manually (see below) 56 | # This is commented, because we don't use C/C++, C# or Java 57 | # - name: Autobuild 58 | # uses: github/codeql-action/autobuild@v1 59 | 60 | - name: Perform CodeQL Analysis 61 | uses: github/codeql-action/analyze@v1 62 | -------------------------------------------------------------------------------- /.github/workflows/docker-publish.yml: -------------------------------------------------------------------------------- 1 | name: docker 2 | 3 | on: 4 | push: 5 | branches: 6 | - "main" 7 | env: 8 | # Use docker.io for Docker Hub if empty 9 | REGISTRY: ghcr.io 10 | 11 | jobs: 12 | bake: 13 | runs-on: ubuntu-latest 14 | strategy: 15 | fail-fast: false 16 | matrix: 17 | platform: 18 | - linux/amd64 19 | - linux/arm64/v8 20 | - linux/arm/v7 21 | steps: 22 | - name: Checkout 23 | uses: actions/checkout@v3 24 | 25 | - name: Set up QEMU 26 | uses: docker/setup-qemu-action@v2 27 | with: 28 | platforms: "arm64,arm" 29 | - name: Set up Docker Buildx 30 | uses: docker/setup-buildx-action@v2 31 | with: 32 | platforms: linux/amd64,linux/arm64/v8,linux/arm/v7 33 | 34 | - name: Log into registry ${{ env.REGISTRY }} 35 | uses: docker/login-action@28218f9b04b4f3f62068d7b6ce6ca5b26e35336c 36 | with: 37 | registry: ${{ env.REGISTRY }} 38 | username: ${{ github.actor }} 39 | password: ${{ secrets.GITHUB_TOKEN }} 40 | - name: Create tag 41 | run: | 42 | export TAG="${{ matrix.platform }}" 43 | export TAG=${TAG/linux\//} 44 | echo -n "TAG=${TAG//\//-}" > .env 45 | - name: Build and push 46 | uses: docker/bake-action@v2 47 | with: 48 | push: true 49 | set: | 50 | *.cache-from=type=gha 51 | *.cache-to=type=gha,mode=max 52 | *.platform=${{ matrix.platform }} 53 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .vscode 2 | target/ 3 | **/local* 4 | .ijwb 5 | .idea 6 | config.yml 7 | 8 | config/* 9 | build/ 10 | *.yml 11 | bin/ -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [workspace] 2 | members = [ 3 | "exes/cache/", 4 | "exes/gateway/", 5 | "exes/rest/", 6 | "exes/webhook/", 7 | "exes/ratelimit/", 8 | 9 | 10 | "libs/all_in_one/", 11 | "libs/proto/", 12 | "libs/shared/", 13 | "libs/leash/" 14 | ] 15 | resolver = "2" 16 | [workspace.dependencies] 17 | shared = { path = "libs/shared" } 18 | proto = { path = "libs/proto" } 19 | leash = { path = "libs/leash" } 20 | 21 | tokio = { version = "1", features = ["rt-multi-thread", "signal"] } 22 | serde = { version = "1", features = ["derive"] } 23 | serde_json = "1" 24 | hyper = "0.14.27" 25 | 26 | anyhow = "1" 27 | 28 | tracing = "0.1" 29 | tracing-futures = "0.2" 30 | tracing-opentelemetry = "0.19" 31 | opentelemetry = { version = "0.19", features = ["rt-tokio"] } 32 | opentelemetry-http = "0.8" 33 | 34 | criterion = { version = "0.5", features = ["async_tokio"] } 35 | tokio-test = "0.4.2" 36 | tracing-test = "0.2.4" 37 | tracing-subscriber = "0.3.17" 38 | test-log = { version = "*", features = ["trace"] } 39 | env_logger = "0.10.0" 40 | -------------------------------------------------------------------------------- /Cross.toml: -------------------------------------------------------------------------------- 1 | [build] 2 | pre-build = [ 3 | "dpkg --add-architecture $CROSS_DEB_ARCH", 4 | "apt-get update && apt-get 
--assume-yes install libc6-dev", 5 | ] 6 | dockerfile = "Dockerfile.cross" -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | # syntax=docker/dockerfile:1 2 | FROM --platform=$BUILDPLATFORM tonistiigi/xx:master AS xx 3 | FROM --platform=$BUILDPLATFORM rust:alpine as alpine_rbuild 4 | RUN apk add clang lld protobuf-dev build-base git 5 | # Copy the xx scripts 6 | COPY --from=xx / / 7 | # Copy source code 8 | COPY . . 9 | 10 | RUN --mount=type=cache,target=/root/.cargo/git/db \ 11 | --mount=type=cache,target=/root/.cargo/registry/cache \ 12 | --mount=type=cache,target=/root/.cargo/registry/index \ 13 | cargo fetch 14 | ARG TARGETPLATFORM 15 | RUN --mount=type=cache,target=/root/.cargo/git/db \ 16 | --mount=type=cache,target=/root/.cargo/registry/cache \ 17 | --mount=type=cache,target=/root/.cargo/registry/index \ 18 | xx-cargo build --release --target-dir ./build 19 | 20 | #Copy from the build//release folder to the out folder 21 | RUN mkdir ./out && cp ./build/*/release/* ./out || true 22 | 23 | FROM alpine AS runtime 24 | ARG COMPONENT 25 | ENV COMPONENT=${COMPONENT} 26 | COPY --from=alpine_rbuild /out/${COMPONENT} /usr/local/bin/ 27 | ENTRYPOINT /usr/local/bin/${COMPONENT} 28 | -------------------------------------------------------------------------------- /Dockerfile.cross: -------------------------------------------------------------------------------- 1 | ARG CROSS_BASE_IMAGE 2 | 3 | FROM debian as db 4 | WORKDIR /dl 5 | RUN apt-get update && apt-get install -y curl zip 6 | RUN curl -OL https://github.com/protocolbuffers/protobuf/releases/download/v21.12/protoc-21.12-linux-x86_64.zip && \ 7 | unzip protoc-21.12-linux-x86_64.zip -d protoc3 8 | 9 | FROM $CROSS_BASE_IMAGE 10 | 11 | COPY --from=db /dl/protoc3 /dl/ 12 | RUN mv /dl/bin/* /usr/local/bin/ && \ 13 | mv /dl/include/* /usr/local/include/ 14 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 
29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. 
If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. 
Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright 2021-2023 Matthieu Pignolet, Nicolas Paul, Max Charrier 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 
202 | 
-------------------------------------------------------------------------------- /Makefile: --------------------------------------------------------------------------------
1 | EXTENSION :=
2 | ifeq ($(OS),Windows_NT)
3 | EXTENSION += .exe
4 | endif
5 | PROJECTS = $(shell find exes/ -mindepth 1 -maxdepth 1 -type d -printf '%f\n')
6 | 
7 | # Static libraries
8 | target/release/lib%.a: libs/%
9 | 	cargo build --release -p $*
10 | 
11 | # Executables
12 | target/release/%$(EXTENSION):
13 | 	cargo build --release -p $*
14 | 
15 | # Copy static libraries
16 | build/lib/%: target/release/%
17 | 	@mkdir -p build/lib
18 | 	cp target/release/$* build/lib/$*
19 | 
20 | # Copy executables
21 | build/bin/%$(EXTENSION): target/release/%$(EXTENSION)
22 | 	@mkdir -p build/bin
23 | 	cp target/release/$*$(EXTENSION) build/bin/$*$(EXTENSION)
24 | 
25 | # All-in-one binary
26 | build/bin/nova$(EXTENSION): build/lib/liball_in_one.a
27 | 	@mkdir -p build/bin
28 | 	go build -a -ldflags '-s' -o build/bin/nova cmd/nova/nova.go
29 | 
30 | BINS=$(PROJECTS:%=build/bin/%$(EXTENSION))
31 | all: $(BINS) build/bin/nova$(EXTENSION)
32 | 
33 | clean:
34 | 	rm -rf build
35 | 	rm -rf $(PROJECTS:%=target/release/%$(EXTENSION))
36 | 	rm -rf target/release/liball_in_one.a
37 | 
38 | test:
39 | 	cargo test
40 | 	go test
41 | 
42 | .PHONY: clean all test
43 | 
-------------------------------------------------------------------------------- /README.md: --------------------------------------------------------------------------------
1 | # Nova
2 | 
3 | [![Build](https://github.com/discordnova/Nova/actions/workflows/build.yml/badge.svg)](https://github.com/discordnova/Nova/actions/workflows/build.yml)
4 | 
5 | ## What is Nova?
6 | 
7 | Nova is a complete framework for building reliable and stable Discord bots.
8 | It uses a service-based architecture to operate every component a Discord bot
9 | needs, such as the Discord gateway and the newer Discord webhooks used to
10 | receive interactions. With a traditional infrastructure (one sharder per
11 | process), bottlenecks appear quickly once a bot scales to several thousand
12 | shards. Nova avoids these problems with a scale-first design that makes
13 | testing, reliability and day-to-day operations easier.
14 | 
15 | ### Advantages
16 | 
17 | With the help of Nova, you can achieve a number of things, such as:
18 | 
19 | * Scaling workers independently of the number of shards
20 | * Zero-login updates
21 | * Automatic shard scaling
22 | * A shared cache for the whole bot (without `broadcastEval` and other unsafe methods)
23 | * Stateless workers that are easier to test
24 | * Distributed REST rate-limiting
25 | * Easier fine-tuned monitoring using cloud-native technologies
26 | * Language-agnostic APIs
27 | 
28 | A consumer sketch illustrating the stateless-worker model follows this list.
29 | 
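30 | As an illustration only (this sketch is not part of the repository), a worker
31 | can consume the gateway's events straight from NATS. The subject and the JSON
32 | payload format follow `exes/gateway/src/lib.rs`; the NATS address is an
33 | assumption taken from `docker-compose.yaml`.
34 | 
35 | ```rust
36 | use tokio_stream::StreamExt;
37 | 
38 | #[tokio::main]
39 | async fn main() -> anyhow::Result<()> {
40 |     // Address assumed from docker-compose.yaml.
41 |     let client = async_nats::connect("nats://localhost:4222").await?;
42 |     // The gateway publishes every dispatch event to
43 |     // `nova.cache.dispatch.<EVENT_NAME>` (see exes/gateway/src/lib.rs).
44 |     let mut sub = client.subscribe("nova.cache.dispatch.*".to_string()).await?;
45 |     while let Some(message) = sub.next().await {
46 |         // Payloads are JSON-serialized `CachePayload` values (libs/shared),
47 |         // so a worker written in any language can decode them.
48 |         let event: serde_json::Value = serde_json::from_slice(&message.payload)?;
49 |         println!("{} -> {event}", message.subject);
50 |     }
51 |     Ok(())
52 | }
53 | ```
54 | 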
55 | ### How did we solve this?
56 | 
57 | Nova separates the gateway into multiple smaller components, each one corresponding to a Discord API.
58 | 
59 | 
60 | 
61 | 
62 | [![FOSSA Status](https://app.fossa.com/api/projects/git%2Bgithub.com%2Fdiscordnova%2Fnova.svg?type=large)](https://app.fossa.com/projects/git%2Bgithub.com%2Fdiscordnova%2Fnova?ref=badge_large)
63 | 
-------------------------------------------------------------------------------- /cmd/nova/nova.go: --------------------------------------------------------------------------------
1 | package main
2 | 
3 | import (
4 | 	"os"
5 | 	"os/signal"
6 | 	"syscall"
7 | 
8 | 	allinone "github.com/discordnova/nova/internal/pkg/all-in-one"
9 | )
10 | 
11 | func main() {
12 | 	allInOne, err := allinone.NewAllInOne()
13 | 	if err != nil {
14 | 		panic(err)
15 | 	}
16 | 	err = allInOne.Start()
17 | 	if err != nil {
18 | 		panic(err)
19 | 	}
20 | 	// Wait for a termination signal (SIGHUP, SIGINT, SIGTERM or SIGQUIT)
21 | 	c := make(chan os.Signal, 1)
22 | 	signal.Notify(c,
23 | 		syscall.SIGHUP,
24 | 		syscall.SIGINT,
25 | 		syscall.SIGTERM,
26 | 		syscall.SIGQUIT)
27 | 	<-c
28 | 
29 | 	allInOne.Stop()
30 | 
31 | 	println("Stopping nova all-in-one")
32 | }
33 | 
-------------------------------------------------------------------------------- /docker-compose.yaml: --------------------------------------------------------------------------------
1 | version: "3.3"
2 | services:
3 |   nats:
4 |     image: nats
5 |     restart: always
6 |     ports:
7 |       - 4222:4222
8 |       - 8222:8222
9 | 
10 |   redis:
11 |     image: redis
12 |     ports:
13 |       - 6379:6379
14 |   mock:
15 |     image: nginx
16 |   cache:
17 |     image: ghcr.io/discordnova/nova/cache:${TAG:-latest}
18 |     restart: always
19 |     build:
20 |       context: .
21 |       args:
22 |         - COMPONENT=cache
23 |     volumes:
24 |       - ./config/default.yml:/config/default.yml
25 |     environment:
26 |       - RUST_LOG=debug
27 |     depends_on:
28 |       - nats
29 |       - redis
30 |       - otelcol
31 | 
32 |   gateway:
33 |     image: ghcr.io/discordnova/nova/gateway:${TAG:-latest}
34 |     restart: always
35 |     build:
36 |       context: .
37 |       args:
38 |         - COMPONENT=gateway
39 |     volumes:
40 |       - ./config/default.yml:/config/default.yml
41 |     environment:
42 |       - RUST_LOG=debug
43 |     depends_on:
44 |       - nats
45 |       - otelcol
46 | 
47 |   rest:
48 |     image: ghcr.io/discordnova/nova/rest:${TAG:-latest}
49 |     restart: always
50 |     build:
51 |       context: .
52 |       args:
53 |         - COMPONENT=rest
54 |     volumes:
55 |       - ./config/default.yml:/config/default.yml
56 |     environment:
57 |       - RUST_LOG=debug
58 |     depends_on:
59 |       - ratelimit
60 |       - otelcol
61 |     ports:
62 |       - 9001:9000
63 |       - 8090:8090
64 | 
65 |   webhook:
66 |     image: ghcr.io/discordnova/nova/webhook:${TAG:-latest}
67 |     restart: always
68 |     build:
69 |       context: .
70 |       args:
71 |         - COMPONENT=webhook
72 |     volumes:
73 |       - ./config/default.yml:/config/default.yml
74 |     environment:
75 |       - RUST_LOG=debug
76 |     depends_on:
77 |       - nats
78 |       - otelcol
79 |     ports:
80 |       - 9002:9000
81 |       - 8091:8091
82 |   ratelimit:
83 |     image: ghcr.io/discordnova/nova/ratelimit:${TAG:-latest}
84 |     restart: always
85 |     build:
86 |       context: .
87 |       args:
88 |         - COMPONENT=ratelimit
89 |     volumes:
90 |       - ./config/default.yml:/config/default.yml
91 |     environment:
92 |       - RUST_LOG=debug
93 |     depends_on:
94 |       - nats
95 |       - redis
96 |       - otelcol
97 | 
98 |   # ********************
99 |   # Telemetry Components
100 |   # ********************
101 |   # Jaeger
102 |   jaeger:
103 |     image: jaegertracing/all-in-one
104 |     container_name: jaeger
105 |     command:
106 |       - "--query.base-path"
107 |       - "/jaeger/ui"
108 |       - "--prometheus.server-url"
109 |       - "http://${PROMETHEUS_ADDR:-prometheus:9090}"
110 |     deploy:
111 |       resources:
112 |         limits:
113 |           memory: 275M
114 |     restart: always
115 |     ports:
116 |       - "4317" # OTLP gRPC default port
117 |     environment:
118 |       - COLLECTOR_OTLP_ENABLED=true
119 |       - METRICS_STORAGE_TYPE=prometheus
120 | 
121 |   # Grafana
122 |   grafana:
123 |     image: grafana/grafana:9.1.0
124 |     container_name: grafana
125 |     volumes:
126 |       - ./otel/grafana/grafana.ini:/etc/grafana/grafana.ini
127 |       - ./otel/grafana/provisioning/:/etc/grafana/provisioning/
128 |     ports:
129 |       - "3000:3000"
130 | 
131 |   # OpenTelemetry Collector
132 |   otelcol:
133 |     image: otel/opentelemetry-collector-contrib:0.61.0
134 |     deploy:
135 |       resources:
136 |         limits:
137 |           memory: 100M
138 |     restart: always
139 |     command: [ "--config=/etc/otelcol-config.yml", "--config=/etc/otelcol-config-extras.yml" ]
140 |     volumes:
141 |       - ./otel/otelcollector/otelcol-config.yml:/etc/otelcol-config.yml
142 |       - ./otel/otelcollector/otelcol-config-extras.yml:/etc/otelcol-config-extras.yml
143 |     ports:
144 |       - "4317:4317" # OTLP over gRPC receiver
145 |       - "4318:4318" # OTLP over HTTP receiver
146 |       - "9464"      # Prometheus exporter
147 |       - "8888"      # metrics endpoint
148 |     depends_on:
149 |       - jaeger
150 | 
151 |   # Prometheus
152 |   prometheus:
153 |     image: quay.io/prometheus/prometheus:v2.34.0
154 |     container_name: prometheus
155 |     command:
156 |       - --web.console.templates=/etc/prometheus/consoles
157 |       - --web.console.libraries=/etc/prometheus/console_libraries
158 |       - --storage.tsdb.retention.time=1h
159 |       - --config.file=/etc/prometheus/prometheus-config.yaml
160 |       - --storage.tsdb.path=/prometheus
161 |       - --web.enable-lifecycle
162 |       - --web.route-prefix=/
163 |     volumes:
164 |       - ./otel/prometheus/prometheus-config.yaml:/etc/prometheus/prometheus-config.yaml
165 | 
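A hedged note on local usage (these are standard Docker Compose commands, not a
script shipped by this repository): the CI workflow in
.github/workflows/docker-publish.yml derives TAG from the build platform and
writes it into `.env`; locally, TAG simply falls back to `latest`.

```sh
# Build and start one component; depends_on pulls in nats, redis and otelcol.
docker compose build cache
docker compose up cache

# Mimic CI, which writes TAG=<platform> into .env before building:
TAG=amd64 docker compose build
```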
-------------------------------------------------------------------------------- /exes/cache/Cargo.toml: --------------------------------------------------------------------------------
1 | [package]
2 | name = "cache"
3 | version = "0.1.0"
4 | edition = "2018"
5 | description = "Stores the data from discord if needed"
6 | readme = "README.md"
7 | repository = "https://github.com/discordnova/nova.git"
8 | keywords = ["discord", "scalable", "cache"]
9 | categories = ["microservices", "nova"]
10 | license = "Apache-2.0"
11 | 
12 | [dependencies]
13 | shared = { path = "../../libs/shared" }
14 | proto = { path = "../../libs/proto" }
15 | 
16 | tokio = { version = "1", features = ["rt"] }
17 | tokio-stream = "0.1.14"
18 | 
19 | serde = { version = "1.0.166", features = ["derive"] }
20 | serde_json = { version = "1.0" }
21 | 
22 | async-nats = "0.29.0"
23 | twilight-model = "0.15.2"
24 | anyhow = "1.0.71"
25 | 
26 | tracing = "0.1.37"
27 | 
-------------------------------------------------------------------------------- /exes/cache/README.md: --------------------------------------------------------------------------------
1 | # Cache
2 | 
3 | Stores the data received from Discord, gated by the cache toggles in the configuration.
-------------------------------------------------------------------------------- /exes/cache/src/config.rs: --------------------------------------------------------------------------------
1 | use serde::Deserialize;
2 | #[derive(Debug, Deserialize, Clone, Default)]
3 | pub struct CacheConfiguration {
4 |     pub toggles: Vec<String>,
5 | }
6 | 
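For reference, a hypothetical `config/default.yml` fragment enabling a few of
the toggles matched in `src/main.rs` below. The toggle names are taken from
main.rs; the exact nesting expected by the shared `Settings` loader is an
assumption.

```yaml
cache:
  toggles:
    - channels_cache
    - guilds_cache
    - messages_cache
```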
-------------------------------------------------------------------------------- /exes/cache/src/main.rs: --------------------------------------------------------------------------------
1 | use std::{error::Error, future::Future, pin::Pin};
2 | 
3 | use async_nats::{Client, Subscriber};
4 | use managers::{
5 |     automoderation::Automoderation, bans::Bans, channels::Channels,
6 |     guild_schedules::GuildSchedules, guilds::Guilds, integrations::Integrations, invites::Invites,
7 |     members::Members, messages::Messages, reactions::Reactions, roles::Roles,
8 |     stage_instances::StageInstances, threads::Threads, CacheManager,
9 | };
10 | use shared::{config::Settings, payloads::CachePayload};
11 | use tokio_stream::StreamExt;
12 | use tracing::info;
13 | use twilight_model::gateway::event::DispatchEvent;
14 | 
15 | use crate::config::CacheConfiguration;
16 | 
17 | mod config;
18 | mod managers;
19 | 
20 | pub enum CacheSourcedEvents {
21 |     None,
22 | }
23 | 
24 | #[derive(Default)]
25 | struct Cache {
26 |     automoderation: Automoderation,
27 |     channels: Channels,
28 |     bans: Bans,
29 |     guild_schedules: GuildSchedules,
30 |     guilds: Guilds,
31 |     integrations: Integrations,
32 |     invites: Invites,
33 |     members: Members,
34 |     messages: Messages,
35 |     reactions: Reactions,
36 |     roles: Roles,
37 |     stage_instances: StageInstances,
38 |     threads: Threads,
39 | }
40 | 
41 | #[tokio::main]
42 | async fn main() -> Result<(), Box<dyn Error>> {
43 |     let settings: Settings<CacheConfiguration> = Settings::new("cache").unwrap();
44 |     info!("loaded configuration: {:?}", settings);
45 |     let nats =
46 |         Into::<Pin<Box<dyn Future<Output = anyhow::Result<Client>> + Send>>>::into(settings.nats)
47 |             .await?;
48 | 
49 |     let mut cache = Cache::default();
50 | 
51 |     let mut sub = nats.subscribe("nova.cache.dispatch.*".to_string()).await?;
52 |     listen(&mut sub, &mut cache, settings.config.toggles).await;
53 |     Ok(())
54 | }
55 | 
56 | async fn listen(sub: &mut Subscriber, cache: &mut Cache, features: Vec<String>) {
57 |     while let Some(data) = sub.next().await {
58 |         let cp: CachePayload = serde_json::from_slice(&data.payload).unwrap();
59 |         let event = cp.data.0;
60 |         match event {
61 |             // Channel events
62 |             DispatchEvent::ChannelCreate(_)
63 |             | DispatchEvent::ChannelDelete(_)
64 |             | DispatchEvent::ChannelPinsUpdate(_)
65 |             | DispatchEvent::ChannelUpdate(_)
66 |                 if features.contains(&"channels_cache".to_string()) =>
67 |             {
68 |                 cache.channels.handle(event).await;
69 |             }
70 | 
71 |             // Guild Cache
72 |             DispatchEvent::GuildCreate(_)
73 |             | DispatchEvent::GuildDelete(_)
74 |             | DispatchEvent::UnavailableGuild(_)
75 |             | DispatchEvent::GuildUpdate(_)
76 |             | DispatchEvent::WebhooksUpdate(_)
77 |             | DispatchEvent::GuildStickersUpdate(_)
78 |             | DispatchEvent::GuildEmojisUpdate(_)
79 |             | DispatchEvent::VoiceServerUpdate(_)
80 |             | DispatchEvent::GuildIntegrationsUpdate(_)
81 |             | DispatchEvent::CommandPermissionsUpdate(_)
82 |                 if features.contains(&"guilds_cache".to_string()) =>
83 |             {
84 |                 cache.guilds.handle(event).await;
85 |             }
86 | 
87 |             // Guild Scheduled event
88 |             DispatchEvent::GuildScheduledEventCreate(_)
89 |             | DispatchEvent::GuildScheduledEventDelete(_)
90 |             | DispatchEvent::GuildScheduledEventUpdate(_)
91 |             | DispatchEvent::GuildScheduledEventUserAdd(_)
92 |             | DispatchEvent::GuildScheduledEventUserRemove(_)
93 |                 if features.contains(&"guild_schedules_cache".to_string()) =>
94 |             {
95 |                 cache.guild_schedules.handle(event).await;
96 |             }
97 | 
98 |             // Stage events
99 |             DispatchEvent::StageInstanceCreate(_)
100 |             | DispatchEvent::StageInstanceDelete(_)
101 |             | DispatchEvent::StageInstanceUpdate(_)
102 |                 if features.contains(&"stage_instances_cache".to_string()) =>
103 |             {
104 |                 cache.stage_instances.handle(event).await;
105 |             }
106 | 
107 |             // Integration events
108 |             DispatchEvent::IntegrationCreate(_)
109 |             | DispatchEvent::IntegrationDelete(_)
110 |             | DispatchEvent::IntegrationUpdate(_)
111 |             | DispatchEvent::InteractionCreate(_)
112 |                 if features.contains(&"integrations_cache".to_string()) =>
113 |             {
114 |                 cache.integrations.handle(event).await;
115 |             }
116 | 
117 |             // Member events
118 |             DispatchEvent::MemberAdd(_)
119 |             | DispatchEvent::MemberRemove(_)
120 |             | DispatchEvent::MemberUpdate(_)
121 |             | DispatchEvent::MemberChunk(_)
122 |             | DispatchEvent::UserUpdate(_)
123 |                 if features.contains(&"members_cache".to_string()) =>
124 |             {
125 |                 cache.members.handle(event).await;
126 |             }
127 | 
128 |             // Ban cache
129 |             DispatchEvent::BanAdd(_) | DispatchEvent::BanRemove(_)
130 |                 if features.contains(&"bans_cache".to_string()) =>
131 |             {
132 |                 cache.bans.handle(event).await;
133 |             }
134 | 
135 |             // Reaction cache
136 |             DispatchEvent::ReactionAdd(_)
137 |             | DispatchEvent::ReactionRemove(_)
138 |             | DispatchEvent::ReactionRemoveAll(_)
139 |             | DispatchEvent::ReactionRemoveEmoji(_)
140 |                 if features.contains(&"reactions_cache".to_string()) =>
141 |             {
142 |                 cache.reactions.handle(event).await;
143 |             }
144 | 
145 |             // Message cache
146 |             DispatchEvent::MessageCreate(_)
147 |             | DispatchEvent::MessageDelete(_)
148 |             | DispatchEvent::MessageDeleteBulk(_)
149 |             | DispatchEvent::MessageUpdate(_)
150 |                 if features.contains(&"messages_cache".to_string()) =>
151 |             {
152 |                 cache.messages.handle(event).await;
153 |             }
154 | 
155 |             // Thread cache
156 |             DispatchEvent::ThreadCreate(_)
157 |             | DispatchEvent::ThreadDelete(_)
158 |             | DispatchEvent::ThreadListSync(_)
159 |             | DispatchEvent::ThreadMemberUpdate(_)
160 |             | DispatchEvent::ThreadMembersUpdate(_)
161 |             | DispatchEvent::ThreadUpdate(_)
162 |                 if features.contains(&"threads_cache".to_string()) =>
163 |             {
164 |                 cache.threads.handle(event).await;
165 |             }
166 | 
167 |             // Invite cache
168 |             DispatchEvent::InviteCreate(_) | DispatchEvent::InviteDelete(_)
169 |                 if features.contains(&"invites_cache".to_string()) =>
170 |             {
171 |                 cache.invites.handle(event).await;
172 |             }
173 | 
174 |             // Roles cache
175 |             DispatchEvent::RoleCreate(_)
176 |             | DispatchEvent::RoleDelete(_)
177 |             | DispatchEvent::RoleUpdate(_)
178 |                 if features.contains(&"roles_cache".to_string()) =>
179 |             {
180 |                 cache.roles.handle(event).await;
181 |             }
182 | 
183 |             // Automod rules
184 |             DispatchEvent::AutoModerationRuleCreate(_)
185 |             | DispatchEvent::AutoModerationRuleDelete(_)
186 |             | DispatchEvent::AutoModerationRuleUpdate(_)
187 |                 if features.contains(&"automoderation_cache".to_string()) =>
188 |             {
189 |                 cache.automoderation.handle(event).await;
190 |             }
191 | 
192 |             // Voice State
193 |             DispatchEvent::VoiceStateUpdate(_)
194 |                 if features.contains(&"voice_states_cache".to_string()) => {}
195 | 
196 |             _ => {
197 |                 // just forward
198 |             }
199 |         }
200 |     }
201 | }
202 | 
-------------------------------------------------------------------------------- /exes/cache/src/managers/automoderation.rs: --------------------------------------------------------------------------------
1 | use twilight_model::gateway::event::DispatchEvent;
2 | 
3 | use crate::CacheSourcedEvents;
4 | 
5 | use super::CacheManager;
6 | use std::future::Future;
7 | 
8 | #[derive(Default)]
9 | pub struct Automoderation {}
10 | impl CacheManager for Automoderation {
11 |     fn handle(
12 |         &self,
13 |         event: twilight_model::gateway::event::DispatchEvent,
14 |     ) -> std::pin::Pin<Box<dyn Future<Output = CacheSourcedEvents>>> {
15 |         Box::pin(async move {
16 |             match event {
17 |                 DispatchEvent::AutoModerationRuleCreate(_) => {}
18 |                 DispatchEvent::AutoModerationRuleDelete(_) => {}
19 |                 DispatchEvent::AutoModerationRuleUpdate(_) => {}
20 |                 _ => unreachable!(),
21 |             };
22 | 
23 |             CacheSourcedEvents::None
24 |         })
25 |     }
26 | }
27 | 
-------------------------------------------------------------------------------- /exes/cache/src/managers/bans.rs: --------------------------------------------------------------------------------
1 | use twilight_model::gateway::event::DispatchEvent;
2 | 
3 | use crate::CacheSourcedEvents;
4 | 
5 | use super::CacheManager;
6 | use std::future::Future;
7 | 
8 | #[derive(Default)]
9 | pub struct Bans {}
10 | impl CacheManager for Bans {
11 |     fn handle(
12 |         &self,
13 |         event: twilight_model::gateway::event::DispatchEvent,
14 |     ) -> std::pin::Pin<Box<dyn Future<Output = CacheSourcedEvents>>> {
15 |         Box::pin(async move {
16 |             match event {
17 |                 DispatchEvent::BanAdd(_) => {}
18 |                 DispatchEvent::BanRemove(_) => {}
19 |                 _ => unreachable!(),
20 |             };
21 | 
22 |             CacheSourcedEvents::None
23 |         })
24 |     }
25 | }
26 | 
-------------------------------------------------------------------------------- /exes/cache/src/managers/channels.rs: --------------------------------------------------------------------------------
1 | use twilight_model::gateway::event::DispatchEvent;
2 | 
3 | use crate::CacheSourcedEvents;
4 | 
5 | use super::CacheManager;
6 | use std::future::Future;
7 | 
8 | #[derive(Default)]
9 | pub struct Channels {}
10 | impl CacheManager for Channels {
11 |     fn handle(
12 |         &self,
13 |         event: twilight_model::gateway::event::DispatchEvent,
14 |     ) -> std::pin::Pin<Box<dyn Future<Output = CacheSourcedEvents>>> {
15 |         Box::pin(async move {
16 |             match event {
17 |                 DispatchEvent::ChannelCreate(_) => {}
18 |                 DispatchEvent::ChannelDelete(_) => {}
19 |                 DispatchEvent::ChannelPinsUpdate(_) => {}
20 |                 DispatchEvent::ChannelUpdate(_) => {}
21 |                 _ => unreachable!(),
22 |             };
23 | 
24 |             CacheSourcedEvents::None
25 |         })
26 |     }
27 | }
28 | 
-------------------------------------------------------------------------------- /exes/cache/src/managers/guild_schedules.rs: --------------------------------------------------------------------------------
1 | use twilight_model::gateway::event::DispatchEvent;
2 | 
3 | use crate::CacheSourcedEvents;
4 | 
5 | use super::CacheManager;
6 | use std::future::Future;
7 | 
8 | #[derive(Default)]
9 | pub struct GuildSchedules {}
10 | impl CacheManager for GuildSchedules {
11 |     fn handle(
12 |         &self,
13 |         event: twilight_model::gateway::event::DispatchEvent,
14 |     ) -> std::pin::Pin<Box<dyn Future<Output = CacheSourcedEvents>>> {
15 |         Box::pin(async move {
16 |             match event {
17 |                 DispatchEvent::GuildScheduledEventCreate(_) => {}
18 |                 DispatchEvent::GuildScheduledEventDelete(_) => {}
19 |                 DispatchEvent::GuildScheduledEventUpdate(_) => {}
20 |                 DispatchEvent::GuildScheduledEventUserAdd(_) => {}
21 |                 DispatchEvent::GuildScheduledEventUserRemove(_) => {}
22 |                 _ => unreachable!(),
23 |             };
24 | 
25 |             CacheSourcedEvents::None
26 |         })
27 |     }
28 | }
29 | 
-------------------------------------------------------------------------------- /exes/cache/src/managers/guilds.rs: --------------------------------------------------------------------------------
1 | use twilight_model::gateway::event::DispatchEvent;
2 | 
3 | use crate::CacheSourcedEvents;
4 | 
5 | use super::CacheManager;
6 | use std::future::Future;
7 | 
8 | #[derive(Default)]
9 | pub struct Guilds {}
10 | impl CacheManager for Guilds {
11 |     fn handle(
12 |         &self,
13 |         event: twilight_model::gateway::event::DispatchEvent,
14 |     ) -> std::pin::Pin<Box<dyn Future<Output = CacheSourcedEvents>>> {
15 |         Box::pin(async move {
16 |             match event {
17 |                 DispatchEvent::GuildCreate(_) => {}
18 |                 DispatchEvent::GuildDelete(_) => {}
19 |                 DispatchEvent::UnavailableGuild(_) => {}
20 |                 DispatchEvent::GuildUpdate(_) => {}
21 |                 DispatchEvent::WebhooksUpdate(_) => {}
22 |                 DispatchEvent::GuildStickersUpdate(_) => {}
23 |                 DispatchEvent::GuildEmojisUpdate(_) => {}
24 |                 DispatchEvent::VoiceServerUpdate(_) => {}
25 |                 DispatchEvent::GuildIntegrationsUpdate(_) => {}
26 |                 DispatchEvent::CommandPermissionsUpdate(_) => {}
27 |                 _ => unreachable!(),
28 |             };
29 | 
30 |             CacheSourcedEvents::None
31 |         })
32 |     }
33 | }
34 | 
-------------------------------------------------------------------------------- /exes/cache/src/managers/integrations.rs: --------------------------------------------------------------------------------
1 | use twilight_model::gateway::event::DispatchEvent;
2 | 
3 | use crate::CacheSourcedEvents;
4 | 
5 | use super::CacheManager;
6 | use std::future::Future;
7 | 
8 | #[derive(Default)]
9 | pub struct Integrations {}
10 | impl CacheManager for Integrations {
11 |     fn handle(
12 |         &self,
13 |         event: twilight_model::gateway::event::DispatchEvent,
14 |     ) -> std::pin::Pin<Box<dyn Future<Output = CacheSourcedEvents>>> {
15 |         Box::pin(async move {
16 |             match event {
17 |                 DispatchEvent::IntegrationCreate(_) => {}
18 |                 DispatchEvent::IntegrationDelete(_) => {}
19 |                 DispatchEvent::IntegrationUpdate(_) => {}
20 |                 DispatchEvent::InteractionCreate(_) => {}
21 |                 _ => unreachable!(),
22 |             };
23 | 
24 |             CacheSourcedEvents::None
25 |         })
26 |     }
27 | }
28 | 
-------------------------------------------------------------------------------- /exes/cache/src/managers/invites.rs: --------------------------------------------------------------------------------
1 | use twilight_model::gateway::event::DispatchEvent;
2 | 
3 | use crate::CacheSourcedEvents;
4 | 
5 | use super::CacheManager;
6 | use std::future::Future;
7 | 
8 | #[derive(Default)]
9 | pub struct Invites {}
10 | impl CacheManager for Invites {
11 |     fn handle(
12 |         &self,
13 |         event: twilight_model::gateway::event::DispatchEvent,
14 |     ) -> std::pin::Pin<Box<dyn Future<Output = CacheSourcedEvents>>> {
15 |         Box::pin(async move {
16 |             match event {
17 |                 DispatchEvent::InviteCreate(_) => {}
18 |                 DispatchEvent::InviteDelete(_) => {}
19 |                 _ => unreachable!(),
20 |             };
21 | 
22 |             CacheSourcedEvents::None
23 |         })
24 |     }
25 | }
26 | 
-------------------------------------------------------------------------------- /exes/cache/src/managers/members.rs: --------------------------------------------------------------------------------
1 | use twilight_model::gateway::event::DispatchEvent;
2 | 
3 | use crate::CacheSourcedEvents;
4 | 
5 | use super::CacheManager;
6 | use std::future::Future;
7 | 
8 | #[derive(Default)]
9 | pub struct Members {}
10 | impl CacheManager for Members {
11 |     fn handle(
12 |         &self,
13 |         event: twilight_model::gateway::event::DispatchEvent,
14 |     ) -> std::pin::Pin<Box<dyn Future<Output = CacheSourcedEvents>>> {
15 |         Box::pin(async move {
16 |             match event {
17 |                 DispatchEvent::MemberAdd(_) => {}
18 |                 DispatchEvent::MemberRemove(_) => {}
19 |                 DispatchEvent::MemberUpdate(_) => {}
20 |                 DispatchEvent::MemberChunk(_) => {}
21 |                 DispatchEvent::UserUpdate(_) => {}
22 |                 _ => unreachable!(),
23 |             };
24 | 
25 |             CacheSourcedEvents::None
26 |         })
27 |     }
28 | }
29 | 
-------------------------------------------------------------------------------- /exes/cache/src/managers/messages.rs: --------------------------------------------------------------------------------
1 | use twilight_model::gateway::event::DispatchEvent;
2 | 
3 | use crate::CacheSourcedEvents;
4 | 
5 | use super::CacheManager;
6 | use std::future::Future;
7 | 
8 | #[derive(Default)]
9 | pub struct Messages {}
10 | impl CacheManager for Messages {
11 |     fn handle(
12 |         &self,
13 |         event: twilight_model::gateway::event::DispatchEvent,
14 |     ) -> std::pin::Pin<Box<dyn Future<Output = CacheSourcedEvents>>> {
15 |         Box::pin(async move {
16 |             match event {
17 |                 DispatchEvent::MessageCreate(_) => {}
18 |                 DispatchEvent::MessageDelete(_) => {}
19 |                 DispatchEvent::MessageDeleteBulk(_) => {}
20 |                 DispatchEvent::MessageUpdate(_) => {}
21 |                 _ => unreachable!(),
22 |             };
23 | 
24 |             CacheSourcedEvents::None
25 |         })
26 |     }
27 | }
28 | 
-------------------------------------------------------------------------------- /exes/cache/src/managers/mod.rs: --------------------------------------------------------------------------------
1 | use std::future::Future;
2 | use std::pin::Pin;
3 | use twilight_model::gateway::event::DispatchEvent;
4 | 
5 | use crate::CacheSourcedEvents;
6 | 
7 | pub mod automoderation;
8 | pub mod bans;
9 | pub mod channels;
10 | pub mod guild_schedules;
11 | pub mod guilds;
12 | pub mod integrations;
13 | pub mod invites;
14 | pub mod members;
15 | pub mod messages;
16 | pub mod reactions;
17 | pub mod roles;
18 | pub mod stage_instances;
19 | pub mod threads;
20 | 
21 | pub trait CacheManager {
22 |     fn handle(&self, event: DispatchEvent) -> Pin<Box<dyn Future<Output = CacheSourcedEvents>>>;
23 | }
24 | 
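Every manager in this crate is currently a stub: it pattern-matches the events
routed to it and returns `CacheSourcedEvents::None`. For illustration only, a
hypothetical stateful manager is sketched here. Because `handle` takes `&self`
and returns a boxed (implicitly 'static) future, the future cannot borrow
`self`, so state is mutated synchronously behind a `Mutex` before the ready
future is returned. `DispatchEvent::kind()` is assumed to mirror the
`Event::kind()` call used in exes/gateway/src/lib.rs.

```rust
use std::collections::HashMap;
use std::future::Future;
use std::pin::Pin;
use std::sync::Mutex;

use twilight_model::gateway::event::DispatchEvent;

use crate::{managers::CacheManager, CacheSourcedEvents};

/// Hypothetical manager counting how many times each event kind was seen.
#[derive(Default)]
pub struct EventCounters {
    counts: Mutex<HashMap<String, u64>>,
}

impl CacheManager for EventCounters {
    fn handle(&self, event: DispatchEvent) -> Pin<Box<dyn Future<Output = CacheSourcedEvents>>> {
        // Mutate synchronously: the returned future must not borrow `self`.
        if let Some(name) = event.kind().name() {
            *self.counts.lock().unwrap().entry(name.to_owned()).or_default() += 1;
        }
        Box::pin(async move { CacheSourcedEvents::None })
    }
}
```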
-------------------------------------------------------------------------------- /exes/cache/src/managers/reactions.rs: --------------------------------------------------------------------------------
1 | use twilight_model::gateway::event::DispatchEvent;
2 | 
3 | use crate::CacheSourcedEvents;
4 | 
5 | use super::CacheManager;
6 | use std::future::Future;
7 | 
8 | #[derive(Default)]
9 | pub struct Reactions {}
10 | impl CacheManager for Reactions {
11 |     fn handle(
12 |         &self,
13 |         event: twilight_model::gateway::event::DispatchEvent,
14 |     ) -> std::pin::Pin<Box<dyn Future<Output = CacheSourcedEvents>>> {
15 |         Box::pin(async move {
16 |             match event {
17 |                 DispatchEvent::ReactionAdd(_) => {}
18 |                 DispatchEvent::ReactionRemove(_) => {}
19 |                 DispatchEvent::ReactionRemoveAll(_) => {}
20 |                 DispatchEvent::ReactionRemoveEmoji(_) => {}
21 |                 _ => unreachable!(),
22 |             };
23 | 
24 |             CacheSourcedEvents::None
25 |         })
26 |     }
27 | }
28 | 
-------------------------------------------------------------------------------- /exes/cache/src/managers/roles.rs: --------------------------------------------------------------------------------
1 | use twilight_model::gateway::event::DispatchEvent;
2 | 
3 | use crate::CacheSourcedEvents;
4 | 
5 | use super::CacheManager;
6 | use std::future::Future;
7 | 
8 | #[derive(Default)]
9 | pub struct Roles {}
10 | impl CacheManager for Roles {
11 |     fn handle(
12 |         &self,
13 |         event: twilight_model::gateway::event::DispatchEvent,
14 |     ) -> std::pin::Pin<Box<dyn Future<Output = CacheSourcedEvents>>> {
15 |         Box::pin(async move {
16 |             match event {
17 |                 DispatchEvent::RoleCreate(_) => {}
18 |                 DispatchEvent::RoleDelete(_) => {}
19 |                 DispatchEvent::RoleUpdate(_) => {}
20 |                 _ => unreachable!(),
21 |             };
22 | 
23 |             CacheSourcedEvents::None
24 |         })
25 |     }
26 | }
27 | 
-------------------------------------------------------------------------------- /exes/cache/src/managers/stage_instances.rs: --------------------------------------------------------------------------------
1 | use twilight_model::gateway::event::DispatchEvent;
2 | 
3 | use crate::CacheSourcedEvents;
4 | 
5 | use super::CacheManager;
6 | use std::future::Future;
7 | 
8 | #[derive(Default)]
9 | pub struct StageInstances {}
10 | impl CacheManager for StageInstances {
11 |     fn handle(
12 |         &self,
13 |         event: twilight_model::gateway::event::DispatchEvent,
14 |     ) -> std::pin::Pin<Box<dyn Future<Output = CacheSourcedEvents>>> {
15 |         Box::pin(async move {
16 |             match event {
17 |                 DispatchEvent::StageInstanceCreate(_) => {}
18 |                 DispatchEvent::StageInstanceDelete(_) => {}
19 |                 DispatchEvent::StageInstanceUpdate(_) => {}
20 |                 _ => unreachable!(),
21 |             };
22 | 
23 |             CacheSourcedEvents::None
24 |         })
25 |     }
26 | }
27 | 
-------------------------------------------------------------------------------- /exes/cache/src/managers/threads.rs: --------------------------------------------------------------------------------
1 | use twilight_model::gateway::event::DispatchEvent;
2 | 
3 | use crate::CacheSourcedEvents;
4 | 
5 | use super::CacheManager;
6 | use std::future::Future;
7 | 
8 | #[derive(Default)]
9 | pub struct Threads {}
10 | impl CacheManager for Threads {
11 |     fn handle(
12 |         &self,
13 |         event: twilight_model::gateway::event::DispatchEvent,
14 |     ) -> std::pin::Pin<Box<dyn Future<Output = CacheSourcedEvents>>> {
15 |         Box::pin(async move {
16 |             match event {
17 |                 DispatchEvent::ThreadCreate(_) => {}
18 |                 DispatchEvent::ThreadDelete(_) => {}
19 |                 DispatchEvent::ThreadListSync(_) => {}
20 |                 DispatchEvent::ThreadMemberUpdate(_) => {}
21 |                 DispatchEvent::ThreadMembersUpdate(_) => {}
22 |                 DispatchEvent::ThreadUpdate(_) => {}
23 |                 _ => unreachable!(),
24 |             };
25 | 
26 |             CacheSourcedEvents::None
27 |         })
28 |     }
29 | }
30 | 
-------------------------------------------------------------------------------- /exes/gateway/Cargo.toml: --------------------------------------------------------------------------------
1 | [package]
2 | name = "gateway"
3 | version = "0.1.0"
4 | edition = "2018"
5 | 
6 | [dependencies]
7 | shared = { workspace = true }
8 | proto = { workspace = true }
9 | leash = { workspace = true }
10 | tracing-opentelemetry = { workspace = true }
11 | opentelemetry = { workspace = true }
12 | opentelemetry-http = { workspace = true }
13 | tracing = { workspace = true }
14 | tracing-futures = { workspace = true }
15 | tokio = { workspace = true }
16 | anyhow = { workspace = true }
17 | serde = { workspace = true }
18 | serde_json = { workspace = true }
19 | 
20 | tokio-stream = "0.1.14"
21 | twilight-gateway = { default-features = false, features = ["rustls-webpki-roots"], version = "0.15.2" }
22 | twilight-model = "0.15.2"
23 | bytes = "1.4.0"
24 | async-nats = "0.29.0"
25 | 
-------------------------------------------------------------------------------- /exes/gateway/src/config.rs: --------------------------------------------------------------------------------
1 | use serde::{Deserialize, Serialize};
2 | use twilight_gateway::Intents;
3 | 
4 | #[derive(Serialize, Deserialize, Clone)]
5 | pub struct Gateway {
6 |     pub token: String,
7 |     pub intents: Intents,
8 |     pub shard: u64,
9 |     pub shard_total: u64,
10 | }
11 | 
12 | impl Default for Gateway {
13 |     fn default() -> Self {
14 |         Self {
15 |             intents: Intents::empty(),
16 |             token: String::default(),
17 |             shard_total: 1,
18 |             // Shard ids are zero-based; ShardId::new panics when shard >= shard_total.
19 |             shard: 0,
20 |         }
21 |     }
22 | }
23 | 
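For reference, a hypothetical `config/default.yml` fragment for the gateway.
Field names mirror the `Gateway` struct above; the `intents` value is assumed
to serialize as a bitflags integer (1 = GUILDS, 512 = GUILD_MESSAGES), and the
nesting under a `gateway:` key follows the `Settings::new("cache")` pattern
used by the cache component.

```yaml
gateway:
  token: "<bot token>"
  intents: 513
  shard: 0
  shard_total: 1
```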
-------------------------------------------------------------------------------- /exes/gateway/src/lib.rs: --------------------------------------------------------------------------------
1 | #![deny(
2 |     clippy::all,
3 |     clippy::correctness,
4 |     clippy::suspicious,
5 |     clippy::style,
6 |     clippy::complexity,
7 |     clippy::perf,
8 |     clippy::pedantic,
9 |     clippy::nursery,
10 |     unsafe_code
11 | )]
12 | #![allow(clippy::redundant_pub_crate)]
13 | use async_nats::{Client, HeaderMap, HeaderValue};
14 | use config::Gateway;
15 | use leash::{AnyhowResultFuture, Component};
16 | use opentelemetry::{global, propagation::Injector};
17 | use shared::{
18 |     config::Settings,
19 |     payloads::{CachePayload, DispatchEventTagged},
20 | };
21 | use std::{convert::TryFrom, future::Future, pin::Pin, str::FromStr};
22 | use tokio::{select, sync::oneshot};
23 | use tracing_opentelemetry::OpenTelemetrySpanExt;
24 | use twilight_gateway::{Event, Shard, ShardId};
25 | pub mod config;
26 | use tracing::{debug, error, info, info_span, instrument, Instrument};
27 | use twilight_model::gateway::event::DispatchEvent;
28 | 
29 | struct MetadataMap<'a>(&'a mut HeaderMap);
30 | 
31 | impl<'a> Injector for MetadataMap<'a> {
32 |     fn set(&mut self, key: &str, value: String) {
33 |         self.0.insert(key, HeaderValue::from_str(&value).unwrap());
34 |     }
35 | }
36 | 
37 | pub struct GatewayServer {}
38 | 
39 | impl Component for GatewayServer {
40 |     type Config = Gateway;
41 |     const SERVICE_NAME: &'static str = "gateway";
42 | 
43 |     fn start(
44 |         &self,
45 |         settings: Settings<Self::Config>,
46 |         mut stop: oneshot::Receiver<()>,
47 |     ) -> AnyhowResultFuture<()> {
48 |         Box::pin(async move {
49 |             let mut shard = Shard::new(
50 |                 ShardId::new(settings.shard, settings.shard_total),
51 |                 settings.token.clone(),
52 |                 settings.intents,
53 |             );
54 | 
55 |             let nats = Into::<Pin<Box<dyn Future<Output = anyhow::Result<Client>> + Send>>>::into(
56 |                 settings.nats,
57 |             )
58 |             .await?;
59 | 
60 |             loop {
61 |                 select! {
62 |                     event = shard.next_event() => {
63 |                         match event {
64 |                             Ok(event) => {
65 |                                 let _ = handle_event(event, &nats)
66 |                                     .await
67 |                                     .map_err(|err| error!(error = ?err, "event publish failed"));
68 |                             },
69 |                             Err(source) => {
70 |                                 if source.is_fatal() {
71 |                                     break;
72 |                                 }
73 |                                 continue;
74 |                             }
75 |                         }
76 |                     },
77 |                     _ = (&mut stop) => break
78 |                 };
79 |             }
80 | 
81 |             info!("stopping shard...");
82 |             Ok(())
83 |         })
84 |     }
85 | 
86 |     fn new() -> Self {
87 |         Self {}
88 |     }
89 | }
90 | 
91 | #[instrument]
92 | async fn handle_event(event: Event, nats: &Client) -> anyhow::Result<()> {
93 |     if let Event::Ready(ready) = event {
94 |         info!(username = ready.user.name, "logged in");
95 |     } else {
96 |         let name = event.kind().name();
97 |         if let Ok(dispatch_event) = DispatchEvent::try_from(event) {
98 |             let name = name.unwrap();
99 |             debug!(event_name = name, "handling dispatch event");
100 | 
101 |             let data = CachePayload {
102 |                 data: DispatchEventTagged(dispatch_event),
103 |             };
104 |             let value = serde_json::to_string(&data)?;
105 |             let bytes = bytes::Bytes::from(value);
106 | 
107 |             let span = info_span!("nats send");
108 | 
109 |             let mut header_map = HeaderMap::new();
110 |             let context = span.context();
111 |             global::get_text_map_propagator(|propagator| {
112 |                 propagator.inject_context(&context, &mut MetadataMap(&mut header_map));
113 |             });
114 | 
115 |             nats.publish_with_headers(format!("nova.cache.dispatch.{name}"), header_map, bytes)
116 |                 .instrument(info_span!("sending to nats"))
117 |                 .await?;
118 |         }
119 |     }
120 | 
121 |     Ok(())
122 | }
123 | 
-------------------------------------------------------------------------------- /exes/gateway/src/main.rs: --------------------------------------------------------------------------------
1 | use gateway::GatewayServer;
2 | use leash::ignite;
3 | 
4 | ignite!(GatewayServer);
5 | 
-------------------------------------------------------------------------------- /exes/ratelimit/Cargo.toml: --------------------------------------------------------------------------------
1 | [package]
2 | name = "ratelimit"
3 | version = "0.1.0"
4 | edition = "2021"
5 | 
6 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
7 | 
8 | [dependencies]
9 | shared = { workspace = true }
10 | proto = { workspace = true }
11 | leash = { workspace = true }
12 | tracing-opentelemetry = { workspace = true }
13 | opentelemetry = { workspace = true }
14 | opentelemetry-http = { workspace = true }
15 | tracing = { workspace = true }
16 | tracing-futures = { workspace = true }
17 | tokio = { workspace = true }
18 | anyhow = { workspace = true }
19 | serde = { workspace = true }
20 | serde_json = { workspace = true }
21 | hyper = { workspace = true }
22 | 
23 | twilight-http-ratelimiting = "0.15.1"
24 | tonic = "0.9.2"
25 | tokio-stream = "0.1.14"
26 | redis = { version = "0.23.0", features = ["cluster", "connection-manager", "tokio-comp"] }
27 | 
28 | [dev-dependencies]
29 | criterion = { workspace = true }
30 | tokio-test = { workspace = true }
31 | tracing-test = { workspace = true }
32 | tracing-subscriber = { workspace = true }
33 | test-log = { workspace = true }
34 | env_logger = { workspace = true }
35 | 
36 | [[bench]]
37 | name = "bucket"
38 | harness = false
39 | 
-------------------------------------------------------------------------------- /exes/ratelimit/benches/bucket.rs: --------------------------------------------------------------------------------
1 | use std::ops::Add;
2 | use std::time::{Duration, SystemTime};
3 | 
4 | use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion};
5 | use ratelimit::buckets::bucket::Bucket;
6 | use tokio::runtime::Runtime;
7 | use twilight_http_ratelimiting::RatelimitHeaders;
8 | 
9 | pub fn acquire_ticket(c: &mut Criterion) {
10 |     let rt = Runtime::new().unwrap();
11 | 
12 |     let bucket = rt.block_on(async move {
13 |         let bucket = Bucket::new();
14 | 
15 |         let mreset = SystemTime::now()
16 |             .add(Duration::from_secs(3600))
17 |             .duration_since(SystemTime::UNIX_EPOCH)
18 |             .unwrap()
19 |             .as_millis()
20 |             .to_string();
21 |         let headers = [
22 |             (
23 |                 "x-ratelimit-bucket",
24 |                 "d721dea6054f6322373d361f98e5c38b".as_bytes(),
25 |             ),
26 |             ("x-ratelimit-limit", "100".as_bytes()),
27 |             ("x-ratelimit-remaining", "1".as_bytes()),
28 |             ("x-ratelimit-reset", mreset.as_bytes()),
29 |             ("x-ratelimit-reset-after", "100000.000".as_bytes()),
30 |         ];
31 |         if let RatelimitHeaders::Present(present) =
32 |             RatelimitHeaders::from_pairs(headers.into_iter()).unwrap()
33 |         {
34 |             bucket.update(
35 |                 &present,
36 |                 SystemTime::now()
37 |                     .duration_since(SystemTime::UNIX_EPOCH)
38 |                     .unwrap()
39 |                     .as_millis() as u64,
40 |             );
41 |         }
42 |         bucket
43 |     });
44 | 
45 |     let size: usize = 1024;
46 |     c.bench_with_input(BenchmarkId::new("input_example", size), &size, |b, _| {
47 |         // Insert a call to `to_async` to convert the bencher to async mode.
48 |         // The timing loops are the same as with the normal bencher.
49 |         b.to_async(&rt).iter(|| async {
50 |             bucket.ticket().await.unwrap();
51 |         });
52 |     });
53 | }
54 | 
55 | criterion_group!(benches, acquire_ticket);
56 | criterion_main!(benches);
57 | 
-------------------------------------------------------------------------------- /exes/ratelimit/src/buckets/async_queue.rs: --------------------------------------------------------------------------------
1 | use tokio::sync::{
2 |     mpsc::{self, UnboundedReceiver, UnboundedSender},
3 |     Mutex,
4 | };
5 | 
6 | /// Simple async fifo (first in, first out) queue based on unbounded channels
7 | ///
8 | /// # Usage
9 | /// ```
10 | /// # use ratelimit::buckets::async_queue::AsyncQueue;
11 | /// # tokio_test::block_on(async {
12 | /// let queue = AsyncQueue::<i64>::default();
13 | /// // Pushing into the queue is synchronous
14 | /// queue.push(123);
15 | ///
16 | /// // Popping from the queue is asynchronous
17 | /// let value = queue.pop().await;
18 | ///
19 | /// // Our value should be the same!
20 | /// assert_eq!(value, Some(123));
21 | /// # });
22 | /// ```
23 | #[derive(Debug)]
24 | pub struct AsyncQueue<T> {
25 |     rx: Mutex<UnboundedReceiver<T>>,
26 |     tx: UnboundedSender<T>,
27 | }
28 | 
29 | impl<T> AsyncQueue<T> {
30 |     /// Add a new item to the queue
31 |     pub fn push(&self, tx: T) {
32 |         let _sent = self.tx.send(tx);
33 |     }
34 | 
35 |     /// Receive the first incoming ratelimit request.
36 |     pub async fn pop(&self) -> Option<T> {
37 |         let mut rx = self.rx.lock().await;
38 | 
39 |         rx.recv().await
40 |     }
41 | }
42 | 
43 | impl<T> Default for AsyncQueue<T> {
44 |     fn default() -> Self {
45 |         let (tx, rx) = mpsc::unbounded_channel();
46 | 
47 |         Self {
48 |             rx: Mutex::new(rx),
49 |             tx,
50 |         }
51 |     }
52 | }
53 | 
54 | #[cfg(test)]
55 | mod tests {
56 |     use crate::buckets::async_queue::AsyncQueue;
57 | 
58 |     #[test_log::test(tokio::test)]
59 |     async fn should_queue_dequeue_fifo() {
60 |         let queue = AsyncQueue::<i64>::default();
61 | 
62 |         // queue data
63 |         for i in 0..2_000_000 {
64 |             queue.push(i);
65 |         }
66 | 
67 |         for i in 0..2_000_000 {
68 |             let result = queue.pop().await.unwrap();
69 |             assert_eq!(i, result);
70 |         }
71 |     }
72 | }
73 | 
-------------------------------------------------------------------------------- /exes/ratelimit/src/buckets/atomic_instant.rs: --------------------------------------------------------------------------------
1 | use std::{
2 |     hash::Hash,
3 |     ops::{Add, AddAssign, Sub},
4 |     sync::atomic::{AtomicU64, Ordering},
5 |     time::{Duration, SystemTime, UNIX_EPOCH},
6 | };
7 | 
8 | /// Instant implementation based on an atomic number
9 | /// # Example
10 | /// ```
11 | /// # use ratelimit::buckets::atomic_instant::AtomicInstant;
12 | /// # use std::time::Duration;
13 | ///
14 | /// let now = AtomicInstant::now();
15 | /// let max_seconds = u64::MAX / 1_000_000_000;
16 | /// let duration = Duration::new(max_seconds, 0);
17 | /// println!("{:?}", now + duration);
18 | /// ```
19 | #[derive(Default, Debug)]
20 | #[cfg(not(target_feature = "atomic128"))]
21 | pub struct AtomicInstant(AtomicU64);
22 | 
23 | impl AtomicInstant {
24 |     /// Calculates the duration since the instant.
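    /// Internally this is `now_unix_millis - stored_millis`, so it assumes the stored
    /// value is a unix timestamp in milliseconds, as produced by [`Self::now`].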
25 |     /// # Example
26 |     /// ```
27 |     /// # use ratelimit::buckets::atomic_instant::AtomicInstant;
28 |     /// # use std::time::Duration;
29 |     /// let mut instant = AtomicInstant::now();
30 |     /// std::thread::sleep(Duration::from_secs(1));
31 |     ///
32 |     /// assert_eq!(instant.elapsed().as_secs(), 1);
33 |     /// ```
34 |     pub fn elapsed(&self) -> Duration {
35 |         // Truncation is expected
36 |         #[allow(clippy::cast_possible_truncation)]
37 |         Duration::from_millis(
38 |             SystemTime::now()
39 |                 .duration_since(UNIX_EPOCH)
40 |                 .expect("time went backwards")
41 |                 .as_millis() as u64
42 |                 - self.0.load(Ordering::Relaxed),
43 |         )
44 |     }
45 |     /// Gets the stored unix time in milliseconds
46 |     /// # Example
47 |     /// ```
48 |     /// # use ratelimit::buckets::atomic_instant::AtomicInstant;
49 |     /// # use std::time::Duration;
50 |     /// let mut instant = AtomicInstant::default();
51 |     /// instant.set_millis(1000);
52 |     ///
53 |     /// assert_eq!(instant.as_millis(), 1000);
54 |     /// ```
55 |     pub fn as_millis(&self) -> u64 {
56 |         self.0.load(Ordering::Relaxed)
57 |     }
58 | 
59 |     /// Creates an instant at the current time
60 |     /// # Safety
61 |     /// Truncates if the current unix time in milliseconds is greater than `u64::MAX`
62 |     #[allow(clippy::cast_possible_truncation)]
63 |     #[must_use]
64 |     pub fn now() -> Self {
65 |         Self(AtomicU64::new(
66 |             SystemTime::now()
67 |                 .duration_since(UNIX_EPOCH)
68 |                 .expect("time went backwards")
69 |                 .as_millis() as u64,
70 |         ))
71 |     }
72 | 
73 |     /// Sets the unix time of the instant
74 |     /// # Example
75 |     /// ```
76 |     /// # use ratelimit::buckets::atomic_instant::AtomicInstant;
77 |     /// # use std::time::Duration;
78 |     /// let mut instant = AtomicInstant::default();
79 |     /// instant.set_millis(1000);
80 |     ///
81 |     /// assert_eq!(instant.as_millis(), 1000);
82 |     /// ```
83 |     pub fn set_millis(&self, millis: u64) {
84 |         self.0.store(millis, Ordering::Relaxed);
85 |     }
86 | 
87 |     /// Determines if the current instant is at the default value
88 |     /// # Example
89 |     /// ```
90 |     /// # use ratelimit::buckets::atomic_instant::AtomicInstant;
91 |     /// # use std::time::Duration;
92 |     /// let mut instant = AtomicInstant::default();
93 |     ///
94 |     /// assert!(instant.is_empty());
95 |     /// ```
96 |     pub fn is_empty(&self) -> bool {
97 |         self.as_millis() == 0
98 |     }
99 | }
100 | 
101 | impl Add<Duration> for AtomicInstant {
102 |     type Output = Self;
103 |     /// # Safety
104 |     /// This panics if the right hand side in milliseconds is greater than `u64::MAX`.
105 |     /// You can remedy this by using the 128-bit feature, which changes the
106 |     /// underlying atomic.
107 |     /// # Example
108 |     /// ```
109 |     /// # use ratelimit::buckets::atomic_instant::AtomicInstant;
110 |     /// # use std::time::Duration;
111 |     /// let mut instant = AtomicInstant::default();
112 |     ///
113 |     /// // we add one second to our instant
114 |     /// instant = instant + Duration::from_secs(1);
115 |     ///
116 |     /// // should be equal to a second
117 |     /// assert_eq!(instant.as_millis(), 1000);
118 |     /// ```
119 |     fn add(self, rhs: Duration) -> Self::Output {
120 |         self.0
121 |             .fetch_add(rhs.as_millis().try_into().unwrap(), Ordering::Relaxed);
122 |         self
123 |     }
124 | }
125 | 
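// Note: unlike `std::time::Instant`, the `Add`/`Sub` impls on this type mutate the
// atomic in place (`fetch_add` / `fetch_sub`) and hand back the same instance; they do
// not produce an independent new value.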
126 | impl AddAssign<Duration> for AtomicInstant {
127 |     /// # Safety
128 |     /// This panics if the right hand side in milliseconds is greater than `u64::MAX`.
129 |     /// You can remedy this by using the 128-bit feature, which changes the
130 |     /// underlying atomic.
131 |     /// # Example
132 |     /// ```
133 |     /// # use ratelimit::buckets::atomic_instant::AtomicInstant;
134 |     /// # use std::time::Duration;
135 |     /// let mut instant = AtomicInstant::default();
136 |     ///
137 |     /// // we add one second to our instant
138 |     /// instant += Duration::from_secs(1);
139 |     ///
140 |     /// // should be equal to a second
141 |     /// assert_eq!(instant.as_millis(), 1000);
142 |     /// ```
143 |     fn add_assign(&mut self, rhs: Duration) {
144 |         self.0
145 |             .fetch_add(rhs.as_millis().try_into().unwrap(), Ordering::Relaxed);
146 |     }
147 | }
148 | 
149 | impl Hash for AtomicInstant {
150 |     fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
151 |         self.0.load(Ordering::Relaxed).hash(state);
152 |     }
153 | }
154 | 
155 | impl PartialEq for AtomicInstant {
156 |     /// # Example
157 |     /// ```
158 |     /// # use ratelimit::buckets::atomic_instant::AtomicInstant;
159 |     /// # use std::time::Duration;
160 |     /// let mut instant = AtomicInstant::default();
161 |     /// let mut instant2 = AtomicInstant::default();
162 |     ///
163 |     /// assert_eq!(instant, instant2);
164 |     /// ```
165 |     fn eq(&self, other: &Self) -> bool {
166 |         self.0.load(Ordering::Relaxed) == other.0.load(Ordering::Relaxed)
167 |     }
168 | }
169 | impl Eq for AtomicInstant {}
170 | 
171 | impl PartialOrd for AtomicInstant {
172 |     /// # Example
173 |     /// ```
174 |     /// # use ratelimit::buckets::atomic_instant::AtomicInstant;
175 |     /// # use std::time::Duration;
176 |     /// let mut instant = AtomicInstant::default();
177 |     /// let mut instant2 = AtomicInstant::default();
178 |     ///
179 |     /// assert!(instant == instant2);
180 |     /// instant.set_millis(1000);
181 |     /// assert!(instant > instant2);
182 |     /// instant.set_millis(0);
183 |     /// instant2.set_millis(1000);
184 |     /// assert!(instant < instant2);
185 |     /// ```
186 |     fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
187 |         self.0
188 |             .load(Ordering::Relaxed)
189 |             .partial_cmp(&other.0.load(Ordering::Relaxed))
190 |     }
191 | }
192 | 
193 | impl Ord for AtomicInstant {
194 |     /// # Example
195 |     /// ```
196 |     /// # use ratelimit::buckets::atomic_instant::AtomicInstant;
197 |     /// # use std::time::Duration;
198 |     /// let mut instant = AtomicInstant::default();
199 |     /// let mut instant2 = AtomicInstant::default();
200 |     ///
201 |     /// assert!(instant == instant2);
202 |     /// instant.set_millis(1000);
203 |     /// assert!(instant > instant2);
204 |     /// instant.set_millis(0);
205 |     /// instant2.set_millis(1000);
206 |     /// assert!(instant < instant2);
207 |     /// ```
208 |     fn cmp(&self, other: &Self) -> std::cmp::Ordering {
209 |         self.0
210 |             .load(Ordering::Relaxed)
211 |             .cmp(&other.0.load(Ordering::Relaxed))
212 |     }
213 | }
214 | 
215 | impl Sub<Duration> for AtomicInstant {
216 |     type Output = Self;
217 |     /// # Example
218 |     /// ```
219 |     /// # use ratelimit::buckets::atomic_instant::AtomicInstant;
220 |     /// # use std::time::Duration;
221 |     /// let mut instant = AtomicInstant::default();
222 |     /// instant.set_millis(1000);
223 |     ///
224 |     /// instant = instant - Duration::from_secs(1);
225 |     ///
226 |     /// assert!(instant.is_empty());
227 |     /// ```
228 |     fn sub(self, rhs: Duration) -> Self::Output {
229 |         self.0
230 |             .fetch_sub(rhs.as_millis().try_into().unwrap(), Ordering::Relaxed);
231 |         self
232 |     }
233 | }
234 | 
235 | impl Sub for AtomicInstant {
236 |     type Output = Self;
237 |     /// # Example
238 |     /// ```
239 |     /// # use ratelimit::buckets::atomic_instant::AtomicInstant;
240 |     /// # use std::time::Duration;
241 |     /// let mut instant = AtomicInstant::default();
242 |     /// let mut instant2 = AtomicInstant::default();
243 |     /// instant.set_millis(1000);
244 |     /// instant2.set_millis(2000);
245 |     ///
246 |     /// instant = instant2 - instant;
247 |     ///
248 |     /// assert_eq!(instant.as_millis(), 1000);
249 |     /// ```
250 |     fn sub(self, rhs: Self) -> Self::Output {
251 |         self.0
252 |             .fetch_sub(rhs.0.load(Ordering::Relaxed), Ordering::Relaxed);
253 |         self
254 |     }
255 | }
256 | 
257 | #[cfg(test)]
258 | mod tests {
259 |     use super::AtomicInstant;
260 | 
261 |     #[test]
262 |     fn should_detect_default() {
263 |         let instant = AtomicInstant::default();
264 |         assert!(instant.is_empty());
265 | 
266 |         instant.set_millis(1000);
267 |         assert!(!instant.is_empty());
268 |     }
269 | }
270 | 
-------------------------------------------------------------------------------- /exes/ratelimit/src/buckets/bucket.rs: --------------------------------------------------------------------------------
1 | use super::{async_queue::AsyncQueue, atomic_instant::AtomicInstant};
2 | use std::{
3 |     sync::{
4 |         atomic::{AtomicU64, Ordering},
5 |         Arc,
6 |     },
7 |     time::Duration,
8 | };
9 | use tokio::{
10 |     sync::oneshot::{self, Sender},
11 |     task::JoinHandle,
12 | };
13 | use tracing::{debug, trace};
14 | use twilight_http_ratelimiting::headers::Present;
15 | 
16 | #[derive(Clone, Debug)]
17 | pub enum TimeRemaining {
18 |     Finished,
19 |     NotStarted,
20 |     Some(Duration),
21 | }
22 | 
23 | /// A bucket is a simple atomic implementation of a ratelimiting bucket.
24 | /// It can be updated dynamically depending on the discord api responses.
25 | ///
26 | /// # Usage
27 | /// ```
28 | /// # use ratelimit::buckets::bucket::Bucket;
29 | /// # use twilight_http_ratelimiting::RatelimitHeaders;
30 | /// # use std::time::SystemTime;
31 | /// # tokio_test::block_on(async {
32 | ///
33 | /// let bucket = Bucket::new();
34 | ///
35 | /// // Feed the header information into the bucket to update it
36 | /// let headers = [
37 | ///     ("x-ratelimit-bucket", "bucket id".as_bytes()),
38 | ///     ("x-ratelimit-limit", "100".as_bytes()),
39 | ///     ("x-ratelimit-remaining", "0".as_bytes()),
40 | ///     ("x-ratelimit-reset", "99999999999999".as_bytes()),
41 | ///     ("x-ratelimit-reset-after", "10.000".as_bytes()),
42 | /// ];
43 | ///
44 | /// // Parse the headers
45 | /// let present = if let Ok(RatelimitHeaders::Present(present))
46 | ///     = RatelimitHeaders::from_pairs(headers.into_iter()) {
47 | ///     present
48 | /// } else { todo!() };
49 | ///
50 | /// // this should ideally be the time of the request
51 | /// let current_time = SystemTime::now()
52 | ///     .duration_since(SystemTime::UNIX_EPOCH)
53 | ///     .unwrap()
54 | ///     .as_millis() as u64;
55 | ///
56 | /// bucket.update(&present, current_time);
57 | /// # })
58 | /// ```
59 | ///
60 | /// # Async
61 | /// You need to call this struct's `new` method inside a tokio 1.x async runtime.
62 | #[derive(Debug)]
63 | pub struct Bucket {
64 |     /// Limit of tickets that can be accepted
65 |     pub limit: AtomicU64,
66 |     /// Remaining requests that can be executed
67 |     pub remaining: AtomicU64,
68 |     /// Time to wait after [`Self::last_update`] before accepting new tickets.
69 |     pub reset_after: AtomicU64,
70 |     /// Last update got from the discord upstream
71 |     pub last_update: AtomicInstant,
72 | 
73 |     /// List of tasks that dequeue tickets from [`Self::queue`]
74 |     tasks: Vec<JoinHandle<()>>,
75 |     /// Queue of tickets to be processed.
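    /// Each entry is the sender half of a oneshot channel; completing it is how a
    /// dequeue task signals one waiting [`Self::ticket`] caller that it may proceed.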
76 |     queue: AsyncQueue<Sender<()>>,
77 | }
78 | 
79 | impl Drop for Bucket {
80 |     /// Simply abort the dequeue tasks to avoid leaking memory via Arc(s)
81 |     fn drop(&mut self) {
82 |         for join in &self.tasks {
83 |             join.abort();
84 |         }
85 |     }
86 | }
87 | 
88 | impl Bucket {
89 |     /// Creates a new bucket with four dequeue tasks
90 |     /// # Async
91 |     /// This function **should** be called in a tokio 1.x runtime, otherwise it *will* panic.
92 |     #[must_use]
93 |     pub fn new() -> Arc<Self> {
94 |         let tasks = vec![];
95 | 
96 |         let this = Arc::new(Self {
97 |             limit: AtomicU64::new(u64::max_value()),
98 |             queue: AsyncQueue::default(),
99 |             remaining: AtomicU64::new(u64::max_value()),
100 |             reset_after: AtomicU64::new(u64::max_value()),
101 |             last_update: AtomicInstant::default(),
102 |             tasks,
103 |         });
104 | 
105 |         // Run with 4 dequeue tasks
106 |         for _ in 0..4 {
107 |             let this = this.clone();
108 |             tokio::spawn(async move {
109 |                 // continuously wait for elements in the queue to process them sequentially.
110 |                 // this is using parallel tasks to allow (hopefully) better performance.
111 |                 while let Some(element) = this.queue.pop().await {
112 |                     if this.remaining() == 0 {
113 |                         debug!("0 tickets remaining, we have to wait.");
114 | 
115 |                         match this.time_remaining() {
116 |                             TimeRemaining::Finished => {
117 |                                 debug!("waiting seems finished.");
118 |                                 this.try_reset();
119 |                             }
120 |                             TimeRemaining::Some(duration) => {
121 |                                 debug!(milliseconds=%duration.as_millis(), "waiting for ratelimit");
122 |                                 tokio::time::sleep(duration).await;
123 | 
124 |                                 this.try_reset();
125 |                             }
126 |                             TimeRemaining::NotStarted => {
127 |                                 debug!("we should not wait");
128 |                             }
129 |                         }
130 |                     }
131 | 
132 |                     this.remaining.fetch_sub(1, Ordering::Relaxed);
133 |                     let _ = element
134 |                         .send(())
135 |                         .map_err(|_| trace!("response channel was closed."));
136 |                 }
137 |             });
138 |         }
139 | 
140 |         this
141 |     }
142 | 
143 |     /// Total number of tickets allowed in a cycle.
144 |     pub fn limit(&self) -> u64 {
145 |         self.limit.load(Ordering::Relaxed)
146 |     }
147 | 
148 |     /// Number of tickets remaining in the current cycle.
149 |     pub fn remaining(&self) -> u64 {
150 |         self.remaining.load(Ordering::Relaxed)
151 |     }
152 | 
153 |     /// Duration after the [`Self::last_update`] time the bucket will refresh.
154 |     pub fn reset_after(&self) -> u64 {
155 |         self.reset_after.load(Ordering::Relaxed)
156 |     }
157 | 
158 |     /// Time remaining until this bucket will reset.
159 |     pub fn time_remaining(&self) -> TimeRemaining {
160 |         let reset_after = self.reset_after();
161 |         let last_update = &self.last_update;
162 | 
163 |         if last_update.is_empty() {
164 |             debug!("last update is empty");
165 | 
166 |             TimeRemaining::NotStarted
167 |         } else {
168 |             let elapsed = last_update.elapsed();
169 | 
170 |             if elapsed > Duration::from_millis(reset_after) {
171 |                 return TimeRemaining::Finished;
172 |             }
173 | 
174 |             TimeRemaining::Some(Duration::from_millis(reset_after) - elapsed)
175 |         }
176 |     }
177 | 
178 |     /// Try to reset this bucket's [`Self::last_update`] value if it has finished.
179 |     ///
180 |     /// Returns whether resetting was possible.
181 |     pub fn try_reset(&self) -> bool {
182 |         if self.last_update.is_empty() {
183 |             return false;
184 |         }
185 | 
186 |         if matches!(self.time_remaining(), TimeRemaining::Finished) {
187 |             self.remaining.store(self.limit(), Ordering::Relaxed);
188 |             self.last_update.set_millis(0);
189 | 
190 |             true
191 |         } else {
192 |             false
193 |         }
194 |     }
195 | 
196 |     /// Update this bucket's ratelimit data after a request has been made.
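    ///
    /// Note: `limit` and `reset_after` are only captured while they still hold their
    /// `u64::MAX` sentinel (i.e. from the first `Present` headers seen); later calls
    /// only refresh `remaining` and, when unset, `last_update`.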
197 |     /// The time of the request should be given.
198 |     pub fn update(&self, ratelimits: &Present, time: u64) {
199 |         let bucket_limit = self.limit();
200 | 
201 |         if self.last_update.is_empty() {
202 |             debug!(millis = time, "updated the last update time");
203 |             self.last_update.set_millis(time);
204 |         }
205 | 
206 |         if bucket_limit != ratelimits.limit() && bucket_limit == u64::max_value() {
207 |             self.reset_after
208 |                 .store(ratelimits.reset_after(), Ordering::SeqCst);
209 |             self.limit.store(ratelimits.limit(), Ordering::SeqCst);
210 |         }
211 | 
212 |         self.remaining
213 |             .store(ratelimits.remaining(), Ordering::Relaxed);
214 |     }
215 | 
216 |     /// Submits a ticket to the queue.
217 |     /// A oneshot receiver is returned; it will resolve once the ticket is accepted.
218 |     pub fn ticket(&self) -> oneshot::Receiver<()> {
219 |         let (tx, rx) = oneshot::channel();
220 |         self.queue.push(tx);
221 |         rx
222 |     }
223 | }
224 | 
225 | #[cfg(test)]
226 | mod tests {
227 |     use std::{
228 |         ops::Add,
229 |         time::{Duration, Instant, SystemTime},
230 |     };
231 | 
232 |     use tokio::time::timeout;
233 |     use tracing::info;
234 |     use twilight_http_ratelimiting::RatelimitHeaders;
235 | 
236 |     use super::Bucket;
237 | 
238 |     #[test_log::test(tokio::test)]
239 |     async fn should_ratelimit() {
240 |         let bucket = Bucket::new();
241 | 
242 |         // Initialize a bucket with one remaining ticket
243 |         // and that resets after 100 seconds
244 |         let mreset = SystemTime::now()
245 |             .add(Duration::from_secs(100))
246 |             .duration_since(SystemTime::UNIX_EPOCH)
247 |             .unwrap()
248 |             .as_millis()
249 |             .to_string();
250 |         let headers: [(&str, &[u8]); 5] = [
251 |             ("x-ratelimit-bucket", b"123"),
252 |             ("x-ratelimit-limit", b"100"),
253 |             ("x-ratelimit-remaining", b"1"),
254 |             ("x-ratelimit-reset", mreset.as_bytes()),
255 |             ("x-ratelimit-reset-after", b"100.000"),
256 |         ];
257 |         if let RatelimitHeaders::Present(present) =
258 |             RatelimitHeaders::from_pairs(headers.into_iter()).unwrap()
259 |         {
260 |             // Integer truncating is expected
261 |             #[allow(clippy::cast_possible_truncation)]
262 |             bucket.update(
263 |                 &present,
264 |                 SystemTime::now()
265 |                     .duration_since(SystemTime::UNIX_EPOCH)
266 |                     .unwrap()
267 |                     .as_millis() as u64,
268 |             );
269 |         }
270 | 
271 |         let ticket = bucket.ticket();
272 | 
273 |         info!("first request");
274 |         // We should accept one ticket
275 |         let respo = timeout(Duration::from_secs(10), ticket).await;
276 |         assert!(respo.is_ok());
277 | 
278 |         info!("second request");
279 | 
280 |         let ticket = bucket.ticket();
281 |         // The bucket has no tickets left, so this one should hang
282 |         let respo = timeout(Duration::from_secs(1), ticket).await;
283 | 
284 |         // the ticket should not have responded because the queue is locked
285 |         assert!(respo.is_err());
286 |     }
287 | 
288 |     #[test_log::test(tokio::test)]
289 |     async fn should_block_until_possible() {
290 |         let bucket = Bucket::new();
291 | 
292 |         // Initialize a bucket with zero remaining tickets
293 |         // and that resets after 10 seconds
294 |         let mreset = SystemTime::now()
295 |             .add(Duration::from_secs(100))
296 |             .duration_since(SystemTime::UNIX_EPOCH)
297 |             .unwrap()
298 |             .as_millis()
299 |             .to_string();
300 |         let headers: [(&str, &[u8]); 5] = [
301 |             ("x-ratelimit-bucket", b"123"),
302 |             ("x-ratelimit-limit", b"100"),
303 |             ("x-ratelimit-remaining", b"0"),
304 |             ("x-ratelimit-reset", mreset.as_bytes()),
305 |             ("x-ratelimit-reset-after", b"10.000"),
306 |         ];
307 | 
308 |         if let RatelimitHeaders::Present(present) =
309 |             RatelimitHeaders::from_pairs(headers.into_iter()).unwrap()
310 |         {
311 |             // Integer truncating is expected
312 |             #[allow(clippy::cast_possible_truncation)]
313 |             bucket.update(
314 |                 &present,
315 |                 SystemTime::now()
316 |                     .duration_since(SystemTime::UNIX_EPOCH)
317 |                     .unwrap()
318 |                     .as_millis() as u64,
319 |             );
320 |         }
321 | 
322 |         let ticket = bucket.ticket();
323 |         let start = Instant::now();
324 | 
325 |         // in this case, the ratelimiter should wait 10 seconds
326 |         let respo = timeout(Duration::from_secs(12), ticket).await;
327 |         let end = start.elapsed().as_secs();
328 | 
329 |         // we should have waited 10 seconds (+- 1s)
330 |         assert_eq!(10, end);
331 |         // and the ticket should be a success
332 |         assert!(respo.is_ok());
333 |     }
334 | }
335 | 
-------------------------------------------------------------------------------- /exes/ratelimit/src/buckets/mod.rs: --------------------------------------------------------------------------------
1 | use std::{future::Future, pin::Pin, sync::Arc, time::Duration};
2 | 
3 | pub mod async_queue;
4 | pub mod atomic_instant;
5 | pub mod bucket;
6 | pub mod noop_lock;
7 | pub mod redis_lock;
8 | 
9 | pub trait GlobalLock: Send + Sync {
10 |     fn lock_for<'a>(
11 |         self: &'a Arc<Self>,
12 |         duration: Duration,
13 |     ) -> Pin<Box<dyn Future<Output = ()> + Send + 'a>>;
14 |     fn is_locked<'a>(
15 |         self: &'a Arc<Self>,
16 |     ) -> Pin<Box<dyn Future<Output = Option<Duration>> + Send + 'a>>;
17 | }
18 | 
-------------------------------------------------------------------------------- /exes/ratelimit/src/buckets/noop_lock.rs: --------------------------------------------------------------------------------
1 | use super::GlobalLock;
2 | 
3 | pub struct NoOpLock;
4 | impl GlobalLock for NoOpLock {
5 |     fn lock_for<'a>(
6 |         self: &'a std::sync::Arc<Self>,
7 |         _duration: std::time::Duration,
8 |     ) -> std::pin::Pin<Box<dyn std::future::Future<Output = ()> + Send + 'a>> {
9 |         Box::pin(async move {})
10 |     }
11 | 
12 |     fn is_locked<'a>(
13 |         self: &'a std::sync::Arc<Self>,
14 |     ) -> std::pin::Pin<Box<dyn std::future::Future<Output = Option<std::time::Duration>> + Send + 'a>>
15 |     {
16 |         Box::pin(async move { None })
17 |     }
18 | }
19 | 
-------------------------------------------------------------------------------- /exes/ratelimit/src/buckets/redis_lock.rs: --------------------------------------------------------------------------------
1 | use std::{
2 |     future::Future,
3 |     pin::Pin,
4 |     sync::{atomic::AtomicU64, Arc},
5 |     time::{Duration, SystemTime},
6 | };
7 | 
8 | use redis::{aio::MultiplexedConnection, AsyncCommands};
9 | use tokio::sync::Mutex;
10 | use tracing::debug;
11 | 
12 | use super::GlobalLock;
13 | 
14 | /// This is flawed and needs to be replaced sometime with the real Redlock algorithm
15 | #[derive(Debug)]
16 | pub struct RedisLock {
17 |     redis: Mutex<MultiplexedConnection>,
18 |     is_locked: AtomicU64,
19 | }
20 | 
21 | impl RedisLock {
22 |     #[must_use]
23 |     pub fn new(redis: MultiplexedConnection) -> Arc<Self> {
24 |         Arc::new(Self {
25 |             redis: Mutex::new(redis),
26 |             is_locked: AtomicU64::new(0),
27 |         })
28 |     }
29 | }
30 | 
31 | impl GlobalLock for RedisLock {
32 |     fn lock_for<'a>(
33 |         self: &'a Arc<Self>,
34 |         duration: Duration,
35 |     ) -> Pin<Box<dyn Future<Output = ()> + Send + 'a>> {
36 |         Box::pin(async move {
37 |             debug!("locking globally for {}", duration.as_secs());
38 |             let _: () = self
39 |                 .redis
40 |                 .lock()
41 |                 .await
42 |                 .set_ex(
43 |                     "nova:rls:lock",
44 |                     1,
45 |                     (duration.as_secs() + 1).try_into().unwrap(),
46 |                 )
47 |                 .await
48 |                 .unwrap();
49 | 
50 |             // Integer truncating is expected
51 |             #[allow(clippy::cast_possible_truncation)]
52 |             self.is_locked.store(
53 |                 (SystemTime::now() + duration)
54 |                     .duration_since(SystemTime::UNIX_EPOCH)
55 |                     .unwrap()
56 |                     .as_millis() as u64,
57 |                 std::sync::atomic::Ordering::Relaxed,
58 |             );
59 |         })
60 |     }
61 | 
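    // Fast path: consult the process-local expiry cached in `is_locked` before doing a
    // redis TTL round-trip; a lock discovered in redis is cached locally via
    // `lock_for`, so repeated checks stay cheap.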
62 |     fn is_locked<'a>(
63 |         self: &'a Arc<Self>,
64 |     ) -> Pin<Box<dyn Future<Output = Option<Duration>> + Send + 'a>> {
65 |         Box::pin(async move {
66 |             let load = self.is_locked.load(std::sync::atomic::Ordering::Relaxed);
67 |             if load != 0 {
68 |                 // Integer truncating is expected
69 |                 #[allow(clippy::cast_possible_truncation)]
70 |                 let now = SystemTime::now()
71 |                     .duration_since(SystemTime::UNIX_EPOCH)
72 |                     .unwrap()
73 |                     .as_millis() as u64;
74 |                 if load > now {
75 |                     // report the *remaining* time, not the absolute expiry timestamp
76 |                     return Some(Duration::from_millis(load - now));
77 |                 }
78 |                 self.is_locked
79 |                     .store(0, std::sync::atomic::Ordering::Relaxed);
80 |             }
81 | 
82 |             let result = self.redis.lock().await.ttl::<_, i64>("nova:rls:lock").await;
83 |             match result {
84 |                 Ok(remaining_time) => {
85 |                     if remaining_time > 0 {
86 |                         // Sign loss is allowed since we know it's a positive number
87 |                         // because a ttl is always positive when the key exists and has a ttl,
88 |                         // otherwise redis *will* return a negative number, hence the check for
89 |                         // a positive sign.
90 |                         #[allow(clippy::cast_sign_loss)]
91 |                         let duration = Duration::from_secs(remaining_time as u64);
92 |                         debug!("external global lock detected, locking");
93 |                         self.lock_for(duration).await;
94 |                         Some(duration)
95 |                     } else {
96 |                         None
97 |                     }
98 |                 }
99 |                 Err(error) => {
100 |                     debug!("redis call failed: {}", error);
101 | 
102 |                     None
103 |                 }
104 |             }
105 |         })
106 |     }
107 | }
108 | 
-------------------------------------------------------------------------------- /exes/ratelimit/src/config.rs: --------------------------------------------------------------------------------
1 | use serde::Deserialize;
2 | use std::net::{Ipv4Addr, SocketAddr, SocketAddrV4};
3 | 
4 | fn default_listening_address() -> SocketAddr {
5 |     SocketAddr::V4(SocketAddrV4::new(Ipv4Addr::UNSPECIFIED, 8090))
6 | }
7 | 
8 | #[derive(Debug, Deserialize, Clone)]
9 | pub struct ServerSettings {
10 |     pub listening_adress: SocketAddr,
11 | }
12 | impl Default for ServerSettings {
13 |     fn default() -> Self {
14 |         Self {
15 |             listening_adress: default_listening_address(),
16 |         }
17 |     }
18 | }
19 | 
20 | #[derive(Debug, Deserialize, Clone, Default)]
21 | pub struct Ratelimit {
22 |     pub server: ServerSettings,
23 | }
24 | 
-------------------------------------------------------------------------------- /exes/ratelimit/src/grpc.rs: --------------------------------------------------------------------------------
1 | use std::collections::HashMap;
2 | use std::sync::Arc;
3 | use std::time::Duration;
4 | 
5 | use opentelemetry::global;
6 | use opentelemetry::propagation::Extractor;
7 | use proto::nova::ratelimit::ratelimiter::HeadersSubmitRequest;
8 | use proto::nova::ratelimit::ratelimiter::{
9 |     ratelimiter_server::Ratelimiter, BucketSubmitTicketRequest,
10 | };
11 | use tokio::sync::RwLock;
12 | use tonic::Response;
13 | use tracing::debug;
14 | use tracing_opentelemetry::OpenTelemetrySpanExt;
15 | use twilight_http_ratelimiting::RatelimitHeaders;
16 | 
17 | use crate::buckets::bucket::Bucket;
18 | use crate::buckets::redis_lock::RedisLock;
19 | use crate::buckets::GlobalLock;
20 | 
21 | pub struct RLServer {
22 |     global: Arc<RedisLock>,
23 |     buckets: RwLock<HashMap<String, Arc<Bucket>>>,
24 | }
25 | 
26 | impl RLServer {
27 |     pub fn new(redis_lock: Arc<RedisLock>) -> Self {
28 |         Self {
29 |             global: redis_lock,
30 |             buckets: RwLock::new(HashMap::new()),
31 |         }
32 |     }
33 | }
34 | 
35 | struct MetadataMap<'a>(&'a tonic::metadata::MetadataMap);
36 | 
37 | impl<'a> Extractor for MetadataMap<'a> {
38 |     /// Get a value for a key from the `MetadataMap`. If the value can't be converted to &str, returns None
39 |     fn get(&self, key: &str) -> Option<&str> {
40 |         self.0.get(key).and_then(|metadata| metadata.to_str().ok())
41 |     }
42 | 
43 |     /// Collect all the keys from the `MetadataMap`.
44 |     fn keys(&self) -> Vec<&str> {
45 |         self.0
46 |             .keys()
47 |             .map(|key| match key {
48 |                 tonic::metadata::KeyRef::Ascii(v) => v.as_str(),
49 |                 tonic::metadata::KeyRef::Binary(v) => v.as_str(),
50 |             })
51 |             .collect::<Vec<_>>()
52 |     }
53 | }
54 | 
55 | #[tonic::async_trait]
56 | impl Ratelimiter for RLServer {
57 |     async fn submit_headers(
58 |         &self,
59 |         request: tonic::Request<HeadersSubmitRequest>,
60 |     ) -> Result<Response<()>, tonic::Status> {
61 |         let parent_cx =
62 |             global::get_text_map_propagator(|prop| prop.extract(&MetadataMap(request.metadata())));
63 |         // Generate a tracing span as usual
64 |         let span = tracing::span!(tracing::Level::INFO, "request process");
65 |         span.set_parent(parent_cx);
66 | 
67 |         let data = request.into_inner();
68 | 
69 |         let ratelimit_headers = RatelimitHeaders::from_pairs(
70 |             data.headers.iter().map(|f| (f.0 as &str, f.1.as_bytes())),
71 |         )
72 |         .unwrap();
73 | 
74 |         if let Some(duration) = self.global.is_locked().await {
75 |             tokio::time::sleep(duration).await;
76 |         }
77 | 
78 |         let bucket: Arc<Bucket> = if self.buckets.read().await.contains_key(&data.path) {
79 |             self.buckets
80 |                 .read()
81 |                 .await
82 |                 .get(&data.path)
83 |                 .expect("impossible")
84 |                 .clone()
85 |         } else {
86 |             let bucket = Bucket::new();
87 |             self.buckets.write().await.insert(data.path, bucket.clone());
88 |             bucket
89 |         };
90 | 
91 |         match ratelimit_headers {
92 |             RatelimitHeaders::Global(global) => {
93 |                 // If we are globally ratelimited, we lock using the redis lock
94 |                 // This is using redis because a global ratelimit should be executed in all
95 |                 // ratelimit workers.
96 |                 debug!(
97 |                     "global ratelimit headers detected: {}",
98 |                     global.retry_after()
99 |                 );
100 |                 self.global
101 |                     .clone()
102 |                     .lock_for(Duration::from_secs(global.retry_after()))
103 |                     .await;
104 |             }
105 |             RatelimitHeaders::None => {}
106 |             RatelimitHeaders::Present(present) => {
107 |                 // we should update the bucket.
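                // `data.precise_time` is the unix-millis timestamp the rest proxy
                // captured when submitting the response headers, so bucket timing is
                // anchored to the request rather than to when this RPC arrives.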
108 |                 bucket.update(&present, data.precise_time);
109 |             }
110 |             _ => unreachable!(),
111 |         };
112 | 
113 |         Ok(Response::new(()))
114 |     }
115 | 
116 |     async fn submit_ticket(
117 |         &self,
118 |         request: tonic::Request<BucketSubmitTicketRequest>,
119 |     ) -> Result<Response<()>, tonic::Status> {
120 |         let parent_cx =
121 |             global::get_text_map_propagator(|prop| prop.extract(&MetadataMap(request.metadata())));
122 |         // Generate a tracing span as usual
123 |         let span = tracing::span!(tracing::Level::INFO, "request process");
124 |         span.set_parent(parent_cx);
125 | 
126 |         let data = request.into_inner();
127 | 
128 |         let bucket: Arc<Bucket> = if self.buckets.read().await.contains_key(&data.path) {
129 |             self.buckets
130 |                 .read()
131 |                 .await
132 |                 .get(&data.path)
133 |                 .expect("impossible")
134 |                 .clone()
135 |         } else {
136 |             let bucket = Bucket::new();
137 |             self.buckets.write().await.insert(data.path, bucket.clone());
138 |             bucket
139 |         };
140 | 
141 |         // wait for the ticket to be accepted
142 |         let _ = bucket.ticket().await;
143 | 
144 |         Ok(Response::new(()))
145 |     }
146 | }
147 | 
-------------------------------------------------------------------------------- /exes/ratelimit/src/lib.rs: --------------------------------------------------------------------------------
1 | #![deny(
2 |     clippy::all,
3 |     clippy::correctness,
4 |     clippy::suspicious,
5 |     clippy::style,
6 |     clippy::complexity,
7 |     clippy::perf,
8 |     clippy::pedantic,
9 |     clippy::nursery,
10 |     unsafe_code
11 | )]
12 | 
13 | use buckets::redis_lock::RedisLock;
14 | use config::Ratelimit;
15 | use grpc::RLServer;
16 | use leash::{AnyhowResultFuture, Component};
17 | use proto::nova::ratelimit::ratelimiter::ratelimiter_server::RatelimiterServer;
18 | use redis::aio::MultiplexedConnection;
19 | use shared::config::Settings;
20 | use std::future::Future;
21 | use std::pin::Pin;
22 | use tokio::sync::oneshot;
23 | use tonic::transport::Server;
24 | 
25 | pub mod buckets;
26 | mod config;
27 | mod grpc;
28 | 
29 | pub struct RatelimiterServerComponent {}
30 | impl Component for RatelimiterServerComponent {
31 |     type Config = Ratelimit;
32 |     const SERVICE_NAME: &'static str = "ratelimiter";
33 | 
34 |     fn start(
35 |         &self,
36 |         settings: Settings<Self::Config>,
37 |         stop: oneshot::Receiver<()>,
38 |     ) -> AnyhowResultFuture<()> {
39 |         Box::pin(async move {
40 |             let listening_address = settings.server.listening_adress;
41 |             let redis = Into::<
42 |                 Pin<Box<dyn Future<Output = anyhow::Result<MultiplexedConnection>> + Send>>,
43 |             >::into(settings.redis)
44 |             .await?;
45 | 
46 |             let server = RLServer::new(RedisLock::new(redis));
47 | 
48 |             Server::builder()
49 |                 .add_service(RatelimiterServer::new(server))
50 |                 .serve_with_shutdown(listening_address, async move {
51 |                     let _ = stop.await;
52 |                 })
53 |                 .await?;
54 | 
55 |             Ok(())
56 |         })
57 |     }
58 | 
59 |     fn new() -> Self {
60 |         Self {}
61 |     }
62 | }
63 | 
-------------------------------------------------------------------------------- /exes/ratelimit/src/main.rs: --------------------------------------------------------------------------------
1 | use leash::ignite;
2 | use ratelimit::RatelimiterServerComponent;
3 | 
4 | ignite!(RatelimiterServerComponent);
5 | 
-------------------------------------------------------------------------------- /exes/rest/Cargo.toml: --------------------------------------------------------------------------------
1 | [package]
2 | name = "rest"
3 | version = "0.1.0"
4 | edition = "2018"
5 | 
6 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
7 | 
8 | [dependencies]
9 | shared = { workspace = true }
10 | proto = { workspace = true }
11 | leash = { workspace = true }
12 | tracing-opentelemetry = { workspace = true }
13 | opentelemetry = { workspace = true }
14 | opentelemetry-http = { workspace = true }
15 | tracing = { workspace = true }
16 | tracing-futures = { workspace = true }
17 | tokio = { workspace = true }
18 | anyhow = { workspace = true }
19 | serde = { workspace = true }
20 | serde_json = { workspace = true }
21 | hyper = { workspace = true }
22 | 
23 | futures-util = "0.3.28"
24 | hyper-rustls = "0.24.1"
25 | lazy_static = "1.4.0"
26 | xxhash-rust = { version = "0.8.6", features = ["xxh32"] }
27 | twilight-http-ratelimiting = "0.15.1"
28 | 
29 | hashring = "0.3.0"
30 | tonic = "0.9.2"
31 | tokio-stream = "0.1.14"
32 | dns-lookup = "2.0.2"
33 | 
-------------------------------------------------------------------------------- /exes/rest/src/config.rs: --------------------------------------------------------------------------------
1 | use serde::Deserialize;
2 | use std::net::{Ipv4Addr, SocketAddr, SocketAddrV4};
3 | 
4 | fn default_listening_address() -> SocketAddr {
5 |     SocketAddr::V4(SocketAddrV4::new(Ipv4Addr::UNSPECIFIED, 8090))
6 | }
7 | 
8 | #[derive(Debug, Deserialize, Clone)]
9 | pub struct ServerSettings {
10 |     pub listening_adress: SocketAddr,
11 | }
12 | impl Default for ServerSettings {
13 |     fn default() -> Self {
14 |         Self {
15 |             listening_adress: default_listening_address(),
16 |         }
17 |     }
18 | }
19 | 
20 | #[derive(Debug, Deserialize, Clone, Default)]
21 | pub struct Discord {
22 |     pub token: String,
23 | }
24 | 
25 | #[derive(Debug, Deserialize, Clone, Default)]
26 | pub struct ReverseProxy {
27 |     pub server: ServerSettings,
28 |     pub discord: Discord,
29 |     pub ratelimiter_address: String,
30 |     pub ratelimiter_port: u16,
31 |     #[serde(default = "default_upstream")]
32 |     pub upstream: Option<String>,
33 | }
34 | 
35 | #[allow(clippy::unnecessary_wraps)]
36 | fn default_upstream() -> Option<String> {
37 |     Some("https://discord.com".to_string())
38 | }
39 | 
-------------------------------------------------------------------------------- /exes/rest/src/handler.rs: --------------------------------------------------------------------------------
1 | use anyhow::bail;
2 | use hyper::http::{
3 |     header::{AUTHORIZATION, CONNECTION, HOST, TRANSFER_ENCODING, UPGRADE},
4 |     HeaderValue, Method as HttpMethod, Request, Response, Uri,
5 | };
6 | use hyper::{client::HttpConnector, Body, Client};
7 | use hyper_rustls::HttpsConnector;
8 | use opentelemetry::{
9 |     global,
10 |     metrics::{Counter, Histogram},
11 |     Context as OpenTelemetryContext, KeyValue,
12 | };
13 | use std::{
14 |     collections::hash_map::DefaultHasher,
15 |     convert::TryFrom,
16 |     hash::{Hash, Hasher},
17 |     str::FromStr,
18 |     sync::Arc,
19 |     time::SystemTime,
20 | };
21 | use tracing::{debug_span, error, info_span, trace, Instrument};
22 | use twilight_http_ratelimiting::{Method, Path};
23 | 
24 | use crate::{config::ReverseProxy, ratelimit_client::RemoteRatelimiter};
25 | use lazy_static::lazy_static;
26 | 
27 | lazy_static! {
28 |     static ref METER_NAME: &'static str = "";
29 |     static ref REQUESTS: Counter<u64> = {
30 |         global::meter(&METER_NAME)
31 |             .u64_counter("rest.http_requests_total")
32 |             .with_description("Amount of requests processed by the rest reverse proxy")
33 |             .init()
34 |     };
35 |     static ref UPSTREAM_CALLS: Counter<u64> = {
36 |         global::meter(&METER_NAME)
37 |             .u64_counter("rest.upstream_http_requests_total")
38 |             .with_description("Amount of requests sent to discord")
39 |             .init()
40 |     };
41 |     static ref TICKET_CALLS: Counter<u64> = {
42 |         global::meter(&METER_NAME)
43 |             .u64_counter("rest.ticket_http_requests_total")
44 |             .with_description("Amount of ticket requests sent to the ratelimiter")
45 |             .init()
46 |     };
47 |     static ref HEADERS_SUBMIT_CALLS: Counter<u64> = {
48 |         global::meter(&METER_NAME)
49 |             .u64_counter("rest.header_submit_http_requests_total")
50 |             .with_description("Amount of header submissions sent to the ratelimiter")
51 |             .init()
52 |     };
53 |     static ref UPSTREAM_TIMES: Histogram<u64> = {
54 |         global::meter(&METER_NAME)
55 |             .u64_histogram("rest.upstream_http_request_duration_miliseconds")
56 |             .with_description("Time taken to request discord")
57 |             .init()
58 |     };
59 |     static ref TICKET_TIMES: Histogram<u64> = {
60 |         global::meter(&METER_NAME)
61 |             .u64_histogram("rest.ticket_http_request_duration_miliseconds")
62 |             .with_description("Time taken to get a ticket from the ratelimiter")
63 |             .init()
64 |     };
65 |     static ref HEADERS_SUBMIT_TIMES: Histogram<u64> = {
66 |         global::meter(&METER_NAME)
67 |             .u64_histogram("rest.header_submit_http_request_duration_miliseconds")
68 |             .with_description("Time taken to submit headers to the ratelimiter")
69 |             .init()
70 |     };
71 | }
72 | 
73 | /// Normalizes the path
74 | #[inline]
75 | fn normalize_path(request_path: &str) -> (&str, &str) {
76 |     if let Some(trimmed_path) = request_path.strip_prefix("/api") {
77 |         if let Some(maybe_api_version) = trimmed_path.split('/').nth(1) {
78 |             if let Some(version_number) = maybe_api_version.strip_prefix('v') {
79 |                 if version_number.parse::<u8>().is_ok() {
80 |                     let len = "/api/v".len() + version_number.len();
81 |                     return (&request_path[..len], &request_path[len..]);
82 |                 };
83 |             };
84 |         }
85 | 
86 |         ("/api", trimmed_path)
87 |     } else {
88 |         ("/api", request_path)
89 |     }
90 | }
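// Worked example of the (prefix, rest) tuple this returns:
//   normalize_path("/api/v10/channels/42/messages") == ("/api/v10", "/channels/42/messages")
//   normalize_path("/api/channels/42")              == ("/api", "/channels/42")
//   normalize_path("/channels/42")                  == ("/api", "/channels/42")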
=> "Thread members", 111 | Path::ChannelsIdThreadMembersId(..) => "Thread member", 112 | Path::ChannelsIdThreads(..) => "Channel threads", 113 | Path::ChannelsIdTyping(..) => "Typing indicator", 114 | Path::ChannelsIdWebhooks(..) | Path::WebhooksId(..) | Path::WebhooksIdToken(..) => { 115 | "Webhook" 116 | } 117 | Path::Gateway => "Gateway", 118 | Path::GatewayBot => "Gateway bot info", 119 | Path::Guilds => "Guilds", 120 | Path::GuildsId(..) => "Guild", 121 | Path::GuildsIdAuditLogs(..) => "Guild audit logs", 122 | Path::GuildsIdAutoModerationRules(..) => "Guild automoderation rules", 123 | Path::GuildsIdAutoModerationRulesId(..) => "Guild automoderation rule", 124 | Path::GuildsIdBans(..) => "Guild bans", 125 | Path::GuildsIdBansId(..) => "Specific guild ban", 126 | Path::GuildsIdBansUserId(..) => "Guild ban for user", 127 | Path::GuildsIdChannels(..) => "Guild channel", 128 | Path::GuildsIdEmojis(..) => "Guild emoji", 129 | Path::GuildsIdEmojisId(..) => "Specific guild emoji", 130 | Path::GuildsIdIntegrations(..) => "Guild integrations", 131 | Path::GuildsIdIntegrationsId(..) => "Specific guild integration", 132 | Path::GuildsIdIntegrationsIdSync(..) => "Sync guild integration", 133 | Path::GuildsIdInvites(..) => "Guild invites", 134 | Path::GuildsIdMembers(..) => "Guild members", 135 | Path::GuildsIdMembersId(..) => "Specific guild member", 136 | Path::GuildsIdMembersIdRolesId(..) => "Guild member role", 137 | Path::GuildsIdMembersMeNick(..) => "Modify own nickname", 138 | Path::GuildsIdMembersSearch(..) => "Search guild members", 139 | Path::GuildsIdMfa(..) => "Guild MFA setting", 140 | Path::GuildsIdPreview(..) => "Guild preview", 141 | Path::GuildsIdPrune(..) => "Guild prune", 142 | Path::GuildsIdRegions(..) => "Guild region", 143 | Path::GuildsIdRoles(..) => "Guild roles", 144 | Path::GuildsIdRolesId(..) => "Specific guild role", 145 | Path::GuildsIdScheduledEvents(..) => "Scheduled events in guild", 146 | Path::GuildsIdScheduledEventsId(..) => "Scheduled event in guild", 147 | Path::GuildsIdScheduledEventsIdUsers(..) => "Users of a scheduled event", 148 | Path::GuildsIdStickers(..) => "Guild stickers", 149 | Path::GuildsIdTemplates(..) => "Guild templates", 150 | Path::GuildsIdTemplatesCode(..) | Path::GuildsTemplatesCode(..) => { 151 | "Specific guild template" 152 | } 153 | Path::GuildsIdThreads(..) => "Guild threads", 154 | Path::GuildsIdVanityUrl(..) => "Guild vanity invite", 155 | Path::GuildsIdVoiceStates(..) => "Guild voice states", 156 | Path::GuildsIdWebhooks(..) => "Guild webhooks", 157 | Path::GuildsIdWelcomeScreen(..) => "Guild welcome screen", 158 | Path::GuildsIdWidget(..) => "Guild widget", 159 | Path::InteractionCallback(..) => "Interaction callback", 160 | Path::InvitesCode => "Invite info", 161 | Path::OauthApplicationsMe => "Current application info", 162 | Path::StageInstances => "Stage instances", 163 | Path::StickerPacks => "Sticker packs", 164 | Path::Stickers => "Stickers", 165 | Path::UsersId => "User info", 166 | Path::UsersIdChannels => "User channels", 167 | Path::UsersIdConnections => "User connections", 168 | Path::UsersIdGuilds => "User in guild", 169 | Path::UsersIdGuildsId => "Guild from user", 170 | Path::UsersIdGuildsIdMember => "Member of a guild", 171 | Path::VoiceRegions => "Voice region list", 172 | Path::WebhooksIdTokenMessagesId(..) 
=> "Specific webhook message", 173 | _ => "Unknown path!", 174 | } 175 | } 176 | 177 | #[inline] 178 | #[allow(clippy::too_many_lines)] 179 | pub async fn handle_request( 180 | client: Client, Body>, 181 | ratelimiter: Arc, 182 | config: ReverseProxy, 183 | token: String, 184 | mut request: Request, 185 | ) -> Result, anyhow::Error> { 186 | let cx = OpenTelemetryContext::current(); 187 | 188 | let (bucket, uri_string, name) = { 189 | let method = match *request.method() { 190 | HttpMethod::DELETE => Method::Delete, 191 | HttpMethod::GET => Method::Get, 192 | HttpMethod::PATCH => Method::Patch, 193 | HttpMethod::POST => Method::Post, 194 | HttpMethod::PUT => Method::Put, 195 | _ => { 196 | error!(method =? request.method(), "unsupported HTTP method in request"); 197 | bail!("unsupported method"); 198 | } 199 | }; 200 | let request_path = request.uri().path(); 201 | let (api_path, trimmed_path) = normalize_path(request_path); 202 | trace!("normalized path to {trimmed_path}"); 203 | 204 | let mut uri_string = format!( 205 | "{}{api_path}{trimmed_path}", 206 | config.upstream.expect("no upstream") 207 | ); 208 | if let Some(query) = request.uri().query() { 209 | uri_string.push('?'); 210 | uri_string.push_str(query); 211 | } 212 | 213 | trace!("full request uri is {uri_string}"); 214 | 215 | let mut hash = DefaultHasher::new(); 216 | let path = match Path::try_from((method, trimmed_path)) { 217 | Ok(path) => path, 218 | Err(e) => { 219 | error!( 220 | "Failed to parse path for {:?} {}: {:?}", 221 | method, trimmed_path, e 222 | ); 223 | bail!("failed to parse"); 224 | } 225 | }; 226 | path.hash(&mut hash); 227 | let bucket = hash.finish().to_string(); 228 | trace!("Request bucket is {}", bucket); 229 | 230 | (bucket, uri_string, path_name(&path)) 231 | }; 232 | 233 | REQUESTS.add(&cx, 1, &[KeyValue::new("bucket", name)]); 234 | 235 | let ticket_start = SystemTime::now(); 236 | TICKET_CALLS.add(&cx, 1, &[KeyValue::new("bucket", name)]); 237 | // waits for the request to be authorized 238 | match ratelimiter 239 | .ticket(bucket.clone()) 240 | .instrument(debug_span!("ticket validation request")) 241 | .await 242 | { 243 | Ok(_) => { 244 | #[allow(clippy::cast_possible_truncation)] 245 | TICKET_TIMES.record( 246 | &cx, 247 | ticket_start.elapsed()?.as_millis() as u64, 248 | &[KeyValue::new("bucket", name)], 249 | ); 250 | } 251 | Err(e) => { 252 | error!("Error when requesting the ratelimiter: {:?}", e); 253 | bail!("failed to request the ratelimiter"); 254 | } 255 | } 256 | 257 | request 258 | .headers_mut() 259 | .insert(HOST, HeaderValue::from_static("discord.com")); 260 | 261 | // Remove forbidden HTTP/2 headers 262 | // https://datatracker.ietf.org/doc/html/rfc7540#section-8.1.2.2 263 | request.headers_mut().remove(CONNECTION); 264 | request.headers_mut().remove("keep-alive"); 265 | request.headers_mut().remove("proxy-connection"); 266 | request.headers_mut().remove(TRANSFER_ENCODING); 267 | request.headers_mut().remove(UPGRADE); 268 | 269 | if let Some(auth) = request.headers_mut().get_mut(AUTHORIZATION) { 270 | if auth 271 | .to_str() 272 | .expect("Failed to check header") 273 | .starts_with("Bot") 274 | { 275 | *auth = HeaderValue::from_str(&format!("Bot {token}"))?; 276 | } 277 | } else { 278 | request.headers_mut().insert( 279 | AUTHORIZATION, 280 | HeaderValue::from_str(&format!("Bot {token}"))?, 281 | ); 282 | } 283 | 284 | let uri = match Uri::from_str(&uri_string) { 285 | Ok(uri) => uri, 286 | Err(e) => { 287 | error!("Failed to create URI for requesting Discord API: {:?}", e); 
288 |             bail!("failed to create uri");
289 |         }
290 |     };
291 |     *request.uri_mut() = uri;
292 |     let span = debug_span!("upstream request to discord");
293 |     let upstream_start = SystemTime::now();
294 |     UPSTREAM_CALLS.add(&cx, 1, &[KeyValue::new("bucket", name)]);
295 |     let resp = match client.request(request).instrument(span).await {
296 |         Ok(response) => {
297 |             #[allow(clippy::cast_possible_truncation)]
298 |             UPSTREAM_TIMES.record(
299 |                 &cx,
300 |                 upstream_start.elapsed()?.as_millis() as u64,
301 |                 &[KeyValue::new("bucket", name)],
302 |             );
303 |             response
304 |         }
305 |         Err(e) => {
306 |             error!("Error when requesting the Discord API: {:?}", e);
307 |             bail!("failed to request the discord api");
308 |         }
309 |     };
310 | 
311 |     let headers = resp
312 |         .headers()
313 |         .into_iter()
314 |         .map(|(k, v)| {
315 |             (
316 |                 k.to_string(),
317 |                 v.to_str().map(std::string::ToString::to_string),
318 |             )
319 |         })
320 |         .filter(|f| f.1.is_ok())
321 |         .map(|f| (f.0, f.1.expect("errors should be filtered")))
322 |         .collect();
323 | 
324 |     let headers_start = SystemTime::now();
325 |     HEADERS_SUBMIT_CALLS.add(&cx, 1, &[KeyValue::new("bucket", name)]);
326 |     ratelimiter
327 |         .submit_headers(bucket.clone(), headers)
328 |         .instrument(info_span!("submitting headers"))
329 |         .await?;
330 |     #[allow(clippy::cast_possible_truncation)]
331 |     HEADERS_SUBMIT_TIMES.record(
332 |         &cx,
333 |         headers_start.elapsed()?.as_millis() as u64,
334 |         &[KeyValue::new("bucket", name)],
335 |     );
336 | 
337 |     Ok(resp)
338 | }
339 | 
-------------------------------------------------------------------------------- /exes/rest/src/lib.rs: --------------------------------------------------------------------------------
1 | #![deny(
2 |     clippy::all,
3 |     clippy::correctness,
4 |     clippy::suspicious,
5 |     clippy::style,
6 |     clippy::complexity,
7 |     clippy::perf,
8 |     clippy::pedantic,
9 |     clippy::nursery,
10 |     unsafe_code
11 | )]
12 | 
13 | use config::ReverseProxy;
14 | 
15 | use handler::handle_request;
16 | use hyper::{
17 |     server::conn::AddrStream,
18 |     service::{make_service_fn, service_fn},
19 |     Body, Client, Request, Server,
20 | };
21 | use leash::{AnyhowResultFuture, Component};
22 | use opentelemetry::global;
23 | use opentelemetry_http::HeaderExtractor;
24 | use shared::config::Settings;
25 | use std::{convert::Infallible, sync::Arc};
26 | use tokio::sync::oneshot;
27 | use tracing_opentelemetry::OpenTelemetrySpanExt;
28 | 
29 | mod config;
30 | mod handler;
31 | mod ratelimit_client;
32 | 
33 | pub struct ReverseProxyServer {}
34 | impl Component for ReverseProxyServer {
35 |     type Config = ReverseProxy;
36 |     const SERVICE_NAME: &'static str = "rest";
37 | 
38 |     fn start(
39 |         &self,
40 |         settings: Settings<Self::Config>,
41 |         stop: oneshot::Receiver<()>,
42 |     ) -> AnyhowResultFuture<()> {
43 |         Box::pin(async move {
44 |             // Client to the remote ratelimiters
45 |             let ratelimiter = Arc::new(ratelimit_client::RemoteRatelimiter::new(
46 |                 settings.config.clone(),
47 |             ));
48 |             let https = hyper_rustls::HttpsConnectorBuilder::new()
49 |                 .with_native_roots()
50 |                 .https_or_http()
51 |                 .enable_http1()
52 |                 .build();
53 | 
54 |             let client: Client<_, hyper::Body> = Client::builder().build(https);
55 |             let token = settings.config.discord.token.clone();
56 |             let config = settings.config.clone();
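            // hyper builds one service per connection (make_service_fn) and then one
            // handler future per request (service_fn), so the state captured below is
            // cloned twice: once into the connection closure, once per request.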
57 |             let service_fn = make_service_fn(move |_: &AddrStream| {
58 |                 let client = client.clone();
59 |                 let ratelimiter = ratelimiter.clone();
60 |                 let token = token.clone();
61 |                 let config = config.clone();
62 |                 async move {
63 |                     Ok::<_, Infallible>(service_fn(move |request: Request<Body>| {
64 |                         let token = token.clone();
65 |                         let parent_cx = global::get_text_map_propagator(|propagator| {
66 |                             propagator.extract(&HeaderExtractor(request.headers()))
67 |                         });
68 | 
69 |                         let span = tracing::span!(tracing::Level::INFO, "request process");
70 |                         span.set_parent(parent_cx);
71 | 
72 |                         let client = client.clone();
73 |                         let ratelimiter = ratelimiter.clone();
74 | 
75 |                         let config = config.clone();
76 |                         async move {
77 |                             let token = token.clone();
78 |                             let ratelimiter = ratelimiter.clone();
79 |                             handle_request(client, ratelimiter, config, token, request).await
80 |                         }
81 |                     }))
82 |                 }
83 |             });
84 | 
85 |             let server = Server::bind(&settings.config.server.listening_adress)
86 |                 .http1_only(true)
87 |                 .serve(service_fn);
88 | 
89 |             server
90 |                 .with_graceful_shutdown(async {
91 |                     stop.await.expect("should not fail");
92 |                 })
93 |                 .await?;
94 | 
95 |             Ok(())
96 |         })
97 |     }
98 | 
99 |     fn new() -> Self {
100 |         Self {}
101 |     }
102 | }
103 | 
-------------------------------------------------------------------------------- /exes/rest/src/main.rs: --------------------------------------------------------------------------------
1 | use leash::ignite;
2 | use rest::ReverseProxyServer;
3 | 
4 | ignite!(ReverseProxyServer);
5 | 
-------------------------------------------------------------------------------- /exes/rest/src/ratelimit_client/mod.rs: --------------------------------------------------------------------------------
1 | use crate::config::ReverseProxy;
2 | 
3 | use self::remote_hashring::{HashRingWrapper, MetadataMap, VNode};
4 | use anyhow::anyhow;
5 | use opentelemetry::global;
6 | use proto::nova::ratelimit::ratelimiter::{BucketSubmitTicketRequest, HeadersSubmitRequest};
7 | use std::collections::HashMap;
8 | use std::fmt::Debug;
9 | use std::future::Future;
10 | use std::pin::Pin;
11 | use std::sync::Arc;
12 | use std::time::{Duration, SystemTime};
13 | use tokio::sync::{broadcast, RwLock};
14 | use tonic::Request;
15 | use tracing::{debug, error, info_span, instrument, trace_span, Instrument, Span};
16 | use tracing_opentelemetry::OpenTelemetrySpanExt;
17 | 
18 | mod remote_hashring;
19 | 
20 | #[derive(Clone, Debug)]
21 | pub struct RemoteRatelimiter {
22 |     remotes: Arc<RwLock<HashRingWrapper>>,
23 |     current_remotes: Vec<String>,
24 | 
25 |     stop: Arc<broadcast::Sender<()>>,
26 |     config: ReverseProxy,
27 | }
28 | 
29 | impl Drop for RemoteRatelimiter {
30 |     fn drop(&mut self) {
31 |         let _ = self
32 |             .stop
33 |             .clone()
34 |             .send(())
35 |             .map_err(|_| error!("ratelimiter was already stopped"));
36 |     }
37 | }
38 | 
39 | impl RemoteRatelimiter {
40 |     async fn get_ratelimiters(&self) -> Result<(), anyhow::Error> {
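        // Node discovery is DNS-based: every A record behind `ratelimiter_address`
        // (e.g. a Kubernetes headless service) becomes one entry on a consistent hash
        // ring, so a given bucket path is always routed to the same ratelimiter node.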
43 |             .into_iter()
44 |             .filter(std::net::IpAddr::is_ipv4)
45 |             .map(|address| address.to_string())
46 |             .collect();
47 | 
48 |         let mut write = self.remotes.write().await;
49 | 
50 |         for ip in &responses {
51 |             if !self.current_remotes.contains(ip) {
52 |                 let a = VNode::new(ip.clone(), self.config.ratelimiter_port).await?;
53 |                 write.add(a.clone());
54 |             }
55 |         }
56 | 
57 |         Ok(())
58 |     }
59 | 
60 |     #[must_use]
61 |     pub fn new(config: ReverseProxy) -> Self {
62 |         let (stop_tx, mut stop_rx) = broadcast::channel(1);
63 |         let obj = Self {
64 |             remotes: Arc::new(RwLock::new(HashRingWrapper::default())),
65 |             stop: Arc::new(stop_tx),
66 |             config,
67 |             current_remotes: vec![],
68 |         };
69 | 
70 |         let obj_clone = obj.clone();
71 |         // Task to update the ratelimiters in the background
72 |         tokio::spawn(async move {
73 |             loop {
74 |                 debug!("refreshing");
75 | 
76 |                 match obj_clone.get_ratelimiters().await {
77 |                     Ok(_) => {
78 |                         debug!("refreshed ratelimiting servers");
79 |                     }
80 |                     Err(err) => {
81 |                         error!("refreshing ratelimiting servers failed {}", err);
82 |                     }
83 |                 }
84 | 
85 |                 let sleep = tokio::time::sleep(Duration::from_secs(5));
86 |                 tokio::pin!(sleep);
87 |                 tokio::select! {
88 |                     () = &mut sleep => {
89 |                         debug!("timer elapsed");
90 |                     },
91 |                     _ = stop_rx.recv() => break, // stop requested by Drop: end the refresh task
92 |                 }
93 |             }
94 |         });
95 | 
96 |         obj
97 |     }
98 | 
99 |     #[instrument(name = "ticket task")]
100 |     pub fn ticket(
101 |         &self,
102 |         path: String,
103 |     ) -> Pin<Box<dyn Future<Output = Result<(), anyhow::Error>> + Send + 'static>> {
104 |         let remotes = self.remotes.clone();
105 |         Box::pin(
106 |             async move {
107 |                 // Getting the node managing this path
108 |                 let mut node = remotes
109 |                     .read()
110 |                     .instrument(trace_span!("acquiring ring lock"))
111 |                     .await
112 |                     .get(&path)
113 |                     .cloned()
114 |                     .ok_or_else(|| {
115 |                         anyhow!(
116 |                             "did not compute ratelimit because no ratelimiter nodes are detected"
117 |                         )
118 |                     })?;
119 | 
120 |                 // Initialize span for tracing (headers injection)
121 |                 let span = info_span!("remote request");
122 |                 let context = span.context();
123 |                 let mut request = Request::new(BucketSubmitTicketRequest { path });
124 |                 global::get_text_map_propagator(|propagator| {
125 |                     propagator.inject_context(&context, &mut MetadataMap(request.metadata_mut()));
126 |                 });
127 | 
128 |                 // Requesting
129 |                 node.submit_ticket(request)
130 |                     .instrument(info_span!("waiting for ticket response"))
131 |                     .await?;
132 | 
133 |                 Ok(())
134 |             }
135 |             .instrument(Span::current()),
136 |         )
137 |     }
138 | 
139 |     pub fn submit_headers(
140 |         &self,
141 |         path: String,
142 |         headers: HashMap<String, String>,
143 |     ) -> Pin<Box<dyn Future<Output = Result<(), anyhow::Error>> + Send + 'static>> {
144 |         let remotes = self.remotes.clone();
145 |         Box::pin(async move {
146 |             let mut node = remotes
147 |                 .read()
148 |                 .instrument(trace_span!("acquiring ring lock"))
149 |                 .await
150 |                 .get(&path)
151 |                 .cloned()
152 |                 .ok_or_else(|| {
153 |                     anyhow!("did not compute ratelimit because no ratelimiter nodes are detected")
154 |                 })?;
155 | 
156 |             let span = info_span!("remote request");
157 |             let context = span.context();
158 |             let time = SystemTime::now()
159 |                 .duration_since(SystemTime::UNIX_EPOCH)?
160 |                 .as_millis();
161 |             // truncation is expected
162 |             #[allow(clippy::cast_possible_truncation)]
163 |             let mut request = Request::new(HeadersSubmitRequest {
164 |                 path,
165 |                 precise_time: time as u64,
166 |                 headers,
167 |             });
168 |             global::get_text_map_propagator(|propagator| {
169 |                 propagator.inject_context(&context, &mut MetadataMap(request.metadata_mut()));
170 |             });
171 | 
172 |             node.submit_headers(request).await?;
173 | 
174 |             Ok(())
175 |         })
176 |     }
177 | }
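Note: a minimal sketch of how handler.rs drives `RemoteRatelimiter` (simplified; the real call sites live in `handle_request`, wrapped in the metrics and spans shown earlier):

    // Take a ticket from the bucket's owner before calling Discord, then feed
    // the response headers back so the owner can update its ratelimit state.
    async fn rate_limited_call(
        ratelimiter: &RemoteRatelimiter,
        bucket: String,
        response_headers: std::collections::HashMap<String, String>,
    ) -> anyhow::Result<()> {
        ratelimiter.ticket(bucket.clone()).await?;
        // ... perform the upstream Discord API request here ...
        ratelimiter.submit_headers(bucket, response_headers).await?;
        Ok(())
    }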
-------------------------------------------------------------------------------- /exes/rest/src/ratelimit_client/remote_hashring.rs: --------------------------------------------------------------------------------
1 | use core::fmt::Debug;
2 | use opentelemetry::propagation::Injector;
3 | use proto::nova::ratelimit::ratelimiter::ratelimiter_client::RatelimiterClient;
4 | use std::convert::TryFrom;
5 | use std::hash::Hash;
6 | use std::ops::Deref;
7 | use std::ops::DerefMut;
8 | use tonic::transport::Channel;
9 | use tracing::debug;
10 | 
11 | #[derive(Debug, Clone)]
12 | pub struct VNode {
13 |     address: String,
14 | 
15 |     client: RatelimiterClient<Channel>,
16 | }
17 | 
18 | impl Deref for VNode {
19 |     type Target = RatelimiterClient<Channel>;
20 | 
21 |     fn deref(&self) -> &Self::Target {
22 |         &self.client
23 |     }
24 | }
25 | 
26 | impl DerefMut for VNode {
27 |     fn deref_mut(&mut self) -> &mut Self::Target {
28 |         &mut self.client
29 |     }
30 | }
31 | 
32 | impl Hash for VNode {
33 |     fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
34 |         self.address.hash(state);
35 |     }
36 | }
37 | 
38 | pub struct MetadataMap<'a>(pub &'a mut tonic::metadata::MetadataMap);
39 | 
40 | impl<'a> Injector for MetadataMap<'a> {
41 |     /// Set a key and value in the `MetadataMap`. Does nothing if the key or value are not valid inputs
42 |     fn set(&mut self, key: &str, value: String) {
43 |         if let Ok(key) = tonic::metadata::MetadataKey::from_bytes(key.as_bytes()) {
44 |             if let Ok(val) = tonic::metadata::MetadataValue::try_from(&value) {
45 |                 self.0.insert(key, val);
46 |             }
47 |         }
48 |     }
49 | }
50 | 
51 | impl VNode {
52 |     pub async fn new(address: String, port: u16) -> Result<Self, tonic::transport::Error> {
53 |         let host = format!("http://{address}:{port}");
54 |         debug!("connecting to {}", host);
55 |         let client = RatelimiterClient::connect(host).await?;
56 | 
57 |         Ok(Self { address, client })
58 |     }
59 | }
60 | 
61 | #[repr(transparent)]
62 | #[derive(Default)]
63 | pub struct HashRingWrapper(hashring::HashRing<VNode>);
64 | 
65 | impl Deref for HashRingWrapper {
66 |     type Target = hashring::HashRing<VNode>;
67 | 
68 |     fn deref(&self) -> &Self::Target {
69 |         &self.0
70 |     }
71 | }
72 | 
73 | impl DerefMut for HashRingWrapper {
74 |     fn deref_mut(&mut self) -> &mut Self::Target {
75 |         &mut self.0
76 |     }
77 | }
78 | 
79 | impl Debug for HashRingWrapper {
80 |     fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
81 |         f.debug_tuple("HashRing").finish()
82 |     }
83 | }
84 | 
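Note: `VNode` hashes by `address` alone, so a node keeps its position on the ring across reconnects. A standalone sketch of the ring behaviour this relies on, using the `hashring` crate directly (illustrative addresses and route):

    use hashring::HashRing;

    fn main() {
        // Two ratelimiter replicas, identified the way VNode is: by address.
        let mut ring: HashRing<&str> = HashRing::new();
        ring.add("10.0.0.1");
        ring.add("10.0.0.2");

        // The same Discord route always maps onto the same node, which is what
        // lets each node own the ratelimit state for its share of the buckets.
        let owner = ring.get(&"/channels/123/messages");
        println!("{owner:?}");
    }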
-------------------------------------------------------------------------------- /exes/webhook/Cargo.toml: --------------------------------------------------------------------------------
1 | [package]
2 | name = "webhook"
3 | version = "0.1.0"
4 | edition = "2018"
5 | 
6 | [dependencies]
7 | tokio = { workspace = true }
8 | shared = { workspace = true }
9 | proto = { workspace = true }
10 | leash = { workspace = true }
11 | tracing = { workspace = true }
12 | serde = { workspace = true }
13 | serde_json = { workspace = true }
14 | hyper = { workspace = true }
15 | anyhow = { workspace = true }
16 | 
17 | hex = "0.4.3"
18 | ed25519-dalek = "1"
19 | twilight-model = "0.15.2"
20 | 
21 | async-nats = "0.29.0"
-------------------------------------------------------------------------------- /exes/webhook/src/config.rs: --------------------------------------------------------------------------------
1 | use std::net::{Ipv4Addr, SocketAddr, SocketAddrV4};
2 | 
3 | use ed25519_dalek::PublicKey;
4 | use serde::{Deserialize, Deserializer};
5 | 
6 | fn default_listening_address() -> SocketAddr {
7 |     SocketAddr::V4(SocketAddrV4::new(Ipv4Addr::UNSPECIFIED, 8091))
8 | }
9 | 
10 | #[derive(Debug, Deserialize, Clone, Copy)]
11 | pub struct ServerSettings {
12 |     pub listening_adress: SocketAddr,
13 | }
14 | impl Default for ServerSettings {
15 |     fn default() -> Self {
16 |         Self {
17 |             listening_adress: default_listening_address(),
18 |         }
19 |     }
20 | }
21 | 
22 | fn deserialize_pk<'de, D>(deserializer: D) -> Result<PublicKey, D::Error>
23 | where
24 |     D: Deserializer<'de>,
25 | {
26 |     let str = String::deserialize(deserializer)?;
27 |     let bytes = hex::decode(str).map_err(serde::de::Error::custom)?;
28 |     PublicKey::from_bytes(&bytes).map_err(serde::de::Error::custom)
29 | }
30 | 
31 | #[derive(Debug, Deserialize, Clone, Default, Copy)]
32 | pub struct Discord {
33 |     #[serde(deserialize_with = "deserialize_pk")]
34 |     pub public_key: PublicKey,
35 | }
36 | 
37 | #[derive(Debug, Deserialize, Clone, Default, Copy)]
38 | pub struct Webhook {
39 |     pub server: ServerSettings,
40 |     pub discord: Discord,
41 | }
42 | 
-------------------------------------------------------------------------------- /exes/webhook/src/handler/make_service.rs: --------------------------------------------------------------------------------
1 | use hyper::service::Service;
2 | use std::{
3 |     future::{ready, Ready},
4 |     task::{Context, Poll},
5 | };
6 | 
7 | pub struct MakeSvc<T: Clone> {
8 |     pub service: T,
9 | }
10 | 
11 | impl<V: Clone, T> Service<T> for MakeSvc<V> {
12 |     type Response = V;
13 |     type Error = std::io::Error;
14 |     type Future = Ready<Result<Self::Response, Self::Error>>;
15 | 
16 |     fn poll_ready(&mut self, _cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
17 |         Ok(()).into()
18 |     }
19 | 
20 |     fn call(&mut self, _: T) -> Self::Future {
21 |         ready(Ok(self.service.clone()))
22 |     }
23 | }
24 | 
25 | impl<T: Clone> MakeSvc<T> {
26 |     pub const fn new(service: T) -> Self {
27 |         Self { service }
28 |     }
29 | }
30 | 
-------------------------------------------------------------------------------- /exes/webhook/src/handler/mod.rs: --------------------------------------------------------------------------------
1 | use crate::config::Webhook;
2 | use anyhow::bail;
3 | use async_nats::Client;
4 | use ed25519_dalek::PublicKey;
5 | use hyper::{
6 |     body::{to_bytes, Bytes},
7 |     service::Service,
8 |     Body, Method, Request, Response,
9 | };
10 | use shared::payloads::{CachePayload, DispatchEventTagged};
11 | use signature::validate;
12 | use std::{
13 |     future::Future,
14 |     pin::Pin,
15 |     task::{Context, Poll},
16 | };
17 | use tracing::{debug, error};
18 | use twilight_model::gateway::event::DispatchEvent;
19 | use twilight_model::{
20 |     application::interaction::{Interaction, InteractionType},
21 |     gateway::payload::incoming::InteractionCreate,
22 | };
23 | 
24 | pub mod make_service;
25 | mod signature;
26 | 
27 | #[cfg(test)]
28 | pub mod tests;
29 | 
30 | /// Hyper service used to handle the discord webhooks
31 | #[derive(Clone)]
32 | pub struct WebhookService {
33 |     pub config: Webhook,
34 |     pub nats: Client,
35 | }
36 | 
37 | impl WebhookService {
38 |     async fn check_request(req: Request<Body>, pk: PublicKey) -> Result<Bytes, anyhow::Error> {
39 |         if req.method() == Method::POST {
40 |             let signature = if let Some(sig) = req.headers().get("X-Signature-Ed25519") {
41 |                 sig.clone()
42 |             } else {
43 |                 bail!("Missing signature header");
44 |             };
45 | 
46 |             let timestamp = if let Some(timestamp) = req.headers().get("X-Signature-Timestamp") {
47 |                 timestamp.clone()
48 |             } else {
49 |                 bail!("Missing timestamp header");
50 |             };
51 |             let data = to_bytes(req.into_body()).await?;
52 | 
53 |             if validate(
54 |                 &pk,
55 |                 &[timestamp.as_bytes().to_vec(), data.to_vec()].concat(),
56 |                 signature.to_str()?,
57 |             ) {
58 |                 Ok(data)
59 |             } else {
60 |                 bail!("invalid signature");
61 |             }
62 |         } else {
63 |             bail!("not found");
64 |         }
65 |     }
66 | 
67 |     async fn process_request(
68 |         req: Request<Body>,
69 |         nats: Client,
70 |         pk: PublicKey,
71 |     ) -> Result<Response<Body>, anyhow::Error> {
72 |         let data = Self::check_request(req, pk).await?;
73 |         let interaction: Interaction = serde_json::from_slice(&data)?;
74 | 
75 |         if interaction.kind == InteractionType::Ping {
76 |             Ok(Response::builder()
77 |                 .header("Content-Type", "application/json")
78 |                 .body(r#"{"type":1}"#.into())
79 |                 .unwrap())
80 |         } else {
81 |             debug!("calling nats");
82 |             // forward the interaction to the cache component over nats
83 | 
84 |             let data = CachePayload {
85 |                 data: DispatchEventTagged(DispatchEvent::InteractionCreate(Box::new(
86 |                     InteractionCreate(interaction),
87 |                 ))),
88 |             };
89 | 
90 |             let payload = serde_json::to_string(&data)?;
91 | 
92 |             match nats
93 |                 .request(
94 |                     "nova.cache.dispatch.INTERACTION_CREATE".to_string(),
95 |                     Bytes::from(payload),
96 |                 )
97 |                 .await
98 |             {
99 |                 Ok(response) => Ok(Response::builder()
100 |                     .header("Content-Type", "application/json")
101 |                     .body(Body::from(response.payload))
102 |                     .unwrap()),
103 | 
104 |                 Err(error) => {
105 |                     error!("failed to request nats: {}", error);
106 |                     Err(anyhow::anyhow!("internal error"))
107 |                 }
108 |             }
109 |         }
110 |     }
111 | }
112 | 
113 | /// Implementation of the service
114 | impl Service<Request<Body>> for WebhookService {
115 |     type Response = hyper::Response<Body>;
116 |     type Error = anyhow::Error;
117 |     type Future = Pin<Box<dyn Future<Output = Result<Self::Response, Self::Error>> + Send>>;
118 | 
119 |     fn poll_ready(&mut self, _: &mut Context) -> Poll<Result<(), Self::Error>> {
120 |         Poll::Ready(Ok(()))
121 |     }
122 | 
123 |     fn call(&mut self, req: Request<Body>) -> Self::Future {
124 |         let future = Self::process_request(req, self.nats.clone(), self.config.discord.public_key);
125 |         Box::pin(future)
126 |     }
127 | }
128 | 
-------------------------------------------------------------------------------- /exes/webhook/src/handler/signature.rs: --------------------------------------------------------------------------------
1 | use ed25519_dalek::{PublicKey, Signature, Verifier};
2 | 
3 | #[inline]
4 | pub fn validate(public_key: &PublicKey, data: &[u8], hex_signature: &str) -> bool {
5 |     let mut slice: [u8; Signature::BYTE_SIZE] = [0; Signature::BYTE_SIZE];
6 |     let signature_result = hex::decode_to_slice(hex_signature, &mut slice);
7 | 
8 |     if signature_result.is_ok() {
9 |         public_key.verify(data, &Signature::from(slice)).is_ok()
10 |     } else {
11 |         false
12 |     }
13 | }
14 | 
-------------------------------------------------------------------------------- /exes/webhook/src/handler/tests/handler.rs: --------------------------------------------------------------------------------
1 | 
2 | 
-------------------------------------------------------------------------------- /exes/webhook/src/handler/tests/mod.rs: --------------------------------------------------------------------------------
1 | pub mod handler;
2 | pub mod signature;
3 | 
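Note: tests/handler.rs above is still empty; an end-to-end test would need a request signed the way `check_request` expects, i.e. over the concatenation of the timestamp and the raw body. A sketch of producing such a signature with ed25519-dalek 1.x (assumes a `rand` version compatible with that release for `OsRng`; the helper name is hypothetical):

    use ed25519_dalek::{Keypair, Signer};
    use rand::rngs::OsRng;

    // Returns the hex signature to put in X-Signature-Ed25519 for a given
    // X-Signature-Timestamp value and raw request body. The test config would
    // carry the hex encoding of `keypair.public`.
    fn sign_request(keypair: &Keypair, timestamp: &str, body: &[u8]) -> String {
        // Discord signs the concatenation of the timestamp and the raw body.
        let message = [timestamp.as_bytes(), body].concat();
        hex::encode(keypair.sign(&message).to_bytes())
    }

    fn make_keypair() -> Keypair {
        Keypair::generate(&mut OsRng)
    }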
-------------------------------------------------------------------------------- /exes/webhook/src/handler/tests/signature.rs: --------------------------------------------------------------------------------
1 | use crate::handler::signature::validate;
2 | use ed25519_dalek::PublicKey;
3 | 
4 | #[test]
5 | fn validate_signature_test() {
6 |     let signature = "543ec3547d57f9ddb1ec4c5c36503ebf288ffda3da3d510764c9a49c2abb57690ef974c63d174771bdd2481de1066966f57abbec12a3ec171b9f6e2373837002";
7 |     let content = b"message de test incroyable";
8 |     let public_key = PublicKey::from_bytes(
9 |         &hex::decode("eefe0c24473737cb2035232e3b4eb91c206f0a14684168f3503f7d8316058d6f").unwrap(),
10 |     )
11 |     .unwrap();
12 | 
13 |     assert!(validate(&public_key, content, signature));
14 | }
15 | 
16 | #[test]
17 | fn validate_signature_reverse_test() {
18 |     let signature = "543ec3547d57f9ddb1ec4c5c36503ebf288ffda3da3d510764c9a49c2abb57690ef974c63d174771bdd2481de1066966f57abbec12a3ec171b9f6e2373837002";
19 |     let public_key = PublicKey::from_bytes(
20 |         &hex::decode("c029eea18437292c87c62aec34e7d1bd4e38fe6126f3f7c446de6375dc666044").unwrap(),
21 |     )
22 |     .unwrap();
23 | 
24 |     let content = b"ceci est un test qui ne fonctionnera pas!";
25 |     assert!(!validate(&public_key, content, signature));
26 | }
27 | 
28 | #[test]
29 | fn invalid_hex() {
30 |     let signature = "zzz";
31 |     let public_key = PublicKey::from_bytes(
32 |         &hex::decode("c029eea18437292c87c62aec34e7d1bd4e38fe6126f3f7c446de6375dc666044").unwrap(),
33 |     )
34 |     .unwrap();
35 | 
36 |     let content = b"ceci est un test qui ne fonctionnera pas!";
37 |     assert!(!validate(&public_key, content, signature));
38 | }
39 | 
-------------------------------------------------------------------------------- /exes/webhook/src/lib.rs: --------------------------------------------------------------------------------
1 | #![deny(
2 |     clippy::all,
3 |     clippy::correctness,
4 |     clippy::suspicious,
5 |     clippy::style,
6 |     clippy::complexity,
7 |     clippy::perf,
8 |     clippy::pedantic,
9 |     clippy::nursery,
10 |     unsafe_code
11 | )]
12 | 
13 | mod config;
14 | mod handler;
15 | use std::{future::Future, pin::Pin};
16 | 
17 | use crate::{
18 |     config::Webhook,
19 |     handler::{make_service::MakeSvc, WebhookService},
20 | };
21 | use async_nats::Client;
22 | use hyper::Server;
23 | use leash::{AnyhowResultFuture, Component};
24 | use shared::config::Settings;
25 | use tokio::sync::oneshot;
26 | use tracing::info;
27 | #[derive(Clone, Copy)]
28 | pub struct WebhookServer {}
29 | 
30 | impl Component for WebhookServer {
31 |     type Config = Webhook;
32 |     const SERVICE_NAME: &'static str = "webhook";
33 | 
34 |     fn start(
35 |         &self,
36 |         settings: Settings<Self::Config>,
37 |         stop: oneshot::Receiver<()>,
38 |     ) -> AnyhowResultFuture<()> {
39 |         Box::pin(async move {
40 |             info!("Starting server on {}", settings.server.listening_adress);
41 | 
42 |             let bind = settings.server.listening_adress;
43 |             let nats = Into::<Pin<Box<dyn Future<Output = anyhow::Result<Client>> + Send>>>::into(
44 |                 settings.nats,
45 |             )
46 |             .await?;
47 |             info!("Nats connected!");
48 | 
49 |             let make_service = MakeSvc::new(WebhookService {
50 |                 config: settings.config,
51 |                 nats: nats.clone(),
52 |             });
53 | 
54 |             let server = Server::bind(&bind).serve(make_service);
55 | 
56 |             server
57 |                 .with_graceful_shutdown(async {
58 |                     stop.await.expect("should not fail");
59 |                 })
60 |                 .await?;
61 | 
62 |             Ok(())
63 |         })
64 |     }
65 | 
66 |     fn new() -> Self {
67 |         Self {}
68 |     }
69 | }
-------------------------------------------------------------------------------- /exes/webhook/src/main.rs: 
-------------------------------------------------------------------------------- 1 | use leash::ignite; 2 | use webhook::WebhookServer; 3 | 4 | ignite!(WebhookServer); 5 | -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module github.com/discordnova/nova 2 | 3 | go 1.16 4 | 5 | require ( 6 | github.com/Jeffail/gabs v1.4.0 7 | github.com/alicebob/miniredis/v2 v2.23.1 8 | github.com/nats-io/nats-server/v2 v2.9.10 9 | github.com/yuin/gopher-lua v1.0.0 // indirect 10 | golang.org/x/crypto v0.0.0-20221010152910-d6f0a8c073c2 // indirect 11 | golang.org/x/sys v0.0.0-20221010170243-090e33056c14 // indirect 12 | ) 13 | -------------------------------------------------------------------------------- /go.sum: -------------------------------------------------------------------------------- 1 | github.com/Jeffail/gabs v1.4.0 h1://5fYRRTq1edjfIrQGvdkcd22pkYUrHZ5YC/H2GJVAo= 2 | github.com/Jeffail/gabs v1.4.0/go.mod h1:6xMvQMK4k33lb7GUUpaAPh6nKMmemQeg5d4gn7/bOXc= 3 | github.com/alicebob/gopher-json v0.0.0-20200520072559-a9ecdc9d1d3a h1:HbKu58rmZpUGpz5+4FfNmIU+FmZg2P3Xaj2v2bfNWmk= 4 | github.com/alicebob/gopher-json v0.0.0-20200520072559-a9ecdc9d1d3a/go.mod h1:SGnFV6hVsYE877CKEZ6tDNTjaSXYUk6QqoIK6PrAtcc= 5 | github.com/alicebob/miniredis/v2 v2.23.1 h1:jR6wZggBxwWygeXcdNyguCOCIjPsZyNUNlAkTx2fu0U= 6 | github.com/alicebob/miniredis/v2 v2.23.1/go.mod h1:84TWKZlxYkfgMucPBf5SOQBYJceZeQRFIaQgNMiCX6Q= 7 | github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= 8 | github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= 9 | github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= 10 | github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 11 | github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 12 | github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= 13 | github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= 14 | github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= 15 | github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= 16 | github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= 17 | github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= 18 | github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= 19 | github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= 20 | github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= 21 | github.com/klauspost/compress v1.15.11 h1:Lcadnb3RKGin4FYM/orgq0qde+nc15E5Cbqg4B9Sx9c= 22 | github.com/klauspost/compress v1.15.11/go.mod h1:QPwzmACJjUTFsnSHH934V6woptycfrDDJnH7hvFVbGM= 23 | github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= 24 | github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= 25 | github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= 26 | github.com/minio/highwayhash v1.0.2 h1:Aak5U0nElisjDCfPSG79Tgzkn2gl66NxOMspRrKnA/g= 27 | github.com/minio/highwayhash v1.0.2/go.mod 
h1:BQskDq+xkJ12lmlUUi7U0M5Swg3EWR+dLTk+kldvVxY= 28 | github.com/nats-io/jwt/v2 v2.3.0 h1:z2mA1a7tIf5ShggOFlR1oBPgd6hGqcDYsISxZByUzdI= 29 | github.com/nats-io/jwt/v2 v2.3.0/go.mod h1:0tqz9Hlu6bCBFLWAASKhE5vUA4c24L9KPUUgvwumE/k= 30 | github.com/nats-io/nats-server/v2 v2.9.10 h1:LMC46Oi9E6BUx/xBsaCVZgofliAqKQzRPU6eKWkN8jE= 31 | github.com/nats-io/nats-server/v2 v2.9.10/go.mod h1:AB6hAnGZDlYfqb7CTAm66ZKMZy9DpfierY1/PbpvI2g= 32 | github.com/nats-io/nats.go v1.19.0 h1:H6j8aBnTQFoVrTGB6Xjd903UMdE7jz6DS4YkmAqgZ9Q= 33 | github.com/nats-io/nats.go v1.19.0/go.mod h1:tLqubohF7t4z3du1QDPYJIQQyhb4wl6DhjxEajSI7UA= 34 | github.com/nats-io/nkeys v0.3.0 h1:cgM5tL53EvYRU+2YLXIK0G2mJtK12Ft9oeooSZMA2G8= 35 | github.com/nats-io/nkeys v0.3.0/go.mod h1:gvUNGjVcM2IPr5rCsRsC6Wb3Hr2CQAm08dsxtV6A5y4= 36 | github.com/nats-io/nuid v1.0.1 h1:5iA8DT8V7q8WK2EScv2padNa/rTESc1KdnPw4TC2paw= 37 | github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= 38 | github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= 39 | github.com/prashantv/gostub v1.1.0/go.mod h1:A5zLQHz7ieHGG7is6LLXLz7I8+3LZzsrV0P1IAHhP5U= 40 | github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= 41 | github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= 42 | github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= 43 | github.com/yuin/gopher-lua v0.0.0-20220504180219-658193537a64/go.mod h1:GBR0iDaNXjAgGg9zfCvksxSRnQx76gclCIb7kdAd1Pw= 44 | github.com/yuin/gopher-lua v1.0.0 h1:pQCf0LN67Kf7M5u7vRd40A8M1I8IMLrxlqngUJgZ0Ow= 45 | github.com/yuin/gopher-lua v1.0.0/go.mod h1:GBR0iDaNXjAgGg9zfCvksxSRnQx76gclCIb7kdAd1Pw= 46 | go.uber.org/automaxprocs v1.5.1/go.mod h1:BF4eumQw0P9GtnuxxovUd06vwm1o18oMzFtK66vU6XU= 47 | golang.org/x/crypto v0.0.0-20210314154223-e6e6c4f2bb5b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= 48 | golang.org/x/crypto v0.0.0-20220926161630-eccd6366d1be/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= 49 | golang.org/x/crypto v0.0.0-20221010152910-d6f0a8c073c2 h1:x8vtB3zMecnlqZIwJNUUpwYKYSqCz5jXbiyv0ZJJZeI= 50 | golang.org/x/crypto v0.0.0-20221010152910-d6f0a8c073c2/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= 51 | golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= 52 | golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= 53 | golang.org/x/sys v0.0.0-20190130150945-aca44879d564/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= 54 | golang.org/x/sys v0.0.0-20190204203706-41f3e6584952/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= 55 | golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 56 | golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 57 | golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 58 | golang.org/x/sys v0.0.0-20220928140112-f11e5e49a4ec/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 59 | golang.org/x/sys v0.0.0-20221010170243-090e33056c14 h1:k5II8e6QD8mITdi+okbbmR/cIyEbeXLBhy5Ha4nevyc= 60 | golang.org/x/sys v0.0.0-20221010170243-090e33056c14/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 61 | golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= 62 | golang.org/x/text v0.3.3/go.mod 
h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= 63 | golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= 64 | golang.org/x/time v0.0.0-20220922220347-f3bd1da661af h1:Yx9k8YCG3dvF87UAn2tu2HQLf2dt/eR1bXxpLMWeH+Y= 65 | golang.org/x/time v0.0.0-20220922220347-f3bd1da661af/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= 66 | golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= 67 | golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= 68 | google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= 69 | google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= 70 | google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= 71 | google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= 72 | google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= 73 | google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= 74 | gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= 75 | gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= 76 | gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= 77 | gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= 78 | -------------------------------------------------------------------------------- /internal/pkg/all-in-one/all-in-one.go: -------------------------------------------------------------------------------- 1 | package allinone 2 | 3 | /* 4 | #cgo LDFLAGS: -L../../../build/lib -lall_in_one -lz -lm 5 | #include "./all_in_one.h" 6 | #include "./error_handler.h" 7 | */ 8 | import "C" 9 | import ( 10 | "fmt" 11 | "time" 12 | 13 | "github.com/Jeffail/gabs" 14 | "github.com/alicebob/miniredis/v2" 15 | "github.com/nats-io/nats-server/v2/server" 16 | ) 17 | 18 | type AllInOne struct { 19 | redis *miniredis.Miniredis 20 | nats *server.Server 21 | instance *C.AllInOneInstance 22 | } 23 | 24 | func NewAllInOne() (*AllInOne, error) { 25 | redis := miniredis.NewMiniRedis() 26 | nats, err := server.NewServer(&server.Options{}) 27 | 28 | if err != nil { 29 | return nil, err 30 | } 31 | 32 | return &AllInOne{ 33 | redis: redis, 34 | nats: nats, 35 | }, nil 36 | } 37 | 38 | func (s *AllInOne) Start() error { 39 | err := s.redis.Start() 40 | if err != nil { 41 | return err 42 | } 43 | 44 | go s.nats.Start() 45 | 46 | if !s.nats.ReadyForConnections(5 * time.Second) { 47 | return fmt.Errorf("nats server didn't start after 5 seconds, please check if there is another service listening on the same port as nats") 48 | } 49 | 50 | handler := C.ErrorHandler(C.allInOneErrorHandler) 51 | // Set the error handler 52 | C.set_error_handler(handler) 53 | config := C.GoString(C.load_config()) 54 | 55 | json, _ := gabs.ParseJSON([]byte(config)) 56 | json.Set(fmt.Sprintf("redis://%s", s.redis.Addr()), "redis", "url") 57 | json.Set("localhost", "nats", "host") 58 | json.Set(1, "webhook", "discord", "client_id") 59 | 60 | a := "" 61 | a += ("Starting nova All-in-one!\n") 62 | a += fmt.Sprintf(" * Rest proxy running on : http://%s\n", 
json.Path("rest.server.listening_adress").Data().(string)) 63 | a += fmt.Sprintf(" * Webhook server running on : http://%s\n", json.Path("webhook.server.listening_adress").Data().(string)) 64 | a += fmt.Sprintf(" * Ratelimiter server running on : grpc://%s\n", json.Path("ratelimiter.server.listening_adress").Data().(string)) 65 | a += (" * The gateway server should be running\n") 66 | a += (" * The cache server should be running\n") 67 | a += (" * Servers\n") 68 | a += fmt.Sprintf(" * Running MiniREDIS on %s\n", s.redis.Addr()) 69 | a += fmt.Sprintf(" * Running NATS on %s\n", s.nats.ClientURL()) 70 | s.instance = C.create_instance(C.CString(json.String())) 71 | 72 | print(a) 73 | 74 | return nil 75 | } 76 | 77 | func (s *AllInOne) Stop() { 78 | C.stop_instance(s.instance) 79 | } 80 | -------------------------------------------------------------------------------- /internal/pkg/all-in-one/all_in_one.h: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | 6 | /** 7 | * Represents a all in one instance 8 | */ 9 | typedef struct AllInOneInstance AllInOneInstance; 10 | 11 | void set_error_handler(void (*func)(int, char*)); 12 | 13 | /** 14 | * Loads the config json using the nova shared config loader 15 | */ 16 | char *load_config(void); 17 | 18 | void stop_instance(struct AllInOneInstance *instance); 19 | 20 | /** 21 | * # Panics 22 | * Panics if an incorrect `RUST_LOG` variables is specified. 23 | */ 24 | struct AllInOneInstance *create_instance(char *config); 25 | -------------------------------------------------------------------------------- /internal/pkg/all-in-one/error_handler.h: -------------------------------------------------------------------------------- 1 | extern void goErrorHandler(int, char*); 2 | 3 | typedef void (*ErrorHandler)(int, char*); 4 | 5 | void allInOneErrorHandler(int size, char* string) { 6 | goErrorHandler(size, string); 7 | } -------------------------------------------------------------------------------- /internal/pkg/all-in-one/handler.go: -------------------------------------------------------------------------------- 1 | package allinone 2 | 3 | import "C" 4 | import "unsafe" 5 | 6 | //go:linkname goErrorHandler c.goErrorHandler 7 | //export goErrorHandler 8 | func goErrorHandler(size C.int, start *C.char) { 9 | dest := make([]byte, size) 10 | copy(dest, (*(*[1024]byte)(unsafe.Pointer(start)))[:size:size]) 11 | 12 | println("Error from all in one runner: %s", string(dest)) 13 | } 14 | -------------------------------------------------------------------------------- /libs/all_in_one/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "all_in_one" 3 | version = "0.1.0" 4 | edition = "2021" 5 | 6 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html 7 | 8 | [dependencies] 9 | libc = "0.2.147" 10 | leash = { path = "../../libs/leash" } 11 | shared = { path = "../../libs/shared" } 12 | 13 | cache = { path = "../../exes/cache" } 14 | gateway = { path = "../../exes/gateway" } 15 | ratelimit = { path = "../../exes/ratelimit" } 16 | rest = { path = "../../exes/rest" } 17 | webhook = { path = "../../exes/webhook" } 18 | ctrlc = "3.4.0" 19 | 20 | tokio = { version = "1.29.1", features = ["rt"] } 21 | serde = "1.0.166" 22 | serde_json = "1.0.100" 23 | anyhow = { version = "1.0.71", features = ["backtrace"] } 24 | 25 | tracing = "0.1.37" 26 | 27 | config = "0.13.3" 28 | 29 | 
29 | tracing-subscriber = { version = "0.3.17", features = ["env-filter"] }
30 | tracing-opentelemetry = "0.19.0"
31 | opentelemetry = { version = "0.19.0", features = ["rt-tokio"] }
32 | opentelemetry-otlp = { version = "0.12.0" }
33 | 
34 | [lib]
35 | crate-type = ["staticlib", "rlib"]
36 | 
37 | [build-dependencies]
38 | cbindgen = "0.24.5"
-------------------------------------------------------------------------------- /libs/all_in_one/build.rs: --------------------------------------------------------------------------------
1 | extern crate cbindgen;
2 | 
3 | use cbindgen::{Config, Language};
4 | use std::env;
5 | use std::error::Error;
6 | use std::path::PathBuf;
7 | 
8 | /// Generates the headers for the go program.
9 | fn main() -> Result<(), Box<dyn Error>> {
10 |     let crate_dir = env::var("CARGO_MANIFEST_DIR")?;
11 |     let package_name = env::var("CARGO_PKG_NAME")?;
12 | 
13 |     // We export the header file to internal/pkg/all-in-one/{package_name}.h
14 |     let output_file = PathBuf::from("../../internal/pkg/all-in-one")
15 |         .join(format!("{}.h", package_name))
16 |         .display()
17 |         .to_string();
18 | 
19 |     let config = Config {
20 |         language: Language::C,
21 |         ..Default::default()
22 |     };
23 | 
24 |     cbindgen::generate_with_config(crate_dir, config)?.write_to_file(output_file);
25 | 
26 |     Ok(())
27 | }
-------------------------------------------------------------------------------- /libs/all_in_one/src/errors.rs: --------------------------------------------------------------------------------
1 | use std::cell::RefCell;
2 | use std::ffi::{c_char, c_int};
3 | 
4 | use anyhow::Result;
5 | use tracing::error;
6 | 
7 | thread_local! {
8 |     pub static ERROR_HANDLER: RefCell<Option<unsafe extern "C" fn(c_int, *mut c_char)>> = RefCell::new(None);
9 | }
10 | 
11 | /// Renders an error to a string suitable for crossing the FFI boundary.
12 | #[must_use] pub fn stacktrace(err: &anyhow::Error) -> String {
13 |     format!("{err}")
14 | }
15 | 
16 | pub fn wrap_result<T, F>(func: F) -> Option<T>
17 | where
18 |     F: Fn() -> Result<T>,
19 | {
20 |     let result = func();
21 | 
22 |     match result {
23 |         Ok(ok) => Some(ok),
24 |         Err(error) => {
25 |             // Call the handler
26 |             handle_error(&error);
27 |             None
28 |         }
29 |     }
30 | }
31 | 
32 | /// # Panics
33 | /// Panics if the stacktrace length does not fit in an `i32`.
34 | pub fn handle_error(error: &anyhow::Error) {
35 |     ERROR_HANDLER.with(|val| {
36 |         let mut stacktrace = stacktrace(error);
37 | 
38 |         error!("Error emitted: {}", stacktrace);
39 |         if let Some(func) = *val.borrow() {
40 |             // Call the error handler with the exact byte length of the
41 |             // message; the string is not NUL-terminated.
42 |             unsafe {
43 |                 func(
44 |                     stacktrace.len().try_into().unwrap(),
45 |                     stacktrace.as_mut_ptr().cast::<c_char>(),
46 |                 );
47 |             }
48 |         }
49 |     });
50 | }
51 | 
52 | #[cfg(test)]
53 | mod tests {
54 |     // todo
55 | }
56 | 
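Note: the `tests` module above is still a todo; a minimal sketch of exercising the handler plumbing from Rust, using the same `extern "C"` shape that ffi.rs below registers (hypothetical test code, not in the crate):

    use std::ffi::{c_char, c_int};

    // Matches the signature stored in ERROR_HANDLER: a byte length plus a
    // pointer to the (non NUL-terminated) UTF-8 message.
    unsafe extern "C" fn print_handler(len: c_int, msg: *mut c_char) {
        let bytes = std::slice::from_raw_parts(msg.cast::<u8>(), len as usize);
        eprintln!("handled: {}", String::from_utf8_lossy(bytes));
    }

    fn demo() {
        ERROR_HANDLER.with(|h| *h.borrow_mut() = Some(print_handler));
        handle_error(&anyhow::anyhow!("boom"));
    }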
-------------------------------------------------------------------------------- /libs/all_in_one/src/ffi.rs: --------------------------------------------------------------------------------
1 | #![allow(clippy::missing_safety_doc)]
2 | use std::{
3 |     ffi::{c_char, c_int, CString},
4 |     mem::take,
5 |     ptr,
6 |     str::FromStr,
7 |     time::Duration,
8 | };
9 | 
10 | use gateway::GatewayServer;
11 | use opentelemetry::{global::set_text_map_propagator, sdk::propagation::TraceContextPropagator};
12 | use ratelimit::RatelimiterServerComponent;
13 | use rest::ReverseProxyServer;
14 | use tokio::{runtime::Runtime, sync::mpsc};
15 | use tracing::{debug, error};
16 | use tracing_subscriber::{
17 |     filter::Directive, fmt, prelude::__tracing_subscriber_SubscriberExt, util::SubscriberInitExt,
18 |     EnvFilter,
19 | };
20 | use webhook::WebhookServer;
21 | 
22 | use crate::{
23 |     errors::{handle_error, wrap_result, ERROR_HANDLER},
24 |     utils::{load_config_file, start_component, AllInOneInstance},
25 | };
26 | 
27 | #[no_mangle]
28 | pub unsafe extern "C" fn set_error_handler(func: unsafe extern "C" fn(c_int, *mut c_char)) {
29 |     debug!("Setting error handler");
30 |     ERROR_HANDLER.with(|prev| {
31 |         *prev.borrow_mut() = Some(func);
32 |     });
33 | }
34 | 
35 | #[no_mangle]
36 | /// Loads the config json using the nova shared config loader
37 | pub extern "C" fn load_config() -> *mut c_char {
38 |     wrap_result(move || {
39 |         let config = serde_json::to_string(&load_config_file()?)?;
40 |         let c_string = CString::new(config)?;
41 |         Ok(c_string.into_raw())
42 |     })
43 |     .or(Some(ptr::null::<c_char>() as *mut c_char))
44 |     .expect("something has gone terribly wrong")
45 | }
46 | 
47 | #[no_mangle]
48 | pub unsafe extern "C" fn stop_instance(instance: *mut AllInOneInstance) {
49 |     wrap_result(move || {
50 |         let mut instance = Box::from_raw(instance);
51 |         let handles = take(&mut instance.handles);
52 |         instance.runtime.block_on(async move {
53 |             for (name, sender, join) in handles {
54 |                 debug!("Halting component {}", name);
55 |                 let _ = sender
56 |                     .send(())
57 |                     .map_err(|_| error!("Component {} is not online", name));
58 |                 match join.await {
59 |                     Ok(_) => {}
60 |                     Err(error) => error!("Task for component {} panic'ed {}", name, error),
61 |                 };
62 |                 debug!("Component {} halted", name);
63 |             }
64 |         });
65 | 
66 |         instance.runtime.shutdown_timeout(Duration::from_secs(5));
67 | 
68 |         Ok(())
69 |     });
70 | }
71 | 
72 | /// # Panics
73 | /// Panics if an incorrect `RUST_LOG` variable is specified.
74 | #[no_mangle]
75 | pub unsafe extern "C" fn create_instance(config: *mut c_char) -> *mut AllInOneInstance {
76 |     // Returning a null pointer (unaligned) is expected.
77 |     #[allow(clippy::cast_ptr_alignment)]
78 |     wrap_result(move || {
79 |         let value = CString::from_raw(config);
80 |         let json = value.to_str()?;
81 | 
82 |         // Error channel shared by every component of this instance
83 |         let (error_sender, mut errors) = mpsc::channel(50);
84 |         let mut handles = vec![];
85 | 
86 |         let runtime = Runtime::new()?;
87 | 
88 |         // Setup the tracing system
89 |         set_text_map_propagator(TraceContextPropagator::new());
90 |         tracing_subscriber::registry()
91 |             .with(fmt::layer())
92 |             .with(
93 |                 EnvFilter::builder()
94 |                     .with_default_directive(Directive::from_str("info").expect("valid default directive"))
95 |                     .from_env()
96 |                     .unwrap(),
97 |             )
98 |             .init();
99 | 
100 |         // Error handling task
101 |         runtime.spawn(async move {
102 |             while let Some(error) = errors.recv().await {
103 |                 handle_error(&error);
104 |             }
105 |         });
106 | 
107 |         // Start the ratelimiter first so the rest proxy can discover it.
108 |         handles.push(start_component::<RatelimiterServerComponent>(
109 |             json,
110 |             error_sender.clone(),
111 |             &runtime,
112 |         )?);
113 | 
114 |         std::thread::sleep(Duration::from_secs(1));
115 | 
116 |         handles.push(start_component::<GatewayServer>(
117 |             json,
118 |             error_sender.clone(),
119 |             &runtime,
120 |         )?);
121 | 
122 |         std::thread::sleep(Duration::from_secs(1));
123 | 
124 |         handles.push(start_component::<ReverseProxyServer>(
125 |             json,
126 |             error_sender.clone(),
127 |             &runtime,
128 |         )?);
129 | 
130 |         std::thread::sleep(Duration::from_secs(1));
131 | 
132 |         handles.push(start_component::<WebhookServer>(
133 |             json,
134 |             error_sender,
135 |             &runtime,
136 |         )?);
137 | 
138 |         let all_in_one = Box::into_raw(Box::new(AllInOneInstance { runtime, handles }));
139 | 
140 |         Ok(all_in_one)
141 |     })
142 |     .or(Some(ptr::null::<AllInOneInstance>() as *mut AllInOneInstance))
143 |     .expect("something has gone terribly wrong")
144 | }
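Note: from the embedder's point of view (the Go package above), the FFI lifecycle is three calls. The same sequence can be written from Rust, since these are ordinary `pub extern "C"` items (a sketch; null-pointer checks elided):

    unsafe {
        // 1. Resolve the layered config to a heap-allocated JSON string.
        let config = load_config();
        // 2. Boot every component on a fresh Tokio runtime.
        let instance = create_instance(config);
        // ... run until shutdown is requested ...
        // 3. Signal each component, then tear the runtime down.
        stop_instance(instance);
    }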
-------------------------------------------------------------------------------- /libs/all_in_one/src/lib.rs: --------------------------------------------------------------------------------
1 | #![deny(
2 |     clippy::all,
3 |     clippy::correctness,
4 |     clippy::suspicious,
5 |     clippy::style,
6 |     clippy::complexity,
7 |     clippy::perf,
8 |     clippy::pedantic,
9 |     clippy::nursery,
10 | )]
11 | 
12 | pub mod errors;
13 | pub mod ffi;
14 | pub mod utils;
-------------------------------------------------------------------------------- /libs/all_in_one/src/utils.rs: --------------------------------------------------------------------------------
1 | use anyhow::Result;
2 | use config::{Config, Environment, File};
3 | use leash::Component;
4 | use serde::de::DeserializeOwned;
5 | use serde_json::Value;
6 | use shared::config::Settings;
7 | use tokio::{
8 |     runtime::Runtime,
9 |     sync::{mpsc, oneshot::Sender},
10 |     task::JoinHandle,
11 | };
12 | use tracing::{debug, error, info};
13 | 
14 | /// Represents an all-in-one instance
15 | pub struct AllInOneInstance {
16 |     pub runtime: Runtime,
17 |     pub(crate) handles: Vec<(&'static str, Sender<()>, JoinHandle<()>)>,
18 | }
19 | 
20 | /// Loads the settings for one component from the full config JSON
21 | fn load_settings_for<T: Default + Clone + DeserializeOwned>(
22 |     settings: &str,
23 |     name: &str,
24 | ) -> Result<Settings<T>> {
25 |     let value: Value = serde_json::from_str(settings)?;
26 |     let section: T = serde_json::from_value(
27 |         value
28 |             .get(name)
29 |             .ok_or_else(|| anyhow::anyhow!("missing config section `{name}`"))?
30 |             .clone(),
31 |     )?;
32 |     let mut settings: Settings<T> = serde_json::from_value(value)?;
33 |     settings.config = section;
34 | 
35 |     Ok(settings)
36 | }
37 | 
38 | pub(crate) fn start_component<T: Component>(
39 |     json: &str,
40 |     error_sender: mpsc::Sender<anyhow::Error>,
41 |     runtime: &Runtime,
42 | ) -> Result<(&'static str, Sender<()>, JoinHandle<()>)> {
43 |     let name = T::SERVICE_NAME;
44 |     let instance = T::new();
45 | 
46 |     // Set up the stop signal for this component
47 |     let (stop, signal) = tokio::sync::oneshot::channel();
48 |     let settings = load_settings_for(json, name)?;
49 | 
50 |     let handle = runtime.spawn(async move {
51 |         debug!("starting component {}", name);
52 |         match instance.start(settings, signal).await {
53 |             Ok(_) => info!("Component {} gracefully exited", name),
54 |             Err(error) => {
55 |                 error!("Component {} exited with error {}", name, error);
56 |                 error_sender
57 |                     .send(error)
58 |                     .await
59 |                     .expect("Couldn't send the error notification to the error mpsc");
60 |             }
61 |         }
62 |     });
63 | 
64 |     Ok((name, stop, handle))
65 | }
66 | 
67 | pub(crate) fn load_config_file() -> Result<Value> {
68 |     let mut builder = Config::builder();
69 | 
70 |     builder = builder.add_source(File::with_name("config/default"));
71 |     let mode = std::env::var("ENV").unwrap_or_else(|_| "development".into());
72 |     info!("Configuration Environment: {}", mode);
73 | 
74 |     builder = builder.add_source(File::with_name(&format!("config/{mode}")).required(false));
75 |     builder = builder.add_source(File::with_name("config/local").required(false));
76 | 
77 |     let env = Environment::with_prefix("NOVA").separator("__");
78 |     // we can configure each component using environment variables
79 |     builder = builder.add_source(env);
80 | 
81 |     let config: Value = builder.build()?.try_deserialize()?;
82 | 
83 |     Ok(config)
84 | }
-------------------------------------------------------------------------------- /libs/leash/Cargo.toml: --------------------------------------------------------------------------------
1 | [package]
2 | name = "leash"
3 | version = "0.1.0"
4 | edition = "2021"
5 | 
6 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
7 | 
8 | [dependencies]
9 | shared = { workspace = true }
10 | anyhow = { workspace = true }
11 | tokio = { workspace = true }
12 | serde = { workspace = true }
13 | opentelemetry = { workspace = true }
14 | tracing-opentelemetry = { workspace = true }
15 | tracing = { workspace = true }
16 | 
17 | tracing-log = { version = "0.1.3", features = ["env_logger"] }
18 | tracing-subscriber = { version = "0.3.17", features = ["env-filter"] }
19 | opentelemetry-otlp = { version = "0.12.0", features = ["metrics"] }
-------------------------------------------------------------------------------- /libs/leash/src/lib.rs: --------------------------------------------------------------------------------
1 | #![deny(
2 |     clippy::all,
3 |     clippy::correctness,
4 |     clippy::suspicious,
5 |     clippy::style,
6 |     clippy::complexity,
7 |     clippy::perf,
8 |     clippy::pedantic,
9 |     clippy::nursery
10 | )]
11 | 
12 | use anyhow::Result;
13 | use opentelemetry::global::shutdown_tracer_provider;
14 | use opentelemetry::sdk::export::metrics::aggregation::stateless_temporality_selector;
15 | use opentelemetry::sdk::metrics::selectors;
16 | use opentelemetry::sdk::propagation::TraceContextPropagator;
17 | use opentelemetry::sdk::trace::{self};
18 | use opentelemetry::sdk::Resource;
19 | use opentelemetry::{global, KeyValue};
20 | use opentelemetry_otlp::WithExportConfig;
21 | use serde::de::DeserializeOwned;
22 | use shared::config::Settings;
23 | use std::str::FromStr;
24 | use std::time::Duration;
25 | use std::{future::Future, pin::Pin};
26 | use tokio::sync::oneshot;
27 | use tracing::{error, info, trace};
28 | use tracing_subscriber::filter::Directive;
29 | use tracing_subscriber::{fmt, prelude::*, EnvFilter};
30 | 
31 | pub type AnyhowResultFuture<T> = Pin<Box<dyn Future<Output = Result<T>> + Send>>;
32 | pub trait Component: Send + Sync + 'static + Sized {
33 |     type Config: Default + Clone + DeserializeOwned + Send;
34 | 
35 |     const SERVICE_NAME: &'static str;
36 |     fn start(
37 |         &self,
38 |         settings: Settings<Self::Config>,
39 |         stop: oneshot::Receiver<()>,
40 |     ) -> AnyhowResultFuture<()>;
41 |     fn new() -> Self;
42 | }
43 | 
44 | /// # Panics
45 | /// Panics in case of an invalid `RUST_LOG` variable.
46 | pub fn start_component<Y, C>(component: Y) -> AnyhowResultFuture<()>
47 | where
48 |     Y: Component<Config = C>,
49 |     C: Default + Clone + DeserializeOwned + Send,
50 | {
51 |     Box::pin(async move {
52 |         let settings = Settings::<C>::new(Y::SERVICE_NAME)?;
53 | 
54 |         if let Some(meter_config) = settings
55 |             .opentelemetry
56 |             .as_ref()
57 |             .and_then(|f| f.metrics.clone())
58 |         {
59 |             let meter = opentelemetry_otlp::new_pipeline()
60 |                 .metrics(
61 |                     selectors::simple::histogram([0.1, 1.0, 2.0, 5.0, 10.0, 20.0, 50.0]),
62 |                     stateless_temporality_selector(),
63 |                     opentelemetry::runtime::Tokio,
64 |                 )
65 |                 .with_exporter(
66 |                     opentelemetry_otlp::new_exporter()
67 |                         .tonic()
68 |                         .with_export_config(meter_config.into()),
69 |                 )
70 |                 .with_period(Duration::from_secs(3))
71 |                 .with_timeout(Duration::from_secs(10))
72 |                 .build()?;
73 |             // Using the opentelemetry_otlp meter
74 |             global::set_meter_provider(meter);
75 |         }
76 |         // Use the text propagator
77 |         global::set_text_map_propagator(TraceContextPropagator::new());
78 |         // Print debug errors
79 |         global::set_error_handler(|error| {
80 |             error!("OpenTelemetry error: {}", error);
81 |         })?;
82 | 
83 |         if let Some(tracer_config) = settings
84 |             .opentelemetry
85 |             .as_ref()
86 |             .and_then(|f| f.traces.clone())
87 |         {
88 |             let tracer = opentelemetry_otlp::new_pipeline()
89 |                 .tracing()
90 |                 .with_trace_config(trace::config().with_resource(Resource::new(vec![
91 |                     KeyValue::new("service.name", Y::SERVICE_NAME),
92 |                 ])))
93 |                 .with_exporter(
94 |                     opentelemetry_otlp::new_exporter()
95 |                         .tonic()
96 |                         .with_export_config(tracer_config.into()),
97 |                 )
98 |                 .install_batch(opentelemetry::runtime::Tokio)?;
99 |             let otel_layer = tracing_opentelemetry::layer().with_tracer(tracer);
100 | 
101 |             tracing_subscriber::registry()
102 |                 .with(fmt::layer())
103 |                 .with(otel_layer)
104 |                 .with(
105 |                     // Use the info level as default
106 |                     EnvFilter::builder()
107 |                         .with_default_directive(Directive::from_str("info").unwrap())
108 |                         .from_env()?,
109 |                 )
110 |                 .init();
111 |         } else {
112 |             // Setup tracing
113 |             tracing_subscriber::registry()
114 |                 .with(fmt::layer())
115 |                 .with(
116 |                     // Use the info level as default
117 |                     EnvFilter::builder()
118 |                         .with_default_directive(Directive::from_str("info").unwrap())
119 |                         .from_env()?,
120 |                 )
121 |                 .init();
122 |         }
123 | 
124 |         // Finally starting nova
125 |         info!("Starting nova component {}", Y::SERVICE_NAME);
126 |         let (stop, stop_channel) = oneshot::channel();
127 | 
128 |         tokio::spawn(async move {
129 |             trace!("started signal watching");
130 |             #[cfg(unix)]
131 |             tokio::signal::unix::signal(tokio::signal::unix::SignalKind::terminate())
132 |                 .unwrap()
133 |                 .recv()
134 |                 .await;
135 |             #[cfg(not(unix))]
136 |             tokio::signal::ctrl_c().await.unwrap();
137 | 
138 |             stop.send(()).unwrap();
139 |             shutdown_tracer_provider();
140 |         });
141 | 
142 |         trace!(
143 |             "Starting component {component}",
144 |             component = Y::SERVICE_NAME
145 |         );
146 |         component.start(settings, stop_channel).await
147 |     })
148 | }
149 | 
150 | #[macro_export]
151 | macro_rules! ignite {
152 |     ($c:ty) => {
153 |         #[allow(dead_code)]
154 |         fn main() -> anyhow::Result<()> {
155 |             use $crate::Component;
156 |             let rt = tokio::runtime::Runtime::new()?;
157 |             rt.block_on($crate::start_component(<$c as Component>::new()))?;
158 |             Ok(())
159 |         }
160 |     };
161 | }
162 | 
163 | #[cfg(test)]
164 | mod test {
165 |     use serde::Deserialize;
166 |     use tokio::sync::oneshot;
167 | 
168 |     use crate as leash;
169 | 
170 |     #[derive(Clone, Copy)]
171 |     struct TestComponent {}
172 | 
173 |     #[derive(Default, Clone, Deserialize, Copy)]
174 |     struct TestComponentConfig {}
175 | 
176 |     impl leash::Component for TestComponent {
177 |         type Config = TestComponentConfig;
178 |         const SERVICE_NAME: &'static str = "test_component";
179 | 
180 |         fn start(
181 |             &self,
182 |             _settings: shared::config::Settings<Self::Config>,
183 |             _stop: oneshot::Receiver<()>,
184 |         ) -> crate::AnyhowResultFuture<()> {
185 |             Box::pin(async move { Ok(()) })
186 |         }
187 | 
188 |         fn new() -> Self {
189 |             Self {}
190 |         }
191 |     }
192 | 
193 |     ignite!(TestComponent);
194 | }
195 | 
-------------------------------------------------------------------------------- /libs/proto/Cargo.toml: --------------------------------------------------------------------------------
1 | [package]
2 | name = "proto"
3 | version = "0.1.0"
4 | edition = "2021"
5 | 
6 | [dependencies]
7 | tonic = "0.9.2"
8 | prost = "0.11.9"
9 | 
10 | [build-dependencies]
11 | tonic-build = "0.9.2"
12 | glob = "0.3.1"
-------------------------------------------------------------------------------- /libs/proto/build.rs: --------------------------------------------------------------------------------
1 | fn main() -> Result<(), Box<dyn std::error::Error>> {
2 |     let paths: Vec<String> = glob::glob("../../proto/nova/**/*.proto")?
3 |         .map(|f| f.unwrap().to_str().unwrap().to_string())
4 |         .collect();
5 | 
6 |     tonic_build::configure()
7 |         .include_file("genproto.rs")
8 |         .compile(&paths, &["../../proto"])?;
9 | 
10 |     Ok(())
11 | }
12 | 
-------------------------------------------------------------------------------- /libs/proto/src/lib.rs: --------------------------------------------------------------------------------
1 | include!(concat!(env!("OUT_DIR"), concat!("/", "genproto.rs")));
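Note: `include!` splices the file tonic-build wrote (via `.include_file("genproto.rs")` above) into the crate root, so the protobuf package hierarchy becomes nested Rust modules. Consumers import the generated types the way remote_hashring.rs does:

    // The module path mirrors `package nova.ratelimit.ratelimiter;` in the .proto file.
    use proto::nova::ratelimit::ratelimiter::{
        ratelimiter_client::RatelimiterClient, BucketSubmitTicketRequest,
    };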
-------------------------------------------------------------------------------- /libs/shared/Cargo.toml: --------------------------------------------------------------------------------
1 | [package]
2 | name = "shared"
3 | version = "0.1.0"
4 | edition = "2021"
5 | 
6 | [dependencies]
7 | serde = { workspace = true }
8 | serde_json = { workspace = true }
9 | opentelemetry = { workspace = true }
10 | tracing = { workspace = true }
11 | tokio = { workspace = true }
12 | 
13 | config = { version = "0.13", default-features = false, features = ["json", "yaml-rust", "ini"] }
14 | redis = { version = "0.23.0", features = ["cluster", "connection-manager", "tokio-comp"] }
15 | 
16 | async-nats = "0.29.0"
17 | twilight-model = "0.15.2"
18 | anyhow = "1.0.71"
19 | opentelemetry-otlp = "0.12.0"
-------------------------------------------------------------------------------- /libs/shared/src/config.rs: --------------------------------------------------------------------------------
1 | use anyhow::Result;
2 | use config::{Config, Environment, File};
3 | use serde::{de::DeserializeOwned, Deserialize};
4 | use std::{env, ops::Deref};
5 | use tracing::info;
6 | 
7 | #[derive(Debug, Deserialize, Clone)]
8 | pub struct Settings<T: Default + Clone + DeserializeOwned> {
9 |     #[serde(skip_deserializing)]
10 |     pub config: T,
11 |     pub nats: crate::nats::Configuration,
12 |     pub redis: crate::redis::Configuration,
13 |     pub opentelemetry: Option<crate::opentelemetry::Configuration>,
14 | }
15 | 
16 | impl<T: Default + Clone + DeserializeOwned> Settings<T> {
17 |     /// # Errors
18 |     /// Fails if the config could not be deserialized into `T`
19 |     pub fn new(service_name: &str) -> Result<Self> {
20 |         let mut builder = Config::builder();
21 | 
22 |         builder = builder.add_source(File::with_name("config/default"));
23 |         let mode = env::var("ENV").unwrap_or_else(|_| "development".into());
24 |         info!("Configuration Environment: {}", mode);
25 | 
26 |         builder = builder.add_source(File::with_name(&format!("config/{mode}")).required(false));
27 |         builder = builder.add_source(File::with_name("config/local").required(false));
28 | 
29 |         let env = Environment::with_prefix("NOVA").separator("__");
30 |         // we can configure each component using environment variables
31 |         builder = builder.add_source(env);
32 | 
33 |         let config = builder.build()?;
34 |         let mut settings: Self = config.clone().try_deserialize()?;
35 | 
36 |         // try to load the config
37 |         settings.config = config.get::<T>(service_name)?;
38 | 
39 |         Ok(settings)
40 |     }
41 | }
42 | 
43 | impl<T: Default + Clone + DeserializeOwned> Deref for Settings<T> {
44 |     type Target = T;
45 | 
46 |     fn deref(&self) -> &Self::Target {
47 |         &self.config
48 |     }
49 | }
50 | 
-------------------------------------------------------------------------------- /libs/shared/src/lib.rs: --------------------------------------------------------------------------------
1 | #![deny(
2 |     clippy::all,
3 |     clippy::correctness,
4 |     clippy::suspicious,
5 |     clippy::style,
6 |     clippy::complexity,
7 |     clippy::perf,
8 |     clippy::pedantic,
9 |     clippy::nursery,
10 | )]
11 | 
12 | pub mod config;
13 | pub mod nats;
14 | pub mod payloads;
15 | pub mod redis;
16 | pub mod opentelemetry;
-------------------------------------------------------------------------------- /libs/shared/src/nats.rs: --------------------------------------------------------------------------------
1 | use std::{future::Future, pin::Pin};
2 | 
3 | use async_nats::Client;
4 | use serde::Deserialize;
5 | 
6 | #[derive(Clone, Debug, Deserialize)]
7 | pub struct Configuration {
8 |     pub host: String,
9 | }
10 | 
11 | impl From<Configuration> for Pin<Box<dyn Future<Output = anyhow::Result<Client>> + Send>> {
12 |     fn from(value: Configuration) -> Self {
13 |         Box::pin(async move { Ok(async_nats::connect(value.host).await?) })
14 |     }
15 | }
16 | 
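Note: returning a boxed connection future from `From<Configuration>` keeps config deserialization synchronous and lets the caller decide when to connect. webhook's lib.rs consumes it exactly like this sketch:

    use std::{future::Future, pin::Pin};
    use async_nats::Client;
    use shared::nats::Configuration;

    async fn connect(config: Configuration) -> anyhow::Result<Client> {
        // The turbofish selects the `From` impl above; awaiting performs the connect.
        Into::<Pin<Box<dyn Future<Output = anyhow::Result<Client>> + Send>>>::into(config).await
    }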
-------------------------------------------------------------------------------- /libs/shared/src/opentelemetry.rs: --------------------------------------------------------------------------------
1 | use std::ops::{Deref, DerefMut};
2 | 
3 | use opentelemetry_otlp::{ExportConfig, Protocol};
4 | use serde::{de::Visitor, Deserialize};
5 | 
6 | #[derive(Debug, Default)]
7 | #[repr(transparent)]
8 | pub struct ExportConfigDeserialize(ExportConfig);
9 | impl Clone for ExportConfigDeserialize {
10 |     fn clone(&self) -> Self {
11 |         Self(ExportConfig {
12 |             endpoint: self.0.endpoint.clone(),
13 |             protocol: self.0.protocol,
14 |             timeout: self.0.timeout,
15 |         })
16 |     }
17 | }
18 | 
19 | impl From<ExportConfigDeserialize> for ExportConfig {
20 |     fn from(val: ExportConfigDeserialize) -> Self {
21 |         val.0
22 |     }
23 | }
24 | 
25 | impl Deref for ExportConfigDeserialize {
26 |     type Target = ExportConfig;
27 | 
28 |     fn deref(&self) -> &Self::Target {
29 |         &self.0
30 |     }
31 | }
32 | 
33 | impl DerefMut for ExportConfigDeserialize {
34 |     fn deref_mut(&mut self) -> &mut Self::Target {
35 |         &mut self.0
36 |     }
37 | }
38 | 
39 | impl<'de> Deserialize<'de> for ExportConfigDeserialize {
40 |     fn deserialize<D>(deserializer: D) -> std::result::Result<Self, D::Error>
41 |     where
42 |         D: serde::Deserializer<'de>,
43 |     {
44 |         #[derive(Deserialize, Debug)]
45 |         #[serde(field_identifier, rename_all = "lowercase")]
46 |         enum Fields {
47 |             Endpoint,
48 |             Timeout,
49 |         }
50 | 
51 |         struct OpenTelemetryExportConfigVisitor;
52 |         impl<'de> Visitor<'de> for OpenTelemetryExportConfigVisitor {
53 |             type Value = ExportConfigDeserialize;
54 |             fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result {
55 |                 formatter.write_str("struct OpenTelemetryExportConfig")
56 |             }
57 | 
58 |             fn visit_map<A>(self, mut map: A) -> std::result::Result<Self::Value, A::Error>
59 |             where
60 |                 A: serde::de::MapAccess<'de>,
61 |             {
62 |                 let mut export_config = ExportConfigDeserialize::default();
63 |                 export_config.0.protocol = Protocol::Grpc;
64 |                 while let Some(name) = map.next_key::<Fields>()? {
65 |                     match name {
66 |                         Fields::Endpoint => {
67 |                             export_config.0.endpoint = map.next_value()?;
68 |                         }
69 |                         Fields::Timeout => {
70 |                             export_config.0.timeout = map.next_value()?;
71 |                         }
72 |                     }
73 |                 }
74 | 
75 |                 Ok(export_config)
76 |             }
77 |         }
78 | 
79 |         deserializer.deserialize_struct(
80 |             "OpenTelemetryExportConfig",
81 |             &["endpoint", "protocol", "timeout"],
82 |             OpenTelemetryExportConfigVisitor,
83 |         )
84 |     }
85 | }
86 | 
87 | #[derive(Debug, Clone, Deserialize)]
88 | pub struct Configuration {
89 |     pub traces: Option<ExportConfigDeserialize>,
90 |     pub metrics: Option<ExportConfigDeserialize>,
91 | }
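Note: the visitor accepts only `endpoint` and `timeout` and pins the protocol to gRPC. A sketch of the accepted shape (the endpoint value is an illustrative assumption; `timeout` follows serde's default `Duration` encoding and can be omitted):

    fn example() -> serde_json::Result<Configuration> {
        // `metrics` may be null or absent entirely, since both fields are Option.
        serde_json::from_str(r#"{ "traces": { "endpoint": "http://localhost:4317" }, "metrics": null }"#)
    }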
-------------------------------------------------------------------------------- /libs/shared/src/payloads.rs: --------------------------------------------------------------------------------
1 | use std::fmt::Debug;
2 | use std::ops::{Deref, DerefMut};
3 | 
4 | use serde::de::DeserializeSeed;
5 | use serde::Deserializer;
6 | use serde::{Deserialize, Serialize};
7 | use serde_json::Value;
8 | use tracing::trace_span;
9 | use twilight_model::gateway::event::{DispatchEvent, DispatchEventWithTypeDeserializer};
10 | 
11 | #[derive(Debug, Clone, PartialEq)]
12 | pub struct DispatchEventTagged(pub DispatchEvent);
13 | 
14 | impl Deref for DispatchEventTagged {
15 |     type Target = DispatchEvent;
16 |     fn deref(&self) -> &Self::Target {
17 |         &self.0
18 |     }
19 | }
20 | impl DerefMut for DispatchEventTagged {
21 |     fn deref_mut(&mut self) -> &mut Self::Target {
22 |         &mut self.0
23 |     }
24 | }
25 | 
26 | #[derive(Serialize, Deserialize)]
27 | struct DispatchEventTaggedSerialized {
28 |     #[serde(rename = "d")]
29 |     pub data: Value,
30 |     #[serde(rename = "t")]
31 |     pub kind: String,
32 | }
33 | 
34 | // todo(MatthieuCoder): Remove the use of the Value
35 | impl<'de> Deserialize<'de> for DispatchEventTagged {
36 |     fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
37 |     where
38 |         D: Deserializer<'de>,
39 |     {
40 |         let _s = trace_span!("deserializing DispatchEventTagged");
41 |         let tagged = DispatchEventTaggedSerialized::deserialize(deserializer)?;
42 |         let deserializer_seed = DispatchEventWithTypeDeserializer::new(&tagged.kind);
43 |         let dispatch_event = deserializer_seed
44 |             .deserialize(tagged.data)
45 |             .map_err(serde::de::Error::custom)?;
46 |         Ok(Self(dispatch_event))
47 |     }
48 | }
49 | 
50 | impl Serialize for DispatchEventTagged {
51 |     fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
52 |     where
53 |         S: serde::Serializer,
54 |     {
55 |         let _s = trace_span!("serializing DispatchEventTagged");
56 |         let kind = self
57 |             .0
58 |             .kind()
59 |             .name()
60 |             .ok_or_else(|| serde::ser::Error::custom("unnamed dispatch event"))?;
61 |         DispatchEventTaggedSerialized {
62 |             data: serde_json::to_value(&self.0).map_err(serde::ser::Error::custom)?,
63 |             kind: kind.to_string(),
64 |         }
65 |         .serialize(serializer)
66 |     }
67 | }
68 | 
69 | /// Payload sent to the nova cache queues
70 | #[derive(Serialize, Deserialize, Debug, Clone)]
71 | pub struct CachePayload {
72 |     #[serde(flatten)]
73 |     pub data: DispatchEventTagged,
74 | }
75 | #[cfg(test)]
76 | mod tests {
77 |     use serde_json::json;
78 |     use twilight_model::gateway::event::DispatchEvent;
79 | 
80 |     use super::DispatchEventTagged;
81 | 
82 |     #[test]
83 |     fn serialize_event_tagged() {
84 |         let dispatch_event = DispatchEventTagged(DispatchEvent::GiftCodeUpdate);
85 | 
86 |         let value = serde_json::to_value(&dispatch_event);
87 |         assert!(value.is_ok());
88 |         let value = value.unwrap();
89 |         let kind = value.get("t").and_then(serde_json::Value::as_str);
90 |         assert_eq!(kind, Some("GIFT_CODE_UPDATE"));
91 |     }
92 | 
93 |     #[test]
94 |     fn deserialize_event_tagged() {
95 |         let json = json!({
96 |             "t": "GIFT_CODE_UPDATE",
97 |             "d": {}
98 |         });
99 | 
100 |         let dispatch_event = serde_json::from_value::<DispatchEventTagged>(json);
101 |         assert!(dispatch_event.is_ok());
102 | 
103 |         let dispatch_event_tagged = dispatch_event.unwrap();
104 | 
105 |         assert_eq!(
106 |             DispatchEventTagged(DispatchEvent::GiftCodeUpdate),
107 |             dispatch_event_tagged
108 |         );
109 |     }
110 | }
111 | 
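Note: because `CachePayload` flattens `DispatchEventTagged`, the wire format is Discord's own `{"t": ..., "d": ...}` shape. A sketch of publishing one to a cache queue, mirroring what the webhook handler does for `INTERACTION_CREATE` (the subject and event here are chosen for illustration):

    use shared::payloads::{CachePayload, DispatchEventTagged};
    use twilight_model::gateway::event::DispatchEvent;

    async fn publish(nats: async_nats::Client) -> anyhow::Result<()> {
        let payload = CachePayload {
            data: DispatchEventTagged(DispatchEvent::GiftCodeUpdate),
        };
        // Serializes to {"t":"GIFT_CODE_UPDATE","d":{}} thanks to #[serde(flatten)].
        let bytes = serde_json::to_vec(&payload)?;
        nats.publish("nova.cache.dispatch.GIFT_CODE_UPDATE".to_string(), bytes.into())
            .await?;
        Ok(())
    }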
-------------------------------------------------------------------------------- /libs/shared/src/redis.rs: --------------------------------------------------------------------------------
1 | use redis::{aio::MultiplexedConnection, Client};
2 | use serde::Deserialize;
3 | use std::{future::Future, pin::Pin};
4 | 
5 | #[derive(Clone, Debug, Deserialize)]
6 | pub struct Configuration {
7 |     pub url: String,
8 | }
9 | 
10 | impl From<Configuration>
11 |     for Pin<Box<dyn Future<Output = anyhow::Result<MultiplexedConnection>> + Send>>
12 | {
13 |     fn from(value: Configuration) -> Self {
14 |         Box::pin(async move {
15 |             let con = Client::open(value.url)?;
16 |             let (multiplex, ready) = con.create_multiplexed_tokio_connection().await?;
17 | 
18 |             tokio::spawn(ready);
19 | 
20 |             Ok(multiplex)
21 |         })
22 |     }
23 | }
24 | 
-------------------------------------------------------------------------------- /proto/nova/ratelimit/ratelimiter.proto: --------------------------------------------------------------------------------
1 | syntax = "proto3";
2 | 
3 | import "google/protobuf/empty.proto";
4 | 
5 | package nova.ratelimit.ratelimiter;
6 | 
7 | service Ratelimiter {
8 |   rpc SubmitTicket(BucketSubmitTicketRequest) returns (google.protobuf.Empty);
9 |   rpc SubmitHeaders(HeadersSubmitRequest) returns (google.protobuf.Empty);
10 | }
11 | 
12 | message BucketSubmitTicketRequest {
13 |   string path = 1;
14 | }
15 | 
16 | message HeadersSubmitRequest {
17 |   map<string, string> headers = 1;
18 |   uint64 precise_time = 2;
19 |   string path = 3;
20 | }
-------------------------------------------------------------------------------- /shell.nix: --------------------------------------------------------------------------------
1 | # This defines a function taking `pkgs` as a parameter, and uses
2 | # `nixpkgs` by default if no argument is passed to it.
3 | { pkgs ? import <nixpkgs> {} }:
4 | # This avoids typing `pkgs.` before each package name.
5 | with pkgs;
6 | 
7 | # Defines a shell.
8 | mkShell {
9 |   # Sets the build inputs, i.e. what will be available in our
10 |   # local environment.
11 |   buildInputs = [
12 |     cargo
13 |     gcc
14 |     go
15 |     gnumake
16 |     protobuf
17 |     rustc
18 |     zlib
19 |     mdbook
20 |   ];
21 | }
22 | 
--------------------------------------------------------------------------------