├── .dockerignore ├── .github ├── CODEOWNERS ├── ISSUE_TEMPLATE │ ├── bug_report.md │ └── feature_request.md └── workflows │ ├── audit.yml │ ├── build.yml │ ├── release.yml │ └── rustfmt.yml ├── .gitignore ├── Cargo.lock ├── Cargo.toml ├── Dockerfile ├── LICENSE ├── README.md ├── docker-compose.yml ├── example_config.toml ├── jaeger-db-setup ├── APACHE_LICENSE ├── Dockerfile ├── README.md ├── create.sh └── v004.cql.tmpl ├── pre-commit ├── rustfmt.toml └── src ├── bot ├── commands │ ├── blocks.rs │ ├── keywords.rs │ ├── mod.rs │ ├── mutes.rs │ ├── opt_out.rs │ └── util.rs ├── highlighting.rs ├── mod.rs └── util.rs ├── db ├── backup.rs ├── block.rs ├── channel_keyword.rs ├── guild_keyword.rs ├── ignore.rs ├── keyword.rs ├── migration │ ├── m2022_08_04_000001_init.rs │ ├── m2023_01_08_000001_composite_notification_key.rs │ ├── m2023_05_18_000001_rename_pkey_index.rs │ └── mod.rs ├── mod.rs ├── mute.rs ├── notification.rs ├── opt_out.rs └── user_state.rs ├── global.rs ├── logging ├── mod.rs ├── monitoring.rs └── reporting.rs ├── main.rs └── settings.rs /.dockerignore: -------------------------------------------------------------------------------- 1 | * 2 | !src/ 3 | !rustfmt.toml 4 | !Cargo.* 5 | -------------------------------------------------------------------------------- /.github/CODEOWNERS: -------------------------------------------------------------------------------- 1 | # Project maintainer 2 | * @ThatsNoMoon 3 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/bug_report.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Bug report 3 | about: Report a problem with Highlights 4 | title: '' 5 | labels: bug 6 | assignees: '' 7 | 8 | --- 9 | 10 | **Description of bug** 11 | Concisely describe what the bug is. 12 | 13 | **To Reproduce** 14 | List the steps taken to produce the bug: 15 | 1. Use command __ 16 | 2. Send a message with another account 17 | 3. … 18 | 19 | **Expected behavior** 20 | Concisely describe what you expected to happen. 21 | 22 | **Screenshots** 23 | If necessary, add screenshots to help explain the problem. 24 | 25 | **Additional context** 26 | Add any other context about the problem here. 27 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/feature_request.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Feature request 3 | about: Suggest a feature or improvement for Highlights 4 | title: '' 5 | labels: enhancement 6 | assignees: ThatsNoMoon 7 | 8 | --- 9 | 10 | **Problem this feature solves** 11 | Concisely describe what problem this feature or enhancement solves. For example: "Highlights always does ___ which I don't like because ___". 12 | 13 | **Solution for this problem** 14 | Concisely describe what solution you want to see for this problem. 15 | 16 | **Additional context** 17 | Add any other context or screenshots about the feature request here. 
18 | -------------------------------------------------------------------------------- /.github/workflows/audit.yml: -------------------------------------------------------------------------------- 1 | name: Security audit 2 | on: 3 | push: 4 | paths: 5 | - '**/Cargo.toml' 6 | - '**/Cargo.lock' 7 | pull_request: 8 | branches: [ dev ] 9 | paths: 10 | - '**/Cargo.toml' 11 | - '**/Cargo.lock' 12 | jobs: 13 | security_audit: 14 | runs-on: ubuntu-latest 15 | steps: 16 | - uses: actions/checkout@v1 17 | - uses: actions-rs/audit-check@v1 18 | with: 19 | token: ${{ secrets.GITHUB_TOKEN }} 20 | -------------------------------------------------------------------------------- /.github/workflows/build.yml: -------------------------------------------------------------------------------- 1 | name: Build 2 | 3 | on: 4 | push: 5 | branches: [ dev ] 6 | pull_request: 7 | branches: [ dev ] 8 | 9 | env: 10 | CARGO_TERM_COLOR: always 11 | 12 | jobs: 13 | build: 14 | 15 | runs-on: ubuntu-latest 16 | 17 | steps: 18 | - uses: actions/checkout@v2 19 | - name: Build 20 | run: cargo build --verbose 21 | - name: Clippy 22 | run: cargo clippy --verbose 23 | - name: Test 24 | run: cargo test --verbose 25 | -------------------------------------------------------------------------------- /.github/workflows/release.yml: -------------------------------------------------------------------------------- 1 | name: Release 2 | on: 3 | push: 4 | tags: 5 | - "v*" 6 | 7 | jobs: 8 | create-release: 9 | runs-on: ${{ matrix.os }} 10 | 11 | strategy: 12 | matrix: 13 | include: 14 | - build: linux-amd64 15 | os: ubuntu-latest 16 | cross: false 17 | 18 | - build: macos-amd64 19 | os: macos-latest 20 | cross: false 21 | 22 | - build: windows-amd64 23 | os: windows-latest 24 | cross: false 25 | 26 | - build: linux-aarch64 27 | os: ubuntu-latest 28 | cross: true 29 | linker-package: gcc-aarch64-linux-gnu 30 | linker: aarch64-linux-gnu-gcc 31 | target: aarch64-unknown-linux-gnu 32 | 33 | - build: macos-aarch64 34 | os: macos-latest 35 | cross: true 36 | target: aarch64-apple-darwin 37 | 38 | steps: 39 | - name: Checkout 40 | uses: actions/checkout@v2 41 | 42 | - name: Extract version from tag 43 | id: version 44 | uses: damienaicheh/extract-version-from-tag-action@v1.0.0 45 | 46 | - name: Install cross-compiler 47 | if: matrix.os == 'ubuntu-latest' && matrix.cross 48 | run: | 49 | sudo apt update 50 | sudo apt install ${{ matrix.linker-package }} 51 | mkdir -p .cargo 52 | echo [target.${{ matrix.target }}] > .cargo/config.toml 53 | echo linker = '"'${{ matrix.linker }}'"' >> .cargo/config.toml 54 | 55 | - name: Install cross-compiling toolchain 56 | if: matrix.cross 57 | run: rustup target add ${{ matrix.target }} 58 | 59 | - name: Build 60 | if: "!matrix.cross" 61 | run: cargo build --release --verbose 62 | 63 | - name: Build (linux cross) 64 | if: matrix.os == 'ubuntu-latest' && matrix.cross 65 | run: cargo build --release --verbose --target ${{ matrix.target }} 66 | env: 67 | TARGET_CC: ${{ matrix.linker }} 68 | 69 | - name: Build (mac cross) 70 | if: matrix.os == 'macos-latest' && matrix.cross 71 | run: cargo build --release --verbose --target ${{ matrix.target }} 72 | 73 | - name: Archive executable 74 | if: matrix.os != 'windows-latest' && !matrix.cross 75 | working-directory: ./target/release 76 | run: tar czvf ../../highlights-${{ matrix.build }}.tar.gz highlights 77 | 78 | - name: Archive executable (cross) 79 | if: matrix.os != 'windows-latest' && matrix.cross 80 | working-directory: ./target/${{ matrix.target }}/release 81 | run: 
tar czvf ../../../highlights-${{ matrix.build }}.tar.gz highlights 82 | 83 | - name: Archive executable (windows) 84 | if: matrix.os == 'windows-latest' 85 | working-directory: ./target/release 86 | run: Compress-Archive -LiteralPath highlights.exe -DestinationPath ../../highlights-windows-amd64.zip 87 | 88 | - name: Create release 89 | uses: softprops/action-gh-release@v1 90 | with: 91 | files: highlights-* 92 | draft: true 93 | prerelease: ${{ env.PRE_RELEASE != ''}} 94 | 95 | release-docker: 96 | runs-on: ubuntu-latest 97 | 98 | steps: 99 | - name: Checkout 100 | uses: actions/checkout@v2 101 | 102 | - name: Extract version from tag 103 | id: version 104 | uses: damienaicheh/extract-version-from-tag-action@v1.0.0 105 | 106 | - name: Login 107 | uses: docker/login-action@v1 108 | with: 109 | username: thatsnomoon 110 | password: ${{ secrets.DOCKER_HUB_ACCESS_TOKEN }} 111 | 112 | - name: Set up Buildx 113 | uses: docker/setup-buildx-action@v2 114 | 115 | - name: Build and push amd64 116 | uses: docker/build-push-action@v2 117 | with: 118 | context: ./ 119 | file: ./Dockerfile 120 | platforms: linux/amd64 121 | push: true 122 | tags: thatsnomoon/highlights-amd64:${{ env.MAJOR }}.${{ env.MINOR }}.${{ env.PATCH }} 123 | cache-from: type=registry,ref=thatsnomoon/highlights:buildcache-amd64 124 | cache-to: type=registry,ref=thatsnomoon/highlights:buildcache-amd64,mode=max 125 | 126 | - name: Build and push arm64 127 | uses: docker/build-push-action@v2 128 | with: 129 | context: ./ 130 | file: ./Dockerfile 131 | platforms: linux/arm64 132 | build-args: | 133 | RUSTTARGET=aarch64-unknown-linux-musl 134 | MUSLHOST=x86_64-linux-musl 135 | MUSLTARGET=aarch64-linux-musl 136 | push: true 137 | tags: thatsnomoon/highlights-arm64:${{ env.MAJOR }}.${{ env.MINOR }}.${{ env.PATCH }} 138 | cache-from: type=registry,ref=thatsnomoon/highlights:buildcache-arm64 139 | cache-to: type=registry,ref=thatsnomoon/highlights:buildcache-arm64,mode=max 140 | 141 | - name: Create and push multi-arch manifest 142 | run: "\ 143 | docker manifest create thatsnomoon/highlights:latest \ 144 | thatsnomoon/highlights-amd64:${{ env.MAJOR }}.${{ env.MINOR }}.${{ env.PATCH }} \ 145 | thatsnomoon/highlights-arm64:${{ env.MAJOR }}.${{ env.MINOR }}.${{ env.PATCH }}\n\ 146 | docker manifest create \ 147 | thatsnomoon/highlights:${{ env.MAJOR }}.${{ env.MINOR }}.${{ env.PATCH }} \ 148 | thatsnomoon/highlights-amd64:${{ env.MAJOR }}.${{ env.MINOR }}.${{ env.PATCH }} \ 149 | thatsnomoon/highlights-arm64:${{ env.MAJOR }}.${{ env.MINOR }}.${{ env.PATCH }}\n\ 150 | docker manifest create \ 151 | thatsnomoon/highlights:${{ env.MAJOR }}.${{ env.MINOR }} \ 152 | thatsnomoon/highlights-amd64:${{ env.MAJOR }}.${{ env.MINOR }}.${{ env.PATCH }} \ 153 | thatsnomoon/highlights-arm64:${{ env.MAJOR }}.${{ env.MINOR }}.${{ env.PATCH }}\n\ 154 | docker manifest create \ 155 | thatsnomoon/highlights:${{ env.MAJOR }} \ 156 | thatsnomoon/highlights-amd64:${{ env.MAJOR }}.${{ env.MINOR }}.${{ env.PATCH }} \ 157 | thatsnomoon/highlights-arm64:${{ env.MAJOR }}.${{ env.MINOR }}.${{ env.PATCH }}\n\ 158 | 159 | 160 | docker manifest push thatsnomoon/highlights:latest\n\ 161 | docker manifest push \ 162 | thatsnomoon/highlights:${{ env.MAJOR }}.${{ env.MINOR }}.${{ env.PATCH }}\n\ 163 | docker manifest push \ 164 | thatsnomoon/highlights:${{ env.MAJOR }}.${{ env.MINOR }}\n\ 165 | docker manifest push \ 166 | thatsnomoon/highlights:${{ env.MAJOR }}" 167 | -------------------------------------------------------------------------------- 
/.github/workflows/rustfmt.yml: -------------------------------------------------------------------------------- 1 | name: Format 2 | 3 | on: 4 | push: 5 | branches: [ dev ] 6 | pull_request: 7 | branches: [ dev ] 8 | 9 | env: 10 | CARGO_TERM_COLOR: always 11 | 12 | jobs: 13 | rustfmt: 14 | 15 | runs-on: ubuntu-latest 16 | 17 | steps: 18 | - uses: actions/checkout@v2 19 | - name: Check formatting 20 | run: cargo fmt -- --check 21 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | /target 2 | /data 3 | *.sublime* 4 | /.vscode 5 | .env 6 | config.toml 7 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "highlights" 3 | version = "2.1.6" 4 | authors = ["ThatsNoMoon "] 5 | repository = "https://github.com/ThatsNoMoon/highlights" 6 | license = "OSL-3.0" 7 | edition = "2021" 8 | 9 | [features] 10 | default = ["bot", "monitoring", "reporting", "sqlite", "backup", "postgresql"] 11 | bot = [ 12 | "indoc", 13 | "lazy-regex", 14 | "serde_json", 15 | "serenity/builder", 16 | "serenity/cache", 17 | "serenity/client", 18 | "serenity/collector", 19 | "serenity/gateway", 20 | "serenity/http", 21 | "serenity/unstable_discord_api", 22 | ] 23 | monitoring = [ 24 | "opentelemetry", 25 | "opentelemetry-jaeger", 26 | "tracing-opentelemetry", 27 | ] 28 | reporting = ["reqwest"] 29 | backup = ["chrono", "sqlite", "rusqlite"] 30 | sqlite = ["sea-orm/sqlx-sqlite", "sea-orm-migration/sqlx-sqlite"] 31 | postgresql = ["sea-orm/sqlx-postgres", "sea-orm-migration/sqlx-postgres"] 32 | 33 | [dependencies] 34 | anyhow = "1.0" 35 | chrono = { version = "0.4", optional = true } 36 | futures-util = "0.3" 37 | indoc = { version = "2.0", optional = true } 38 | lazy-regex = { version = "2.2", optional = true } 39 | once_cell = "1.4" 40 | rand = "0.8" 41 | serde = { version = "1.0", features = ["derive"] } 42 | serde_json = { version = "1.0", optional = true } 43 | tinyvec = { version = "1.5", features = ["alloc"] } 44 | tracing = "0.1" 45 | tracing-opentelemetry = { version = "0.18", optional = true } 46 | tracing-subscriber = { version = "0.3", features = ["json"] } 47 | humantime-serde = "1.1.1" 48 | 49 | [dependencies.config] 50 | version = "0.13" 51 | default-features = false 52 | features = ["toml"] 53 | 54 | [dependencies.opentelemetry] 55 | version = "0.18" 56 | features = ["rt-tokio"] 57 | optional = true 58 | 59 | [dependencies.opentelemetry-jaeger] 60 | version = "0.17" 61 | features = ["rt-tokio"] 62 | optional = true 63 | 64 | [dependencies.reqwest] 65 | version = "0.11" 66 | default-features = false 67 | features = ["blocking", "rustls-tls", "json"] 68 | optional = true 69 | 70 | [dependencies.rusqlite] 71 | version = "0.27" 72 | features = ["bundled", "backup"] 73 | optional = true 74 | 75 | [dependencies.serenity] 76 | version = "0.11" 77 | default-features = false 78 | features = ["model", "rustls_backend"] 79 | 80 | [dependencies.sea-orm] 81 | version = "0.11" 82 | default-features = false 83 | features = ["macros", "runtime-tokio-rustls"] 84 | 85 | [dependencies.sea-orm-migration] 86 | version = "0.11" 87 | features = ["runtime-tokio-rustls"] 88 | 89 | [dependencies.tokio] 90 | version = "1.0" 91 | features = ["macros", "rt", "rt-multi-thread", "time", "fs"] 92 | 93 | [dependencies.url] 94 | version = "2.2" 95 | features = ["serde"] 
96 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM --platform=$BUILDPLATFORM rust:1.69-slim-bullseye AS auditor 2 | RUN apt-get update && \ 3 | apt-get install -y --no-install-recommends pkg-config=0.29.2-1 libssl-dev=1.1.1n-0+deb11u4 && \ 4 | USER=root cargo new --bin highlights && \ 5 | cargo install cargo-audit 6 | COPY ["Cargo.*", "./"] 7 | RUN cargo audit -D unsound -D yanked 8 | 9 | FROM --platform=$BUILDPLATFORM rust:1.69-alpine3.17 AS builder 10 | RUN apk add --no-cache --update musl-dev=1.2.3-r5 && \ 11 | USER=root cargo new --bin highlights 12 | 13 | ARG RUSTTARGET 14 | ARG MUSLHOST 15 | ARG MUSLTARGET 16 | RUN if [[ ! -z "$RUSTTARGET" ]]; then \ 17 | rustup target add $RUSTTARGET && \ 18 | wget https://more.musl.cc/11.2.1/$MUSLHOST/$MUSLTARGET-cross.tgz && \ 19 | tar xzf $MUSLTARGET-cross.tgz; \ 20 | fi 21 | 22 | WORKDIR /highlights 23 | COPY ["Cargo.toml", "Cargo.lock", "./"] 24 | RUN cargo fetch ${RUSTTARGET:+--target $RUSTTARGET} 25 | RUN if [[ ! -z "$RUSTTARGET" ]]; then \ 26 | export TARGET_CC=/$MUSLTARGET-cross/bin/$MUSLTARGET-gcc; \ 27 | mkdir .cargo && \ 28 | echo "[target.$RUSTTARGET]" > .cargo/config.toml && \ 29 | echo "linker = \"$TARGET_CC\"" >> .cargo/config.toml; \ 30 | fi; \ 31 | cargo build --release ${RUSTTARGET:+--target $RUSTTARGET} && \ 32 | rm src/main.rs target/$RUSTTARGET/release/deps/highlights* 33 | COPY ["src", "./src"] 34 | RUN if [[ ! -z "$RUSTTARGET" ]]; then \ 35 | export TARGET_CC=/$MUSLTARGET-cross/bin/$MUSLTARGET-gcc; \ 36 | fi; \ 37 | cargo build --release ${RUSTTARGET:+--target $RUSTTARGET} && \ 38 | if [[ ! -z "$RUSTTARGET" ]]; then \ 39 | mv target/$RUSTTARGET/release/highlights target/release/highlights; \ 40 | fi 41 | 42 | FROM alpine:3.17.0 43 | RUN apk add --no-cache --update tini=0.19.0-r1 && \ 44 | addgroup -g 1000 highlights \ 45 | && adduser -u 1000 -H -D -G highlights -s /bin/sh highlights 46 | ENTRYPOINT ["/sbin/tini", "--"] 47 | USER highlights 48 | WORKDIR /opt/highlights 49 | RUN mkdir data 50 | COPY --from=builder /highlights/target/release/highlights /usr/local/bin/highlights 51 | CMD ["/usr/local/bin/highlights"] 52 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Open Software License ("OSL") v. 3.0 2 | 3 | This Open Software License (the "License") applies to any original work of 4 | authorship (the "Original Work") whose owner (the "Licensor") has placed the 5 | following licensing notice adjacent to the copyright notice for the Original 6 | Work: 7 | 8 | Licensed under the Open Software License version 3.0 9 | 10 | 1) Grant of Copyright License. 
Licensor grants You a worldwide, royalty-free, 11 | non-exclusive, sublicensable license, for the duration of the copyright, to do 12 | the following: 13 | 14 | a) to reproduce the Original Work in copies, either alone or as part of a 15 | collective work; 16 | 17 | b) to translate, adapt, alter, transform, modify, or arrange the Original 18 | Work, thereby creating derivative works ("Derivative Works") based upon the 19 | Original Work; 20 | 21 | c) to distribute or communicate copies of the Original Work and Derivative 22 | Works to the public, with the proviso that copies of Original Work or 23 | Derivative Works that You distribute or communicate shall be licensed under 24 | this Open Software License; 25 | 26 | d) to perform the Original Work publicly; and 27 | 28 | e) to display the Original Work publicly. 29 | 30 | 2) Grant of Patent License. Licensor grants You a worldwide, royalty-free, 31 | non-exclusive, sublicensable license, under patent claims owned or controlled 32 | by the Licensor that are embodied in the Original Work as furnished by the 33 | Licensor, for the duration of the patents, to make, use, sell, offer for sale, 34 | have made, and import the Original Work and Derivative Works. 35 | 36 | 3) Grant of Source Code License. The term "Source Code" means the preferred 37 | form of the Original Work for making modifications to it and all available 38 | documentation describing how to modify the Original Work. Licensor agrees to 39 | provide a machine-readable copy of the Source Code of the Original Work along 40 | with each copy of the Original Work that Licensor distributes. Licensor 41 | reserves the right to satisfy this obligation by placing a machine-readable 42 | copy of the Source Code in an information repository reasonably calculated to 43 | permit inexpensive and convenient access by You for as long as Licensor 44 | continues to distribute the Original Work. 45 | 46 | 4) Exclusions From License Grant. Neither the names of Licensor, nor the names 47 | of any contributors to the Original Work, nor any of their trademarks or 48 | service marks, may be used to endorse or promote products derived from this 49 | Original Work without express prior permission of the Licensor. Except as 50 | expressly stated herein, nothing in this License grants any license to 51 | Licensor's trademarks, copyrights, patents, trade secrets or any other 52 | intellectual property. No patent license is granted to make, use, sell, offer 53 | for sale, have made, or import embodiments of any patent claims other than the 54 | licensed claims defined in Section 2. No license is granted to the trademarks 55 | of Licensor even if such marks are included in the Original Work. Nothing in 56 | this License shall be interpreted to prohibit Licensor from licensing under 57 | terms different from this License any Original Work that Licensor otherwise 58 | would have a right to license. 59 | 60 | 5) External Deployment. The term "External Deployment" means the use, 61 | distribution, or communication of the Original Work or Derivative Works in any 62 | way such that the Original Work or Derivative Works may be used by anyone 63 | other than You, whether those works are distributed or communicated to those 64 | persons or made available as an application intended for use over a network. 65 | As an express condition for the grants of license hereunder, You must treat 66 | any External Deployment by You of the Original Work or a Derivative Work as a 67 | distribution under section 1(c). 
68 | 69 | 6) Attribution Rights. You must retain, in the Source Code of any Derivative 70 | Works that You create, all copyright, patent, or trademark notices from the 71 | Source Code of the Original Work, as well as any notices of licensing and any 72 | descriptive text identified therein as an "Attribution Notice." You must cause 73 | the Source Code for any Derivative Works that You create to carry a prominent 74 | Attribution Notice reasonably calculated to inform recipients that You have 75 | modified the Original Work. 76 | 77 | 7) Warranty of Provenance and Disclaimer of Warranty. Licensor warrants that 78 | the copyright in and to the Original Work and the patent rights granted herein 79 | by Licensor are owned by the Licensor or are sublicensed to You under the 80 | terms of this License with the permission of the contributor(s) of those 81 | copyrights and patent rights. Except as expressly stated in the immediately 82 | preceding sentence, the Original Work is provided under this License on an "AS 83 | IS" BASIS and WITHOUT WARRANTY, either express or implied, including, without 84 | limitation, the warranties of non-infringement, merchantability or fitness for 85 | a particular purpose. THE ENTIRE RISK AS TO THE QUALITY OF THE ORIGINAL WORK 86 | IS WITH YOU. This DISCLAIMER OF WARRANTY constitutes an essential part of this 87 | License. No license to the Original Work is granted by this License except 88 | under this disclaimer. 89 | 90 | 8) Limitation of Liability. Under no circumstances and under no legal theory, 91 | whether in tort (including negligence), contract, or otherwise, shall the 92 | Licensor be liable to anyone for any indirect, special, incidental, or 93 | consequential damages of any character arising as a result of this License or 94 | the use of the Original Work including, without limitation, damages for loss 95 | of goodwill, work stoppage, computer failure or malfunction, or any and all 96 | other commercial damages or losses. This limitation of liability shall not 97 | apply to the extent applicable law prohibits such limitation. 98 | 99 | 9) Acceptance and Termination. If, at any time, You expressly assented to this 100 | License, that assent indicates your clear and irrevocable acceptance of this 101 | License and all of its terms and conditions. If You distribute or communicate 102 | copies of the Original Work or a Derivative Work, You must make a reasonable 103 | effort under the circumstances to obtain the express assent of recipients to 104 | the terms of this License. This License conditions your rights to undertake 105 | the activities listed in Section 1, including your right to create Derivative 106 | Works based upon the Original Work, and doing so without honoring these terms 107 | and conditions is prohibited by copyright law and international treaty. 108 | Nothing in this License is intended to affect copyright exceptions and 109 | limitations (including "fair use" or "fair dealing"). This License shall 110 | terminate immediately and You may no longer exercise any of the rights granted 111 | to You by this License upon your failure to honor the conditions in Section 112 | 1(c). 113 | 114 | 10) Termination for Patent Action. This License shall terminate automatically 115 | and You may no longer exercise any of the rights granted to You by this 116 | License as of the date You commence an action, including a cross-claim or 117 | counterclaim, against Licensor or any licensee alleging that the Original Work 118 | infringes a patent. 
This termination provision shall not apply for an action 119 | alleging patent infringement by combinations of the Original Work with other 120 | software or hardware. 121 | 122 | 11) Jurisdiction, Venue and Governing Law. Any action or suit relating to this 123 | License may be brought only in the courts of a jurisdiction wherein the 124 | Licensor resides or in which Licensor conducts its primary business, and under 125 | the laws of that jurisdiction excluding its conflict-of-law provisions. The 126 | application of the United Nations Convention on Contracts for the 127 | International Sale of Goods is expressly excluded. Any use of the Original 128 | Work outside the scope of this License or after its termination shall be 129 | subject to the requirements and penalties of copyright or patent law in the 130 | appropriate jurisdiction. This section shall survive the termination of this 131 | License. 132 | 133 | 12) Attorneys' Fees. In any action to enforce the terms of this License or 134 | seeking damages relating thereto, the prevailing party shall be entitled to 135 | recover its costs and expenses, including, without limitation, reasonable 136 | attorneys' fees and costs incurred in connection with such action, including 137 | any appeal of such action. This section shall survive the termination of this 138 | License. 139 | 140 | 13) Miscellaneous. If any provision of this License is held to be 141 | unenforceable, such provision shall be reformed only to the extent necessary 142 | to make it enforceable. 143 | 144 | 14) Definition of "You" in This License. "You" throughout this License, 145 | whether in upper or lower case, means an individual or a legal entity 146 | exercising rights under, and complying with all of the terms of, this License. 147 | For legal entities, "You" includes any entity that controls, is controlled by, 148 | or is under common control with you. For purposes of this definition, 149 | "control" means (i) the power, direct or indirect, to cause the direction or 150 | management of such entity, whether by contract or otherwise, or (ii) ownership 151 | of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial 152 | ownership of such entity. 153 | 154 | 15) Right to Use. You may use the Original Work in all ways not otherwise 155 | restricted or conditioned by this License or by law, and Licensor promises not 156 | to interfere with or be responsible for such uses by You. 157 | 158 | 16) Modification of This License. This License is Copyright © 2005 Lawrence 159 | Rosen. Permission is granted to copy, distribute, or communicate this License 160 | without modification. Nothing in this License permits You to modify this 161 | License as applied to the Original Work or to Derivative Works. 
However, You 162 | may modify the text of this License and copy, distribute or communicate your 163 | modified version (the "Modified License") and apply it to other original works 164 | of authorship subject to the following conditions: (i) You may not indicate in 165 | any way that your Modified License is the "Open Software License" or "OSL" and 166 | you may not use those names in the name of your Modified License; (ii) You 167 | must replace the notice specified in the first paragraph above with the notice 168 | "Licensed under <insert your license name here>" or with a notice of your own 169 | that is not confusingly similar to the notice in this License; and (iii) You 170 | may not claim that your original works are open source software unless your 171 | Modified License has been approved by Open Source Initiative (OSI) and You 172 | comply with its license review and certification process. 173 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Highlights 2 | 3 | Highlights is a simple but flexible highlighting bot for Discord. Add a keyword in a server to get notified in direct messages whenever that keyword appears there. 4 | 5 | You can add highlights to your server directly with [this link](https://discord.com/api/oauth2/authorize?client_id=740802975576096829&scope=bot+applications.commands). If you run into any problems, please [make an issue here](https://github.com/ThatsNoMoon/highlights/issues/new?template=bug_report.md) or let me know on [the Highlights dev server](https://discord.gg/9phBJ9tzQ2), `@ThatsNoMoon#0175`. 6 | 7 | ## Features 8 | - Add keywords to be notified about, per-server or per-channel 9 | - Ignore phrases to make your keywords more specific 10 | - Mute channels to filter out noise 11 | - Block obnoxious users 12 | 13 | For self-hosters, highlights includes: 14 | - PostgreSQL and SQLite support 15 | - Automatic SQLite backups and backup pruning 16 | - Error reporting via [Discord webhook](https://support.discord.com/hc/en-us/articles/228383668-Intro-to-Webhooks) 17 | - Performance monitoring and observability with [Jaeger](https://jaegertracing.io/) 18 | - Feature flags for smaller custom builds 19 | 20 | ## Docker 21 | You can find a Dockerfile in this repository, or use [`thatsnomoon/highlights`](https://hub.docker.com/r/thatsnomoon/highlights). A `docker-compose.yml` is also provided that runs Highlights alongside a Jaeger agent, collector, and query server, plus Cassandra, and sets up Cassandra to store Jaeger traces. 22 | 23 | ### AArch64, other alternate architectures 24 | 25 | The provided Dockerfile supports building for any architecture supported by both Rust and [musl.cc](https://musl.cc).
I build `thatsnomoon/highlights` for AArch64 alongside x86_64; if you need a different architecture, use `docker buildx build` with `--platform=linux/<architecture>` and provide appropriate values for the following build args: 26 | - `--build-arg RUSTTARGET=<Rust target>` (ex: `aarch64-unknown-linux-musl`) 27 | - `--build-arg MUSLHOST=<musl.cc host>` (ex: `x86_64-linux-musl`; see [supported musl.cc hosts](https://more.musl.cc/10.2.1)) 28 | - `--build-arg MUSLTARGET=<musl.cc target>` (ex: `aarch64-linux-musl`; for x86_64, see [supported musl.cc targets here](https://more.musl.cc/10.2.1/x86_64-linux-musl)) 29 | 30 | ## Heroku 31 | [![Deploy on Heroku](https://www.herokucdn.com/deploy/button.svg)](https://heroku.com/deploy?template=https://github.com/ThatsNoMoon/highlights/tree/heroku) 32 | 33 | ## Download 34 | You can find downloads for 64-bit Windows and Linux, as well as 64-bit Linux ARM (e.g. for Raspberry Pi), on [the releases page](https://github.com/ThatsNoMoon/highlights/releases/). 35 | 36 | ## Building 37 | Building Highlights requires `cargo`. [rustup](https://rustup.rs) is the recommended installation method for most use-cases. 38 | 39 | Once you have `cargo` installed, run `cargo build --release` (or `cargo build` for an unoptimized build) to produce an executable at `target/release/highlights` (or `target/debug/highlights`). 40 | 41 | If you're contributing to highlights, I recommend moving the `pre-commit` file to `.git/hooks` so your code is checked for issues before committing (avoiding the need for commits to fix `rustfmt` or `clippy` errors). 42 | 43 | ## Configuration 44 | 45 | Highlights is configured using a TOML file at `./config.toml` by default. To use a different path, set the `HIGHLIGHTS_CONFIG` environment variable. The default config with documentation is provided [here](example_config.toml). All options can also be set using environment variables of the form `HIGHLIGHTS_SECTION_PROPERTY`. Examples: 46 | ``` 47 | HIGHLIGHTS_BOT_TOKEN="your bot token goes here" 48 | HIGHLIGHTS_BOT_APPLICATIONID="your discord application id (not bot token) here" 49 | HIGHLIGHTS_DATABASE_PATH="highlights_data" 50 | ``` 51 | As in the above example, underscores in property names should be removed so that they aren't interpreted as section separators. 52 | 53 | ## Backups 54 | 55 | Unless backups are disabled in the config, highlights automatically backs up its database every time it starts, and every 24hrs after that. These backups are saved to the `backups` folder inside the configured database path. Each backup is a full snapshot of the database, so to restore one you can just move it back to the database path and rename it to `data.db`. Highlights doesn't delete any backups from the last 24hrs, but it does clean up older backups automatically: 56 | - Seven daily backups are kept 57 | - Four weekly backups are kept 58 | - Twelve monthly backups are kept 59 | - Yearly backups are kept indefinitely 60 | 61 | Note that highlights will always keep up to these numbers of backups. For example, even if there are not seven backups from the last week, the seven most recent backups made at least a day apart will be saved; likewise, the next four most recent backups made at least a week apart will be saved, and so on. 62 | 63 | Highlights uses the timestamp embedded in the backup name to determine how old it is, so don't mess with the file names (it'll log a warning about any files it doesn't recognize).
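To make the retention rules above concrete, here is a minimal, illustrative sketch of the pruning decision in Rust (standard library only). It is not the bot's actual implementation: the function and type names are invented for this example, the tier spacings (1-day, 7-day, 30-day, and 365-day gaps) are assumptions based only on the description above, and it works on raw Unix timestamps rather than the timestamps embedded in backup file names.

```rust
use std::time::{SystemTime, UNIX_EPOCH};

const DAY: u64 = 24 * 60 * 60;

/// One retention tier: keep up to `count` backups spaced at least `spacing`
/// seconds apart. `count: None` means "no limit" (the yearly tier).
struct Tier {
    spacing: u64,
    count: Option<u32>,
}

/// Given backup timestamps (seconds since the Unix epoch), return the ones to
/// keep: everything from the last 24 hours, then 7 daily, 4 weekly, 12 monthly,
/// and unlimited yearly backups, as described in the Backups section.
fn backups_to_keep(mut backups: Vec<u64>, now: u64) -> Vec<u64> {
    backups.sort_unstable_by(|a, b| b.cmp(a)); // newest first

    let tiers = [
        Tier { spacing: DAY, count: Some(7) },       // daily
        Tier { spacing: 7 * DAY, count: Some(4) },   // weekly
        Tier { spacing: 30 * DAY, count: Some(12) }, // monthly
        Tier { spacing: 365 * DAY, count: None },    // yearly
    ];

    let mut tier_iter = tiers.iter();
    let mut tier = tier_iter.next();
    let mut remaining = tier.and_then(|t| t.count);
    let mut last_kept: Option<u64> = None;
    let mut kept = Vec::new();

    for ts in backups {
        // Backups from the last 24 hours are always kept.
        if now.saturating_sub(ts) < DAY {
            kept.push(ts);
            last_kept = Some(ts);
            continue;
        }

        // No tiers left: everything older would be pruned.
        let Some(t) = tier else { break };

        // Keep this backup if it is far enough behind the last one we kept.
        let far_enough = match last_kept {
            Some(prev) => prev.saturating_sub(ts) >= t.spacing,
            None => true,
        };

        if far_enough {
            kept.push(ts);
            last_kept = Some(ts);
            if let Some(n) = remaining {
                if n <= 1 {
                    // This tier's quota is used up; move on to the next one.
                    tier = tier_iter.next();
                    remaining = tier.and_then(|t| t.count);
                } else {
                    remaining = Some(n - 1);
                }
            }
        }
        // Anything neither recent nor spaced far enough apart would be deleted.
    }

    kept
}

fn main() {
    let now = SystemTime::now()
        .duration_since(UNIX_EPOCH)
        .expect("system clock is before 1970")
        .as_secs();
    // Pretend a backup was made every 12 hours for the last 60 days.
    let backups: Vec<u64> = (0..120).map(|i| now - i * 12 * 60 * 60).collect();
    let kept = backups_to_keep(backups.clone(), now);
    println!("keeping {} of {} backups", kept.len(), backups.len());
}
```

The real bot derives each backup's age from the timestamp in its file name (which is why renaming backups confuses it) and applies this kind of tiered keep-or-delete rule when it cleans up old backups; the sketch only illustrates the selection logic.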
64 | 65 | ## Monitoring 66 | 67 | If you set the `logging.jaeger` config option, highlights will trace execution times to be reported by [Jaeger](https://jaegertracing.io/). The address should be in the form `address:port`, e.g. `127.0.0.1:6831`. You should provide the address of your Jaeger agent. 68 | 69 | ## License 70 | 71 | Highlights is licensed under the [OSL 3.0](https://choosealicense.com/licenses/osl-3.0/). Derivatives must be licensed under OSL 3.0, but this does not include any linking restrictions; you may link this code to closed-source code. 72 | -------------------------------------------------------------------------------- /docker-compose.yml: -------------------------------------------------------------------------------- 1 | services: 2 | highlights: 3 | image: thatsnomoon/highlights:2 4 | networks: 5 | - net 6 | secrets: 7 | - highlights-config 8 | environment: 9 | HIGHLIGHTS_CONFIG: "/run/secrets/highlights-config" 10 | HIGHLIGHTS_LOGGING.JAEGER: "jaeger-agent:6831" 11 | volumes: 12 | - "highlights-data:/opt/highlights/data" 13 | depends_on: 14 | jaeger-agent: 15 | condition: service_started 16 | restart: unless-stopped 17 | 18 | jaeger-agent: 19 | image: jaegertracing/jaeger-agent:1.34 20 | networks: 21 | - net 22 | command: "--reporter.grpc.host-port=jaeger-collector:14250" 23 | depends_on: 24 | jaeger-collector: 25 | condition: service_started 26 | restart: unless-stopped 27 | 28 | jaeger-collector: 29 | image: jaegertracing/jaeger-collector:1.34 30 | networks: 31 | - net 32 | environment: 33 | SPAN_STORAGE_TYPE: "cassandra" 34 | CASSANDRA_SERVERS: "cassandra" 35 | CASSANDRA_KEYSPACE: "jaeger_v1_dc1" 36 | depends_on: 37 | cassandra-setup: 38 | condition: service_completed_successfully 39 | restart: unless-stopped 40 | 41 | jaeger-query: 42 | image: jaegertracing/jaeger-query:1.34 43 | networks: 44 | - net 45 | ports: 46 | - "16686:16686" 47 | environment: 48 | SPAN_STORAGE_TYPE: "cassandra" 49 | CASSANDRA_SERVERS: "cassandra" 50 | CASSANDRA_KEYSPACE: "jaeger_v1_dc1" 51 | depends_on: 52 | cassandra-setup: 53 | condition: service_completed_successfully 54 | restart: unless-stopped 55 | 56 | cassandra-setup: 57 | image: thatsnomoon/highlights-jaeger-cassandra-setup:1.0.0 58 | networks: 59 | - net 60 | environment: 61 | MODE: "prod" 62 | DATACENTER: "dc1" 63 | REPLICATION_FACTOR: 1 64 | depends_on: 65 | cassandra: 66 | condition: service_healthy 67 | 68 | cassandra: 69 | image: cassandra:4.0 70 | networks: 71 | - net 72 | volumes: 73 | - "cassandra-data:/opt/cassandra/data" 74 | healthcheck: 75 | test: ["CMD", "cqlsh", "-e", "show version;"] 76 | interval: 30s 77 | timeout: 10s 78 | restart: unless-stopped 79 | 80 | 81 | secrets: 82 | highlights-config: 83 | file: "./config.toml" 84 | 85 | networks: 86 | net: {} 87 | 88 | volumes: 89 | highlights-data: 90 | cassandra-data: 91 | -------------------------------------------------------------------------------- /example_config.toml: -------------------------------------------------------------------------------- 1 | # All fields other than bot.token and bot.application_id are optional 2 | # Defaults shown below 3 | 4 | [bot] 5 | # Discord bot token 6 | token = "..." 7 | # Discord application (not bot account!) ID 8 | application_id = "..." 
9 | # Disables sharing the bot invite link in about command 10 | private = false 11 | 12 | [behavior] 13 | # Maximum amount of keywords users can subscribe to 14 | max_keywords = 100 15 | # Amount of time to wait for activity before sending a notification 16 | # Other examples: "1m 30sec", "5minutes" 17 | # See https://docs.rs/humantime/latest/humantime/fn.parse_duration.html for complete list 18 | patience = "2min" 19 | # Amount of time to leave notifications visible 20 | # This uses the same format as patience 21 | # Other examples: "1y", "90d", "1M" (one month) 22 | #notification_lifetime = "1month" 23 | 24 | [logging] 25 | # Discord webhook to send errors and panics to 26 | #webhook = "..." 27 | # IP and port to Jaeger agent 28 | #jaeger = "..." 29 | # Ratio of traces to sample when monitoring with Jaeger, between 0.0 and 1.0 30 | sample_ratio = 1.0 31 | # Default log level for entire application 32 | level = "WARN" 33 | # Whether or not to use ANSI color codes (may not work on Windows) 34 | color = true 35 | # Format of standard output logging (can be "compact", "pretty", or "json") 36 | format = "compact" 37 | [logging.filters] 38 | # The `highlights` crate will log at INFO instead 39 | highlights = "INFO" 40 | 41 | [database] 42 | # The folder where the SQLite database and backups are stored 43 | path = "./data" 44 | # Database connection URL for use with PostgreSQL (or SQLite, if desired. 45 | # Backups cannot be done using an SQLite URL.). 46 | #url = "postgres://username:password@host:port/database" 47 | # Whether to automatically backup SQLite database 48 | backup = true 49 | -------------------------------------------------------------------------------- /jaeger-db-setup/APACHE_LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 
34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. 
You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 
202 | -------------------------------------------------------------------------------- /jaeger-db-setup/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM cassandra:4.0 2 | 3 | WORKDIR /scripts 4 | 5 | COPY ["create.sh", "v004.cql.tmpl", "./"] 6 | 7 | CMD ["/scripts/create.sh"] 8 | -------------------------------------------------------------------------------- /jaeger-db-setup/README.md: -------------------------------------------------------------------------------- 1 | ## Jaeger DB Setup Image 2 | This is a simple Dockerfile for an image to set up a Cassandra 4.0 database for Jaeger. 3 | 4 | You may be able to use [`jaegertracing/jaeger-cassandra-schema`](https://hub.docker.com/r/jaegertracing/jaeger-cassandra-schema), but I had problems using it with Cassandra 4.0, and problems using Cassandra 3.11 with Jaeger. 5 | 6 | ### License 7 | The scripts in this folder, `create.sh` and `v004.cql.tmpl`, are adapted from those in [the Jaeger repository](https://github.com/jaegertracing/jaeger/tree/4f9f7dfa7ea3e5346679a1a045a6cd346a061870/plugin/storage/cassandra/schema), and are thus licensed under the Apache 2.0 license. 8 | -------------------------------------------------------------------------------- /jaeger-db-setup/create.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # Copyright 2021 Jaeger authors, 2022 ThatsNoMoon 4 | # 5 | # Licensed under the Apache License, Version 2.0 (the "License"); 6 | # you may not use this file except in compliance with the License. 7 | # You may obtain a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 16 | 17 | function usage { 18 | >&2 echo "Error: $1" 19 | >&2 echo "" 20 | >&2 echo "The following parameters can be set via environment:" 21 | >&2 echo " MODE - prod or test. 
Test keyspace is usable on a single node cluster (no replication)" 22 | >&2 echo " CASSANDRA - hostname of cassandra (default: cassandra)" 23 | >&2 echo " DATACENTER - datacenter name for network topology used in prod (optional in MODE=test)" 24 | >&2 echo " TRACE_TTL - time to live for trace data, in seconds (default: 172800, 2 days)" 25 | >&2 echo " DEPENDENCIES_TTL - time to live for dependencies data, in seconds (default: 0, no TTL)" 26 | >&2 echo " KEYSPACE - keyspace (default: jaeger_v1_{datacenter})" 27 | >&2 echo " REPLICATION_FACTOR - replication factor for prod (default: 2 for prod, 1 for test)" 28 | exit 1 29 | } 30 | 31 | trace_ttl=${TRACE_TTL:-172800} 32 | dependencies_ttl=${DEPENDENCIES_TTL:-0} 33 | cassandra=${CASSANDRA:-cassandra} 34 | 35 | template=${1:-v004.cql.tmpl} 36 | 37 | if [[ "$MODE" == "" ]]; then 38 | usage "missing MODE parameter" 39 | elif [[ "$MODE" == "prod" ]]; then 40 | if [[ "$DATACENTER" == "" ]]; then usage "missing DATACENTER parameter for prod mode"; fi 41 | datacenter=$DATACENTER 42 | replication_factor=${REPLICATION_FACTOR:-2} 43 | elif [[ "$MODE" == "test" ]]; then 44 | datacenter=${DATACENTER:-'test'} 45 | replication_factor=${REPLICATION_FACTOR:-1} 46 | else 47 | usage "invalid MODE=$MODE, expecting 'prod' or 'test'" 48 | fi 49 | 50 | replication="{'class': 'SimpleStrategy', 'replication_factor': '${replication_factor}'}" 51 | keyspace=${KEYSPACE:-"jaeger_v1_${datacenter}"} 52 | 53 | if [[ $keyspace =~ [^a-zA-Z0-9_] ]]; then 54 | usage "invalid characters in KEYSPACE=$keyspace parameter, please use letters, digits or underscores" 55 | fi 56 | 57 | if cqlsh -e "use $keyspace;" $cassandra 2> /dev/null; then 58 | echo "$keyspace already exists. nothing to do." 59 | exit 0 60 | fi 61 | 62 | >&2 cat <>, 52 | ); 53 | 54 | CREATE TYPE IF NOT EXISTS ${keyspace}.span_ref ( 55 | ref_type text, 56 | trace_id blob, 57 | span_id bigint, 58 | ); 59 | 60 | CREATE TYPE IF NOT EXISTS ${keyspace}.process ( 61 | service_name text, 62 | tags list>, 63 | ); 64 | 65 | -- Notice we have span_hash. This exists only for zipkin backwards compat. Zipkin allows spans with the same ID. 66 | -- Note: Cassandra re-orders non-PK columns alphabetically, so the table looks differently in CQLSH "describe table". 
67 | -- start_time is bigint instead of timestamp as we require microsecond precision 68 | CREATE TABLE IF NOT EXISTS ${keyspace}.traces ( 69 | trace_id blob, 70 | span_id bigint, 71 | span_hash bigint, 72 | parent_id bigint, 73 | operation_name text, 74 | flags int, 75 | start_time bigint, // microseconds since epoch 76 | duration bigint, // microseconds 77 | tags list<frozen<keyvalue>>, 78 | logs list<frozen<log>>, 79 | refs list<frozen<span_ref>>, 80 | process frozen<process>, 81 | PRIMARY KEY (trace_id, span_id, span_hash) 82 | ) 83 | WITH compaction = { 84 | 'compaction_window_size': '1', 85 | 'compaction_window_unit': 'HOURS', 86 | 'class': 'org.apache.cassandra.db.compaction.TimeWindowCompactionStrategy' 87 | } 88 | AND default_time_to_live = ${trace_ttl} 89 | AND speculative_retry = 'NONE' 90 | AND gc_grace_seconds = 10800; -- 3 hours of downtime acceptable on nodes 91 | 92 | CREATE TABLE IF NOT EXISTS ${keyspace}.service_names ( 93 | service_name text, 94 | PRIMARY KEY (service_name) 95 | ) 96 | WITH compaction = { 97 | 'min_threshold': '4', 98 | 'max_threshold': '32', 99 | 'class': 'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy' 100 | } 101 | AND default_time_to_live = ${trace_ttl} 102 | AND speculative_retry = 'NONE' 103 | AND gc_grace_seconds = 10800; -- 3 hours of downtime acceptable on nodes 104 | 105 | CREATE TABLE IF NOT EXISTS ${keyspace}.operation_names_v2 ( 106 | service_name text, 107 | span_kind text, 108 | operation_name text, 109 | PRIMARY KEY ((service_name), span_kind, operation_name) 110 | ) 111 | WITH compaction = { 112 | 'min_threshold': '4', 113 | 'max_threshold': '32', 114 | 'class': 'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy' 115 | } 116 | AND default_time_to_live = ${trace_ttl} 117 | AND speculative_retry = 'NONE' 118 | AND gc_grace_seconds = 10800; -- 3 hours of downtime acceptable on nodes 119 | 120 | -- index of trace IDs by service + operation names, sorted by span start_time.
121 | CREATE TABLE IF NOT EXISTS ${keyspace}.service_operation_index ( 122 | service_name text, 123 | operation_name text, 124 | start_time bigint, // microseconds since epoch 125 | trace_id blob, 126 | PRIMARY KEY ((service_name, operation_name), start_time) 127 | ) WITH CLUSTERING ORDER BY (start_time DESC) 128 | AND compaction = { 129 | 'compaction_window_size': '1', 130 | 'compaction_window_unit': 'HOURS', 131 | 'class': 'org.apache.cassandra.db.compaction.TimeWindowCompactionStrategy' 132 | } 133 | AND default_time_to_live = ${trace_ttl} 134 | AND speculative_retry = 'NONE' 135 | AND gc_grace_seconds = 10800; -- 3 hours of downtime acceptable on nodes 136 | 137 | CREATE TABLE IF NOT EXISTS ${keyspace}.service_name_index ( 138 | service_name text, 139 | bucket int, 140 | start_time bigint, // microseconds since epoch 141 | trace_id blob, 142 | PRIMARY KEY ((service_name, bucket), start_time) 143 | ) WITH CLUSTERING ORDER BY (start_time DESC) 144 | AND compaction = { 145 | 'compaction_window_size': '1', 146 | 'compaction_window_unit': 'HOURS', 147 | 'class': 'org.apache.cassandra.db.compaction.TimeWindowCompactionStrategy' 148 | } 149 | AND default_time_to_live = ${trace_ttl} 150 | AND speculative_retry = 'NONE' 151 | AND gc_grace_seconds = 10800; -- 3 hours of downtime acceptable on nodes 152 | 153 | CREATE TABLE IF NOT EXISTS ${keyspace}.duration_index ( 154 | service_name text, // service name 155 | operation_name text, // operation name, or blank for queries without span name 156 | bucket timestamp, // time bucket, - the start_time of the given span rounded to an hour 157 | duration bigint, // span duration, in microseconds 158 | start_time bigint, // microseconds since epoch 159 | trace_id blob, 160 | PRIMARY KEY ((service_name, operation_name, bucket), duration, start_time, trace_id) 161 | ) WITH CLUSTERING ORDER BY (duration DESC, start_time DESC) 162 | AND compaction = { 163 | 'compaction_window_size': '1', 164 | 'compaction_window_unit': 'HOURS', 165 | 'class': 'org.apache.cassandra.db.compaction.TimeWindowCompactionStrategy' 166 | } 167 | AND default_time_to_live = ${trace_ttl} 168 | AND speculative_retry = 'NONE' 169 | AND gc_grace_seconds = 10800; -- 3 hours of downtime acceptable on nodes 170 | 171 | -- a bucketing strategy may have to be added for tag queries 172 | -- we can make this table even better by adding a timestamp to it 173 | CREATE TABLE IF NOT EXISTS ${keyspace}.tag_index ( 174 | service_name text, 175 | tag_key text, 176 | tag_value text, 177 | start_time bigint, // microseconds since epoch 178 | trace_id blob, 179 | span_id bigint, 180 | PRIMARY KEY ((service_name, tag_key, tag_value), start_time, trace_id, span_id) 181 | ) 182 | WITH CLUSTERING ORDER BY (start_time DESC) 183 | AND compaction = { 184 | 'compaction_window_size': '1', 185 | 'compaction_window_unit': 'HOURS', 186 | 'class': 'org.apache.cassandra.db.compaction.TimeWindowCompactionStrategy' 187 | } 188 | AND default_time_to_live = ${trace_ttl} 189 | AND speculative_retry = 'NONE' 190 | AND gc_grace_seconds = 10800; -- 3 hours of downtime acceptable on nodes 191 | 192 | CREATE TYPE IF NOT EXISTS ${keyspace}.dependency ( 193 | parent text, 194 | child text, 195 | call_count bigint, 196 | source text, 197 | ); 198 | 199 | -- compaction strategy is intentionally different as compared to other tables due to the size of dependencies data 200 | CREATE TABLE IF NOT EXISTS ${keyspace}.dependencies_v2 ( 201 | ts_bucket timestamp, 202 | ts timestamp, 203 | dependencies list<frozen<dependency>>, 204 | PRIMARY KEY (ts_bucket,
ts) 205 | ) WITH CLUSTERING ORDER BY (ts DESC) 206 | AND compaction = { 207 | 'min_threshold': '4', 208 | 'max_threshold': '32', 209 | 'class': 'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy' 210 | } 211 | AND default_time_to_live = ${dependencies_ttl}; 212 | 213 | -- adaptive sampling tables 214 | -- ./plugin/storage/cassandra/samplingstore/storage.go 215 | CREATE TABLE IF NOT EXISTS ${keyspace}.operation_throughput ( 216 | bucket int, 217 | ts timeuuid, 218 | throughput text, 219 | PRIMARY KEY(bucket, ts) 220 | ) WITH CLUSTERING ORDER BY (ts desc); 221 | 222 | CREATE TABLE IF NOT EXISTS ${keyspace}.sampling_probabilities ( 223 | bucket int, 224 | ts timeuuid, 225 | hostname text, 226 | probabilities text, 227 | PRIMARY KEY(bucket, ts) 228 | ) WITH CLUSTERING ORDER BY (ts desc); 229 | 230 | -- distributed lock 231 | -- ./plugin/pkg/distributedlock/cassandra/lock.go 232 | CREATE TABLE IF NOT EXISTS ${keyspace}.leases ( 233 | name text, 234 | owner text, 235 | PRIMARY KEY (name) 236 | ); 237 | -------------------------------------------------------------------------------- /pre-commit: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | # remove changes that won't be in the commit as applied 4 | git stash push -q --include-untracked --keep-index 5 | 6 | cargo fmt -- --check 7 | FMT_RESULT=$? 8 | 9 | cargo clippy -- --deny warnings 10 | CLIPPY_RESULT=$? 11 | 12 | if [ "$FMT_RESULT" -eq 0 ] && [ "$CLIPPY_RESULT" -eq 0 ]; then 13 | git stash pop -q 14 | else 15 | # restore stashed changes before aborting the commit 16 | git stash pop -q 17 | exit 1 18 | fi 19 | 20 | exit 0 21 | -------------------------------------------------------------------------------- /rustfmt.toml: -------------------------------------------------------------------------------- 1 | hard_tabs = true 2 | newline_style = "Unix" 3 | max_width = 80 4 | group_imports = "StdExternalCrate" 5 | imports_granularity = "Crate" 6 | use_field_init_shorthand = true 7 | -------------------------------------------------------------------------------- /src/bot/commands/blocks.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2022 ThatsNoMoon 2 | // Licensed under the Open Software License version 3.0 3 | 4 | //! Commands for adding, removing, and listing blocked users. 5 | 6 | use anyhow::{Context as _, Result}; 7 | use serenity::{ 8 | client::Context, 9 | model::application::interaction::application_command::ApplicationCommandInteraction as Command, 10 | }; 11 | 12 | use crate::{bot::util::respond_eph, db::Block}; 13 | 14 | /// Block a user. 15 | /// 16 | /// Usage: `/block <user>` 17 | #[tracing::instrument( 18 | skip_all, 19 | fields( 20 | user_id = %command.user.id, 21 | channel_id = %command.channel_id, 22 | command = %command.data.name, 23 | ) 24 | )] 25 | pub(crate) async fn block(ctx: Context, mut command: Command) -> Result<()> { 26 | check_opt_out!(ctx, command); 27 | 28 | let user = command 29 | .data 30 | .resolved 31 | .users 32 | .drain() 33 | .next() 34 | .map(|(_, user)| user) 35 | .context("User to block not provided")?; 36 | 37 | if user.id == command.user.id { 38 | return respond_eph(&ctx, &command, "❌ You can't block yourself!") 39 | .await; 40 | } 41 | 42 | let block = Block { 43 | user_id: command.user.id, 44 | blocked_id: user.id, 45 | }; 46 | 47 | if block.clone().exists().await?
{ 48 | respond_eph( 49 | &ctx, 50 | &command, 51 | format!("❌ You already blocked <@{}>!", user.id), 52 | ) 53 | .await 54 | } else { 55 | block.insert().await?; 56 | respond_eph(&ctx, &command, format!("✅ Blocked <@{}>", user.id)).await 57 | } 58 | } 59 | 60 | /// Unblock a user. 61 | /// 62 | /// Usage: `/unblock ` 63 | #[tracing::instrument( 64 | skip_all, 65 | fields( 66 | user_id = %command.user.id, 67 | channel_id = %command.channel_id, 68 | command = %command.data.name, 69 | ) 70 | )] 71 | pub(crate) async fn unblock(ctx: Context, mut command: Command) -> Result<()> { 72 | check_opt_out!(ctx, command); 73 | 74 | let user = command 75 | .data 76 | .resolved 77 | .users 78 | .drain() 79 | .next() 80 | .map(|(_, user)| user) 81 | .context("User to unblock not provided")?; 82 | 83 | if user.id == command.user.id { 84 | return respond_eph(&ctx, &command, "❌ You can't unblock yourself!") 85 | .await; 86 | } 87 | 88 | let block = Block { 89 | user_id: command.user.id, 90 | blocked_id: user.id, 91 | }; 92 | 93 | if !block.clone().exists().await? { 94 | respond_eph( 95 | &ctx, 96 | &command, 97 | format!("❌ You haven't blocked <@{}>!", user.id), 98 | ) 99 | .await 100 | } else { 101 | block.delete().await?; 102 | respond_eph(&ctx, &command, format!("✅ Unblocked <@{}>", user.id)) 103 | .await 104 | } 105 | } 106 | 107 | /// Lists blocked users. 108 | /// 109 | /// Usage: `/blocks` 110 | #[tracing::instrument( 111 | skip_all, 112 | fields( 113 | user_id = %command.user.id, 114 | channel_id = %command.channel_id, 115 | command = %command.data.name, 116 | ) 117 | )] 118 | pub(crate) async fn blocks(ctx: Context, command: Command) -> Result<()> { 119 | check_opt_out!(ctx, command); 120 | 121 | let blocks = Block::user_blocks(command.user.id) 122 | .await? 123 | .into_iter() 124 | .map(|block| format!("<@{}>", block.blocked_id)) 125 | .collect::>(); 126 | 127 | if blocks.is_empty() { 128 | respond_eph(&ctx, &command, "You haven't blocked any users!").await 129 | } else { 130 | let msg = format!( 131 | "{}'s blocked users:\n - {}", 132 | command.user.name, 133 | blocks.join("\n - ") 134 | ); 135 | 136 | respond_eph(&ctx, &command, msg).await 137 | } 138 | } 139 | -------------------------------------------------------------------------------- /src/bot/commands/keywords.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2022 ThatsNoMoon 2 | // Licensed under the Open Software License version 3.0 3 | 4 | //! Commands for adding, removing, and listing keywords. 5 | 6 | use std::{collections::HashMap, fmt::Write}; 7 | 8 | use anyhow::{Context as _, Result}; 9 | use futures_util::{stream::FuturesUnordered, TryStreamExt}; 10 | use indoc::indoc; 11 | use lazy_regex::regex; 12 | use once_cell::sync::Lazy; 13 | use serenity::{ 14 | client::Context, 15 | http::error::ErrorResponse, 16 | model::{ 17 | application::interaction::application_command::ApplicationCommandInteraction as Command, 18 | channel::{Channel, ChannelType, GuildChannel}, 19 | id::{ChannelId, GuildId}, 20 | }, 21 | prelude::HttpError, 22 | Error as SerenityError, 23 | }; 24 | 25 | use super::util::get_text_channels_in_guild; 26 | use crate::{ 27 | bot::{ 28 | highlighting::warn_for_failed_dm, 29 | util::{respond_eph, success, user_can_read_channel}, 30 | }, 31 | db::{Ignore, Keyword, KeywordKind}, 32 | settings::settings, 33 | }; 34 | 35 | /// Add a keyword. 
36 | /// 37 | /// Usage: `/add [channel]` 38 | #[tracing::instrument( 39 | skip_all, 40 | fields( 41 | user_id = %command.user.id, 42 | channel_id = %command.channel_id, 43 | command = %command.data.name, 44 | ) 45 | )] 46 | pub(crate) async fn add(ctx: Context, command: Command) -> Result<()> { 47 | check_opt_out!(ctx, command); 48 | let guild_id = require_guild!(&ctx, &command); 49 | let user_id = command.user.id; 50 | 51 | let keyword_count = Keyword::user_keyword_count(user_id).await?; 52 | 53 | if keyword_count >= settings().behavior.max_keywords as u64 { 54 | static MSG: Lazy String> = Lazy::new(|| { 55 | format!( 56 | "You can't create more than {} keywords!", 57 | settings().behavior.max_keywords 58 | ) 59 | }); 60 | 61 | return respond_eph(&ctx, &command, MSG.as_str()).await; 62 | } 63 | 64 | let keyword = command 65 | .data 66 | .options 67 | .get(0) 68 | .and_then(|o| o.value.as_ref()) 69 | .context("No keyword to add provided")? 70 | .as_str() 71 | .context("Keyword provided was not a string")? 72 | .trim() 73 | .to_lowercase(); 74 | 75 | if keyword.len() < 3 { 76 | return respond_eph( 77 | &ctx, 78 | &command, 79 | "❌ You can't highlight keywords shorter than 3 characters!", 80 | ) 81 | .await; 82 | } 83 | 84 | if !is_valid_keyword(&keyword) { 85 | return respond_eph(&ctx, &command, "❌ You can't add that keyword!") 86 | .await; 87 | } 88 | 89 | let keyword = match command.data.resolved.channels.values().next() { 90 | Some(channel) => { 91 | let channel = match ctx.cache.guild_channel(channel.id) { 92 | Some(c) if c.kind == ChannelType::Text => c, 93 | _ => { 94 | return respond_eph( 95 | &ctx, 96 | &command, 97 | "❌ Please provide a text channel!", 98 | ) 99 | .await 100 | } 101 | }; 102 | 103 | let self_id = ctx.cache.current_user_id(); 104 | match user_can_read_channel(&ctx, &channel, self_id).await { 105 | Ok(Some(true)) => Keyword { 106 | keyword, 107 | user_id, 108 | kind: KeywordKind::Channel(channel.id), 109 | }, 110 | Ok(Some(false)) => { 111 | return respond_eph( 112 | &ctx, 113 | &command, 114 | format!("❌ I can't read <#{}>!", channel.id), 115 | ) 116 | .await 117 | } 118 | Ok(None) => return Err(anyhow::anyhow!( 119 | "Self permissions not found in channel {} in guild {}", 120 | channel.id, 121 | guild_id 122 | )), 123 | Err(e) => return Err(e.context( 124 | "Failed to check for self permissions to read muted channel", 125 | )), 126 | } 127 | } 128 | None => Keyword { 129 | keyword, 130 | user_id, 131 | kind: KeywordKind::Guild(guild_id), 132 | }, 133 | }; 134 | 135 | if keyword.clone().exists().await? { 136 | return respond_eph( 137 | &ctx, 138 | &command, 139 | "❌ You already added that keyword!", 140 | ) 141 | .await; 142 | } 143 | 144 | keyword.insert().await?; 145 | 146 | success(&ctx, &command).await?; 147 | 148 | if keyword_count == 0 { 149 | let dm_channel = command.user.create_dm_channel(&ctx).await?; 150 | 151 | match dm_channel 152 | .say( 153 | &ctx, 154 | indoc!( 155 | " 156 | Test message; if you can read this, \ 157 | I can send you notifications successfully!" 158 | ), 159 | ) 160 | .await 161 | { 162 | Err(SerenityError::Http(err)) => match &*err { 163 | HttpError::UnsuccessfulRequest(ErrorResponse { 164 | error, .. 
165 | }) if error.message == "Cannot send messages to this user" => { 166 | warn_for_failed_dm(&ctx, &command).await?; 167 | } 168 | 169 | _ => return Err(SerenityError::Http(err).into()), 170 | }, 171 | Err(err) => return Err(err.into()), 172 | _ => {} 173 | } 174 | } 175 | 176 | Ok(()) 177 | } 178 | 179 | fn is_valid_keyword(keyword: &str) -> bool { 180 | !regex!(r"<([@#&]|a?:)").is_match(keyword) 181 | } 182 | 183 | /// Remove a keyword. 184 | /// 185 | /// Usage: `/remove [channel]` 186 | #[tracing::instrument( 187 | skip_all, 188 | fields( 189 | user_id = %command.user.id, 190 | channel_id = %command.channel_id, 191 | command = %command.data.name, 192 | ) 193 | )] 194 | pub(crate) async fn remove(ctx: Context, command: Command) -> Result<()> { 195 | check_opt_out!(ctx, command); 196 | let guild_id = require_guild!(&ctx, &command); 197 | let user_id = command.user.id; 198 | 199 | let keyword = command 200 | .data 201 | .options 202 | .get(0) 203 | .and_then(|o| o.value.as_ref()) 204 | .context("No keyword to add provided")? 205 | .as_str() 206 | .context("Keyword provided was not a string")? 207 | .trim() 208 | .to_lowercase(); 209 | 210 | let keyword = match command.data.resolved.channels.values().next() { 211 | Some(channel) => Keyword { 212 | keyword, 213 | user_id, 214 | kind: KeywordKind::Channel(channel.id), 215 | }, 216 | None => Keyword { 217 | keyword, 218 | user_id, 219 | kind: KeywordKind::Guild(guild_id), 220 | }, 221 | }; 222 | 223 | if !keyword.clone().exists().await? { 224 | return respond_eph( 225 | &ctx, 226 | &command, 227 | "❌ You haven't added that keyword!", 228 | ) 229 | .await; 230 | } 231 | 232 | keyword.delete().await?; 233 | 234 | success(&ctx, &command).await 235 | } 236 | 237 | /// Add an ignored phrase. 238 | /// 239 | /// Usage: `/ignore ` 240 | #[tracing::instrument( 241 | skip_all, 242 | fields( 243 | user_id = %command.user.id, 244 | channel_id = %command.channel_id, 245 | command = %command.data.name, 246 | ) 247 | )] 248 | pub(crate) async fn ignore(ctx: Context, command: Command) -> Result<()> { 249 | check_opt_out!(&ctx, command); 250 | let guild_id = require_guild!(&ctx, &command); 251 | 252 | let phrase = command 253 | .data 254 | .options 255 | .get(0) 256 | .and_then(|o| o.value.as_ref()) 257 | .context("No phrase to ignore provided")? 258 | .as_str() 259 | .context("Phrase provided not string")?; 260 | 261 | if phrase.len() < 3 { 262 | return respond_eph( 263 | &ctx, 264 | &command, 265 | "❌ You can't ignore phrases shorter than 3 characters!", 266 | ) 267 | .await; 268 | } 269 | 270 | let ignore = Ignore { 271 | user_id: command.user.id, 272 | guild_id, 273 | phrase: phrase.to_lowercase(), 274 | }; 275 | 276 | if ignore.clone().exists().await? { 277 | return respond_eph( 278 | &ctx, 279 | &command, 280 | "❌ You already ignored that phrase!", 281 | ) 282 | .await; 283 | } 284 | 285 | ignore.insert().await?; 286 | 287 | success(&ctx, &command).await 288 | } 289 | 290 | /// Remove an ignored phrase. 
291 | /// 292 | /// Usage: `/unignore ` 293 | #[tracing::instrument( 294 | skip_all, 295 | fields( 296 | user_id = %command.user.id, 297 | channel_id = %command.channel_id, 298 | command = %command.data.name, 299 | ) 300 | )] 301 | pub(crate) async fn unignore(ctx: Context, command: Command) -> Result<()> { 302 | check_opt_out!(&ctx, command); 303 | let guild_id = require_guild!(&ctx, &command); 304 | 305 | let phrase = command 306 | .data 307 | .options 308 | .get(0) 309 | .and_then(|o| o.value.as_ref()) 310 | .context("No phrase to ignore provided")? 311 | .as_str() 312 | .context("Phrase provided not string")?; 313 | 314 | let ignore = Ignore { 315 | user_id: command.user.id, 316 | guild_id, 317 | phrase: phrase.to_lowercase(), 318 | }; 319 | 320 | if !ignore.clone().exists().await? { 321 | return respond_eph( 322 | &ctx, 323 | &command, 324 | "❌ You haven't ignored that phrase!", 325 | ) 326 | .await; 327 | } 328 | 329 | ignore.delete().await?; 330 | 331 | success(&ctx, &command).await 332 | } 333 | 334 | /// List ignored phrases in the current guild, or in all guilds when used in 335 | /// DMs. 336 | /// 337 | /// Usage: `/ignores` 338 | #[tracing::instrument( 339 | skip_all, 340 | fields( 341 | user_id = %command.user.id, 342 | channel_id = %command.channel_id, 343 | command = %command.data.name, 344 | ) 345 | )] 346 | pub(crate) async fn ignores(ctx: Context, command: Command) -> Result<()> { 347 | check_opt_out!(&ctx, command); 348 | 349 | match command.guild_id { 350 | Some(guild_id) => { 351 | let ignores = Ignore::user_guild_ignores(command.user.id, guild_id) 352 | .await? 353 | .into_iter() 354 | .map(|ignore| ignore.phrase) 355 | .collect::>(); 356 | 357 | if ignores.is_empty() { 358 | return respond_eph( 359 | &ctx, 360 | &command, 361 | "❌ You haven't ignored any phrases!", 362 | ) 363 | .await; 364 | } 365 | 366 | let guild_name = ctx 367 | .cache 368 | .guild_field(guild_id, |g| g.name.clone()) 369 | .context("Couldn't get guild to list ignores")?; 370 | 371 | let response = format!( 372 | "{}'s ignored phrases in {}:\n - {}", 373 | command.user.name, 374 | guild_name, 375 | ignores.join("\n - ") 376 | ); 377 | 378 | respond_eph(&ctx, &command, response).await 379 | } 380 | None => { 381 | let ignores = Ignore::user_ignores(command.user.id).await?; 382 | 383 | if ignores.is_empty() { 384 | return respond_eph( 385 | &ctx, 386 | &command, 387 | "❌ You haven't ignored any phrases!", 388 | ) 389 | .await; 390 | } 391 | 392 | let mut ignores_by_guild = HashMap::new(); 393 | 394 | for ignore in ignores { 395 | ignores_by_guild 396 | .entry(ignore.guild_id) 397 | .or_insert_with(Vec::new) 398 | .push(ignore.phrase); 399 | } 400 | 401 | let mut response = String::new(); 402 | 403 | for (guild_id, phrases) in ignores_by_guild { 404 | if !response.is_empty() { 405 | response.push_str("\n\n"); 406 | } 407 | 408 | let guild_name = ctx 409 | .cache 410 | .guild_field(guild_id, |g| g.name.clone()) 411 | .context("Couldn't get guild to list ignores")?; 412 | 413 | write!( 414 | &mut response, 415 | "Your ignored phrases in {}:\n – {}", 416 | guild_name, 417 | phrases.join("\n – ") 418 | ) 419 | .unwrap(); 420 | } 421 | 422 | respond_eph(&ctx, &command, response).await 423 | } 424 | } 425 | } 426 | 427 | /// Remove keywords and ignores in a guild by ID. 
428 | /// 429 | /// Usage: `/remove-server ` 430 | #[tracing::instrument( 431 | skip_all, 432 | fields( 433 | user_id = %command.user.id, 434 | channel_id = %command.channel_id, 435 | command = %command.data.name, 436 | ) 437 | )] 438 | pub(crate) async fn remove_server( 439 | ctx: Context, 440 | command: Command, 441 | ) -> Result<()> { 442 | check_opt_out!(&ctx, command); 443 | 444 | let arg = command 445 | .data 446 | .options 447 | .get(0) 448 | .and_then(|o| o.value.as_ref()) 449 | .context("No guild ID to remove provided")? 450 | .as_str() 451 | .context("Guild ID to remove was not a string")?; 452 | 453 | let guild_id = match arg.parse() { 454 | Ok(id) => GuildId(id), 455 | Err(_) => { 456 | return respond_eph(&ctx, &command, "❌ Invalid server ID!").await 457 | } 458 | }; 459 | 460 | let channels: Option> = 461 | ctx.cache.guild_field(guild_id, |g| { 462 | g.channels 463 | .iter() 464 | .filter(|(_, channel)| { 465 | matches!( 466 | channel, 467 | Channel::Guild(GuildChannel { 468 | kind: ChannelType::Text, 469 | .. 470 | }) 471 | ) 472 | }) 473 | .map(|(&id, _)| id) 474 | .collect() 475 | }); 476 | 477 | let guild_keywords_deleted = 478 | Keyword::delete_in_guild(command.user.id, guild_id).await?; 479 | 480 | let ignores_deleted = 481 | Ignore::delete_in_guild(command.user.id, guild_id).await?; 482 | 483 | let channel_keywords_deleted = match channels { 484 | Some(channels) => { 485 | let futures: FuturesUnordered<_> = channels 486 | .into_iter() 487 | .map(|channel| { 488 | Keyword::delete_in_channel(command.user.id, channel) 489 | }) 490 | .collect(); 491 | 492 | futures 493 | .try_fold(0, |acc, n| async move { Ok(acc + n) }) 494 | .await? 495 | } 496 | None => 0, 497 | }; 498 | 499 | if guild_keywords_deleted + ignores_deleted + channel_keywords_deleted == 0 500 | { 501 | respond_eph( 502 | &ctx, 503 | &command, 504 | "❌ You didn't have any keywords or ignores in that server!", 505 | ) 506 | .await 507 | } else { 508 | success(&ctx, &command).await 509 | } 510 | } 511 | 512 | /// List keywords in the current guild, or in all guilds when used in DMs. 513 | /// 514 | /// Usage: `/keywords` 515 | #[tracing::instrument( 516 | skip_all, 517 | fields( 518 | user_id = %command.user.id, 519 | channel_id = %command.channel_id, 520 | command = %command.data.name, 521 | ) 522 | )] 523 | pub(crate) async fn keywords(ctx: Context, command: Command) -> Result<()> { 524 | check_opt_out!(&ctx, command); 525 | 526 | match command.guild_id { 527 | Some(guild_id) => { 528 | let guild_keywords = 529 | Keyword::user_guild_keywords(command.user.id, guild_id) 530 | .await? 531 | .into_iter() 532 | .map(|keyword| keyword.keyword) 533 | .collect::>(); 534 | 535 | let guild_channels = get_text_channels_in_guild(&ctx, guild_id)?; 536 | 537 | let mut channel_keywords = HashMap::new(); 538 | 539 | for keyword in 540 | Keyword::user_channel_keywords(command.user.id).await? 
541 | { 542 | let channel_id = match keyword.kind { 543 | KeywordKind::Channel(id) => id, 544 | _ => { 545 | panic!("user_channel_keywords returned a guild keyword") 546 | } 547 | }; 548 | 549 | if !guild_channels.contains_key(&channel_id) { 550 | continue; 551 | } 552 | 553 | channel_keywords 554 | .entry(channel_id) 555 | .or_insert_with(Vec::new) 556 | .push(keyword.keyword); 557 | } 558 | 559 | if guild_keywords.is_empty() && channel_keywords.is_empty() { 560 | return respond_eph( 561 | &ctx, 562 | &command, 563 | "❌ You haven't added any keywords yet!", 564 | ) 565 | .await; 566 | } 567 | 568 | let guild_name = ctx 569 | .cache 570 | .guild_field(guild_id, |g| g.name.clone()) 571 | .context("Couldn't get guild to list keywords")?; 572 | 573 | let mut response = String::with_capacity(45); 574 | 575 | if guild_keywords.is_empty() { 576 | write!(&mut response, "Your keywords in {}:", guild_name) 577 | .unwrap(); 578 | } else { 579 | write!( 580 | &mut response, 581 | "Your keywords in {}:\n – {}", 582 | guild_name, 583 | guild_keywords.join("\n – ") 584 | ) 585 | .unwrap(); 586 | } 587 | 588 | for (channel_id, channel_keywords) in channel_keywords { 589 | response.push('\n'); 590 | 591 | write!( 592 | &mut response, 593 | " In <#{}>:\n - {1}", 594 | channel_id, 595 | channel_keywords.join("\n - "), 596 | ) 597 | .unwrap(); 598 | } 599 | 600 | respond_eph(&ctx, &command, response).await 601 | } 602 | None => { 603 | let keywords = Keyword::user_keywords(command.user.id).await?; 604 | 605 | if keywords.is_empty() { 606 | return respond_eph( 607 | &ctx, 608 | &command, 609 | "❌ You haven't added any keywords yet!", 610 | ) 611 | .await; 612 | } 613 | 614 | let mut keywords_by_guild = HashMap::new(); 615 | 616 | let mut unknown_channel_keywords = HashMap::new(); 617 | 618 | for keyword in keywords { 619 | match keyword.kind { 620 | KeywordKind::Guild(guild_id) => { 621 | let guild_keywords = &mut keywords_by_guild 622 | .entry(guild_id) 623 | .or_insert_with(|| (Vec::new(), HashMap::new())) 624 | .0; 625 | 626 | guild_keywords.push(keyword.keyword); 627 | } 628 | KeywordKind::Channel(channel_id) => { 629 | let guild_id = ctx 630 | .cache 631 | .guild_channel_field(channel_id, |c| c.guild_id); 632 | 633 | match guild_id { 634 | Some(guild_id) => { 635 | keywords_by_guild 636 | .entry(guild_id) 637 | .or_insert_with(|| { 638 | (Vec::new(), HashMap::new()) 639 | }) 640 | .1 641 | .entry(channel_id) 642 | .or_insert_with(Vec::new) 643 | .push(keyword.keyword); 644 | } 645 | None => { 646 | unknown_channel_keywords 647 | .entry(channel_id) 648 | .or_insert_with(Vec::new) 649 | .push(keyword.keyword); 650 | } 651 | } 652 | } 653 | } 654 | } 655 | 656 | let mut response = String::new(); 657 | 658 | for (guild_id, (guild_keywords, channel_keywords)) in 659 | keywords_by_guild 660 | { 661 | if !response.is_empty() { 662 | response.push_str("\n\n"); 663 | } 664 | 665 | let guild_name = ctx 666 | .cache 667 | .guild_field(guild_id, |g| g.name.clone()) 668 | .unwrap_or_else(|| { 669 | format!(" ({})", guild_id) 670 | }); 671 | 672 | if guild_keywords.is_empty() { 673 | write!(&mut response, "Your keywords in {}:", guild_name) 674 | .unwrap(); 675 | } else { 676 | write!( 677 | &mut response, 678 | "Your keywords in {}:\n – {}", 679 | guild_name, 680 | guild_keywords.join("\n – ") 681 | ) 682 | .unwrap(); 683 | } 684 | 685 | for (channel_id, channel_keywords) in channel_keywords { 686 | response.push('\n'); 687 | 688 | write!( 689 | &mut response, 690 | " In <#{0}> ({0}):\n - {1}", 691 | channel_id, 692 
| channel_keywords.join("\n - "), 693 | ) 694 | .unwrap(); 695 | } 696 | } 697 | 698 | respond_eph(&ctx, &command, response).await 699 | } 700 | } 701 | } 702 | -------------------------------------------------------------------------------- /src/bot/commands/mutes.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2022 ThatsNoMoon 2 | // Licensed under the Open Software License version 3.0 3 | 4 | //! Commands for adding, removing, and listing channel mutes. 5 | 6 | use std::{collections::HashMap, fmt::Write}; 7 | 8 | use anyhow::{Context as _, Result}; 9 | use serenity::{ 10 | client::Context, 11 | model::application::interaction::application_command::ApplicationCommandInteraction as Command, 12 | }; 13 | 14 | use crate::{ 15 | bot::util::{respond_eph, user_can_read_channel}, 16 | db::Mute, 17 | }; 18 | 19 | /// Mute a channel. 20 | /// 21 | /// Usage: `/mute ` 22 | pub(crate) async fn mute(ctx: Context, mut command: Command) -> Result<()> { 23 | check_opt_out!(ctx, command); 24 | let guild_id = require_guild!(&ctx, &command); 25 | 26 | let channel_id = command 27 | .data 28 | .resolved 29 | .channels 30 | .drain() 31 | .next() 32 | .map(|(id, _)| id) 33 | .context("No channel to mute provided")?; 34 | 35 | let channel = ctx 36 | .cache 37 | .guild_channel(channel_id) 38 | .context("Failed to get guild channel to mute")?; 39 | 40 | match user_can_read_channel(&ctx, &channel, ctx.cache.current_user_id()) 41 | .await 42 | { 43 | Ok(Some(true)) => { 44 | let mute = Mute { 45 | user_id: command.user.id, 46 | channel_id, 47 | }; 48 | 49 | if mute.clone().exists().await? { 50 | respond_eph( 51 | &ctx, 52 | &command, 53 | format!("❌ You've already muted <#{}>!", channel_id), 54 | ) 55 | .await 56 | } else { 57 | mute.insert().await?; 58 | respond_eph( 59 | &ctx, 60 | &command, 61 | format!("✅ Muted <#{}>", channel_id), 62 | ) 63 | .await 64 | } 65 | } 66 | Ok(Some(false)) => { 67 | respond_eph( 68 | &ctx, 69 | &command, 70 | format!("❌ I can't read <#{}>!", channel_id), 71 | ) 72 | .await 73 | } 74 | Ok(None) => Err(anyhow::anyhow!( 75 | "Self permissions not found in channel {} in guild {}", 76 | channel_id, 77 | guild_id 78 | )), 79 | Err(e) => Err(e.context( 80 | "Failed to check for self permissions to read muted channel", 81 | )), 82 | } 83 | } 84 | 85 | /// Unmute a channel. 86 | /// 87 | /// Usage: `/unmute ` 88 | pub(crate) async fn unmute(ctx: Context, mut command: Command) -> Result<()> { 89 | check_opt_out!(ctx, command); 90 | 91 | let channel_id = command 92 | .data 93 | .resolved 94 | .channels 95 | .drain() 96 | .next() 97 | .map(|(id, _)| id) 98 | .context("No channel to mute provided")?; 99 | 100 | let mute = Mute { 101 | user_id: command.user.id, 102 | channel_id, 103 | }; 104 | 105 | if !mute.clone().exists().await? { 106 | respond_eph( 107 | &ctx, 108 | &command, 109 | format!("❌ You haven't muted <#{}>!", channel_id), 110 | ) 111 | .await 112 | } else { 113 | mute.delete().await?; 114 | respond_eph(&ctx, &command, format!("✅ Unmuted <#{}>", channel_id)) 115 | .await 116 | } 117 | } 118 | 119 | /// List muted channels in the current guild. 
120 | /// 121 | /// Usage: `/mutes` 122 | pub(crate) async fn mutes(ctx: Context, command: Command) -> Result<()> { 123 | check_opt_out!(ctx, command); 124 | match command.guild_id { 125 | Some(guild_id) => { 126 | let channels = ctx 127 | .cache 128 | .guild_channels(guild_id) 129 | .context("Couldn't get guild channels to list mutes")?; 130 | 131 | let mutes = Mute::user_mutes(command.user.id) 132 | .await? 133 | .into_iter() 134 | .filter(|mute| channels.contains_key(&mute.channel_id)) 135 | .map(|mute| format!("<#{}>", mute.channel_id)) 136 | .collect::>(); 137 | 138 | if mutes.is_empty() { 139 | return respond_eph( 140 | &ctx, 141 | &command, 142 | "❌ You haven't muted any channels!", 143 | ) 144 | .await; 145 | } 146 | 147 | let guild_name = ctx 148 | .cache 149 | .guild_field(guild_id, |g| g.name.clone()) 150 | .context("Couldn't get guild to list mutes")?; 151 | 152 | let response = format!( 153 | "Your muted channels in {}:\n - {}", 154 | guild_name, 155 | mutes.join("\n - ") 156 | ); 157 | 158 | respond_eph(&ctx, &command, response).await 159 | } 160 | None => { 161 | let mutes = Mute::user_mutes(command.user.id).await?; 162 | 163 | if mutes.is_empty() { 164 | return respond_eph( 165 | &ctx, 166 | &command, 167 | "❌ You haven't muted any channels!", 168 | ) 169 | .await; 170 | } 171 | 172 | let mut mutes_by_guild = HashMap::new(); 173 | let mut not_found = Vec::new(); 174 | 175 | for mute in mutes { 176 | let channel = match ctx.cache.guild_channel(mute.channel_id) { 177 | Some(channel) => channel, 178 | None => { 179 | not_found 180 | .push(format!("<#{0}> ({0})", mute.channel_id)); 181 | continue; 182 | } 183 | }; 184 | 185 | mutes_by_guild 186 | .entry(channel.guild_id) 187 | .or_insert_with(Vec::new) 188 | .push(format!("<#{}>", mute.channel_id)); 189 | } 190 | 191 | let mut response = String::new(); 192 | 193 | for (guild_id, channel_ids) in mutes_by_guild { 194 | if !response.is_empty() { 195 | response.push_str("\n\n"); 196 | } 197 | 198 | let guild_name = ctx 199 | .cache 200 | .guild_field(guild_id, |g| g.name.clone()) 201 | .context("Couldn't get guild to list mutes")?; 202 | 203 | write!( 204 | &mut response, 205 | "Your muted channels in {}:\n – {}", 206 | guild_name, 207 | channel_ids.join("\n – ") 208 | ) 209 | .unwrap(); 210 | } 211 | 212 | if !not_found.is_empty() { 213 | write!( 214 | &mut response, 215 | "\n\nCouldn't find (deleted?) muted channels:\n – {}", 216 | not_found.join("\n – ") 217 | ) 218 | .unwrap(); 219 | } 220 | 221 | respond_eph(&ctx, &command, response).await 222 | } 223 | } 224 | } 225 | -------------------------------------------------------------------------------- /src/bot/commands/opt_out.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2022 ThatsNoMoon 2 | // Licensed under the Open Software License version 3.0 3 | 4 | //! Commands for opting out (and in) of having messages highlighted. 5 | 6 | use std::time::Duration; 7 | 8 | use anyhow::Result; 9 | use futures_util::StreamExt; 10 | use indoc::indoc; 11 | use rand::{distributions::Standard, Rng}; 12 | use serenity::{ 13 | client::Context, 14 | collector::ComponentInteractionCollectorBuilder, 15 | model::application::{ 16 | component::ButtonStyle, 17 | interaction::{ 18 | application_command::ApplicationCommandInteraction as Command, 19 | MessageFlags, 20 | }, 21 | }, 22 | }; 23 | 24 | use crate::{ 25 | bot::util::{respond_eph, success}, 26 | db::OptOut, 27 | }; 28 | 29 | /// Opt-out of being highlighted. 
30 | /// 31 | /// Usage: `/opt-out` 32 | #[tracing::instrument( 33 | skip_all, 34 | fields( 35 | user_id = %command.user.id, 36 | channel_id = %command.channel_id, 37 | command = %command.data.name, 38 | ) 39 | )] 40 | pub(crate) async fn opt_out(ctx: Context, command: Command) -> Result<()> { 41 | let opt_out = OptOut { 42 | user_id: command.user.id, 43 | }; 44 | 45 | if opt_out.exists().await? { 46 | return respond_eph(&ctx, &command, "❌ You already opted out!").await; 47 | } 48 | 49 | const OPT_OUT_WARNING: &str = indoc!( 50 | " 51 | ⚠️ Are you sure you want to opt out? 52 | 53 | All of your keywords, muted channels, blocked users, and ignored phrases \ 54 | will be lost forever. 55 | 56 | You will no longer be able to receive notifications. 57 | 58 | Others will not receive notifications about your messages." 59 | ); 60 | 61 | let nonce = rand::thread_rng() 62 | .sample_iter::(Standard) 63 | .take(90) 64 | .collect::(); 65 | 66 | let confirm_id = format!("confirm{}", nonce); 67 | let cancel_id = format!("cancel{}", nonce); 68 | 69 | command 70 | .create_interaction_response(&ctx, |r| { 71 | r.interaction_response_data(|m| { 72 | m.flags(MessageFlags::EPHEMERAL) 73 | .content(OPT_OUT_WARNING) 74 | .components(|c| { 75 | c.create_action_row(|row| { 76 | row.create_button(|b| { 77 | b.style(ButtonStyle::Danger) 78 | .label("Opt out") 79 | .custom_id(&confirm_id) 80 | }) 81 | .create_button(|b| { 82 | b.style(ButtonStyle::Secondary) 83 | .label("Cancel") 84 | .custom_id(&cancel_id) 85 | }) 86 | }) 87 | }) 88 | }) 89 | }) 90 | .await?; 91 | 92 | let button_press = ComponentInteractionCollectorBuilder::new(&ctx) 93 | .collect_limit(1) 94 | .author_id(command.user.id) 95 | .filter({ 96 | let confirm_id = confirm_id.clone(); 97 | let cancel_id = cancel_id.clone(); 98 | move |interaction| { 99 | let id = interaction.data.custom_id.as_str(); 100 | id == confirm_id || id == cancel_id 101 | } 102 | }) 103 | .timeout(Duration::from_secs(10)) 104 | .build() 105 | .next() 106 | .await; 107 | 108 | match button_press { 109 | None => { 110 | command 111 | .edit_original_interaction_response(&ctx, |r| { 112 | r.content("Timed out.").components(|c| c) 113 | }) 114 | .await?; 115 | } 116 | Some(press) => match press.data.custom_id.as_str() { 117 | id if id == confirm_id => { 118 | let opt_out = OptOut { 119 | user_id: press.user.id, 120 | }; 121 | opt_out.clone().delete_user_data().await?; 122 | opt_out.insert().await?; 123 | command 124 | .edit_original_interaction_response(&ctx, |r| { 125 | r.content("✅ You have been opted out") 126 | .components(|c| c) 127 | }) 128 | .await?; 129 | } 130 | id if id == cancel_id => { 131 | command 132 | .edit_original_interaction_response(&ctx, |r| { 133 | r.content("✅ You have not been opted out") 134 | .components(|c| c) 135 | }) 136 | .await?; 137 | } 138 | other => { 139 | return Err(anyhow::anyhow!( 140 | "Unknown opt-out message component ID {}", 141 | other 142 | )); 143 | } 144 | }, 145 | } 146 | 147 | Ok(()) 148 | } 149 | 150 | /// Opt-in to being highlighted, after having opted out. 151 | /// 152 | /// Usage: `/opt-in` 153 | #[tracing::instrument( 154 | skip_all, 155 | fields( 156 | user_id = %command.user.id, 157 | channel_id = %command.channel_id, 158 | command = %command.data.name, 159 | ) 160 | )] 161 | pub(crate) async fn opt_in(ctx: Context, command: Command) -> Result<()> { 162 | let opt_out = OptOut { 163 | user_id: command.user.id, 164 | }; 165 | 166 | if !opt_out.clone().exists().await? 
{ 167 | return respond_eph(&ctx, &command, "❌ You haven't opted out!").await; 168 | } 169 | 170 | opt_out.delete().await?; 171 | 172 | success(&ctx, &command).await 173 | } 174 | -------------------------------------------------------------------------------- /src/bot/commands/util.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2022 ThatsNoMoon 2 | // Licensed under the Open Software License version 3.0 3 | 4 | //! Miscellaneous utility functions and macros used by commands. 5 | 6 | use std::collections::HashMap; 7 | 8 | use anyhow::{Context as _, Result}; 9 | use serenity::{ 10 | client::Context, 11 | model::{ 12 | channel::{ChannelType, GuildChannel}, 13 | id::{ChannelId, GuildId}, 14 | }, 15 | }; 16 | 17 | /// Requires the given command to have come from a guild channel. 18 | /// 19 | /// Displays an error message and returns if the command did not come from a 20 | /// guild channel. Evaluates to the guild's ID otherwise. 21 | #[macro_export] 22 | macro_rules! require_guild { 23 | ($ctx:expr, $command:expr) => {{ 24 | #[allow(clippy::needless_borrow)] 25 | match $command.guild_id { 26 | None => { 27 | return $crate::bot::util::respond_eph( 28 | $ctx, 29 | $command, 30 | "❌ You must run this command in a server!", 31 | ) 32 | .await 33 | } 34 | Some(id) => id, 35 | } 36 | }}; 37 | } 38 | 39 | /// Requires the author of a given command to not be opted out. 40 | /// 41 | /// Displays an error message and returns if the user is opted out. 42 | #[macro_export] 43 | macro_rules! check_opt_out { 44 | ($ctx:expr, $command:expr) => {{ 45 | let opt_out = $crate::db::OptOut { 46 | user_id: $command.user.id, 47 | }; 48 | 49 | if opt_out.exists().await? { 50 | return $crate::bot::util::respond_eph( 51 | &$ctx, 52 | &$command, 53 | "❌ You can't use this command after opting out!", 54 | ) 55 | .await; 56 | } 57 | }}; 58 | } 59 | 60 | /// Requires the current bot member to have permission to send embeds. 61 | /// 62 | /// Displays an error message and returns if the current member does not have 63 | /// permission to send embeds. Does nothing if used on a command in a DM 64 | /// channel. 65 | #[macro_export] 66 | macro_rules! require_embed_perms { 67 | ($ctx:expr, $command:expr) => { 68 | #[allow(clippy::needless_borrow)] 69 | if $command.guild_id.is_some() { 70 | use ::anyhow::Context as _; 71 | let self_id = $ctx.cache.current_user_id(); 72 | 73 | let channel = $ctx 74 | .cache 75 | .guild_channel($command.channel_id) 76 | .context("Nonexistent guild channel")?; 77 | 78 | let permissions = channel 79 | .permissions_for_user($ctx, self_id) 80 | .context("Failed to get permissions for self")?; 81 | 82 | if !permissions.embed_links() { 83 | $crate::bot::util::respond_eph( 84 | $ctx, 85 | $command, 86 | "Sorry, I need permission to embed links to use that \ 87 | command 😔", 88 | ) 89 | .await 90 | .context("Failed to send missing embed permission message")?; 91 | 92 | return Ok(()); 93 | } 94 | } 95 | }; 96 | } 97 | 98 | /// Convenience function to get a map of all cached text channels in the given 99 | /// guild. 
100 | pub(crate) fn get_text_channels_in_guild( 101 | ctx: &Context, 102 | guild_id: GuildId, 103 | ) -> Result> { 104 | let channels = ctx 105 | .cache 106 | .guild_channels(guild_id) 107 | .context("Couldn't get guild to get channels")?; 108 | let channels = channels 109 | .into_iter() 110 | .filter(|(_, channel)| channel.kind == ChannelType::Text) 111 | .collect(); 112 | 113 | Ok(channels) 114 | } 115 | -------------------------------------------------------------------------------- /src/bot/mod.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2023 ThatsNoMoon 2 | // Licensed under the Open Software License version 3.0 3 | 4 | //! Discord client creation and behavior. 5 | 6 | #[macro_use] 7 | mod util; 8 | mod commands; 9 | mod highlighting; 10 | 11 | use std::{collections::HashMap, sync::Arc, time::Instant}; 12 | 13 | use anyhow::{Context as _, Result}; 14 | use futures_util::{ 15 | stream::{self, FuturesUnordered}, 16 | StreamExt, TryStreamExt, 17 | }; 18 | use once_cell::sync::OnceCell; 19 | use serenity::{ 20 | builder::CreateEmbed, 21 | client::{bridge::gateway::ShardManager, Client, Context, EventHandler}, 22 | http::{ 23 | error::{DiscordJsonError, ErrorResponse}, 24 | HttpError, 25 | }, 26 | model::{ 27 | application::interaction::{ 28 | application_command::ApplicationCommandInteraction as Command, 29 | Interaction, MessageFlags, 30 | }, 31 | channel::Message, 32 | event::MessageUpdateEvent, 33 | gateway::{Activity, GatewayIntents, Ready}, 34 | id::{ChannelId, GuildId, MessageId}, 35 | }, 36 | prelude::{Mutex, TypeMapKey}, 37 | Error as SerenityError, 38 | }; 39 | use tinyvec::TinyVec; 40 | use tracing::{ 41 | debug, error, 42 | field::{display, Empty}, 43 | info, info_span, Span, 44 | }; 45 | 46 | use self::highlighting::CachedMessages; 47 | use crate::{ 48 | bot::highlighting::start_notification_clearing, 49 | db::{Ignore, Keyword, Notification}, 50 | global::ERROR_COLOR, 51 | settings::settings, 52 | }; 53 | 54 | /// Type to serve as an event handler. 55 | struct Handler; 56 | 57 | #[serenity::async_trait] 58 | impl EventHandler for Handler { 59 | /// Message listener to check for keywords. 60 | /// 61 | /// Calls [`handle_keywords`] for any non-bot messages in a guild to check 62 | /// if there are any keywords to notify others of. 63 | async fn message(&self, ctx: Context, message: Message) { 64 | if message.author.bot { 65 | return; 66 | } 67 | 68 | let guild_id = match message.guild_id { 69 | Some(id) => id, 70 | None => return, 71 | }; 72 | 73 | handle_keywords(&ctx, &message, guild_id).await; 74 | } 75 | 76 | /// Message listener to check messages for notifications to delete. 77 | /// 78 | /// Calls [`handle_deletion`] for any non-bot messages in a guild to check 79 | /// if there are any notifications of that message to delete. 80 | async fn message_delete( 81 | &self, 82 | ctx: Context, 83 | channel_id: ChannelId, 84 | message_id: MessageId, 85 | guild_id: Option, 86 | ) { 87 | let guild_id = match guild_id { 88 | Some(id) => id, 89 | None => return, 90 | }; 91 | 92 | handle_deletion(ctx, channel_id, message_id, guild_id).await; 93 | } 94 | 95 | /// Message listener to edit notifications 96 | /// 97 | /// Calls [`handle_update`] for any non-bot messages in a guild to check if 98 | /// there are any notifications of that message to update. 
99 | async fn message_update( 100 | &self, 101 | ctx: Context, 102 | _: Option, 103 | new: Option, 104 | event: MessageUpdateEvent, 105 | ) { 106 | let guild_id = match event.guild_id { 107 | Some(g) => g, 108 | None => return, 109 | }; 110 | 111 | handle_update(ctx, new, event, guild_id).await; 112 | } 113 | 114 | /// Runs minor setup for when the bot starts. 115 | /// 116 | /// Calls [`ready`]. 117 | async fn ready(&self, ctx: Context, _: Ready) { 118 | ready(ctx).await; 119 | } 120 | 121 | /// Responds to slash commands. 122 | async fn interaction_create(&self, ctx: Context, interaction: Interaction) { 123 | let command = match interaction { 124 | Interaction::ApplicationCommand(cmd) => cmd, 125 | _ => return, 126 | }; 127 | 128 | handle_command(ctx, command).await; 129 | } 130 | } 131 | 132 | /// [`Instant`] of when the bot was started. 133 | static STARTED: OnceCell = OnceCell::new(); 134 | 135 | /// Sets the bot's activity to "Listening to /help", 136 | /// [creates slash commands](commands::create_commands), and sets [`STARTED`]. 137 | async fn ready(ctx: Context) { 138 | let span = info_span!(parent: None, "ready"); 139 | 140 | let _entered = span.enter(); 141 | 142 | ctx.set_activity(Activity::listening("/help")).await; 143 | 144 | if let Err(e) = commands::create_commands(&ctx).await { 145 | error!("{e}\n{e:?}"); 146 | } 147 | 148 | let _ = STARTED.set(Instant::now()); 149 | 150 | start_notification_clearing(ctx); 151 | 152 | info!("Ready to highlight!"); 153 | } 154 | 155 | /// Finds notifications for an updated message and uses 156 | /// [`update_sent_notifications`](highlighting::update_sent_notifications) to 157 | /// update them. 158 | async fn handle_update( 159 | ctx: Context, 160 | new: Option, 161 | event: MessageUpdateEvent, 162 | guild_id: GuildId, 163 | ) { 164 | let span = info_span!( 165 | parent: None, 166 | "message_update", 167 | message_id = %event.id, 168 | author_id = Empty, 169 | channel_id = %event.channel_id, 170 | guild_id = %guild_id, 171 | ); 172 | 173 | let _entered = span.enter(); 174 | 175 | let content = match event.content.as_ref() { 176 | Some(s) => s.clone(), 177 | None => return, 178 | }; 179 | 180 | if let Some(old_content) = ctx 181 | .data 182 | .write() 183 | .await 184 | .get_mut::() 185 | .expect("No message cache") 186 | .get_mut(&event.id) 187 | { 188 | *old_content = content; 189 | } 190 | 191 | let notifications = match Notification::notifications_of_message(event.id) 192 | .await 193 | .context("Failed to get notifications for message") 194 | { 195 | Ok(n) => n, 196 | Err(e) => { 197 | error!("{:?}", e); 198 | return; 199 | } 200 | }; 201 | 202 | if notifications.is_empty() { 203 | return; 204 | } 205 | 206 | let message = match new { 207 | Some(m) => m, 208 | None => { 209 | match ctx 210 | .http 211 | .get_message(event.channel_id.0, event.id.0) 212 | .await 213 | .context("Failed to fetch updated message") 214 | { 215 | Ok(m) => m, 216 | Err(e) => { 217 | error!("{:?}", e); 218 | return; 219 | } 220 | } 221 | } 222 | }; 223 | 224 | span.record("author_id", &display(message.author.id)); 225 | 226 | highlighting::update_sent_notifications( 227 | &ctx, 228 | guild_id, 229 | message, 230 | notifications, 231 | ) 232 | .await; 233 | } 234 | 235 | /// Finds notifications for a deleted message and uses 236 | /// [`delete_sent_notifications`](highlighting::clear_sent_notifications) to 237 | /// delete them. 
238 | async fn handle_deletion( 239 | ctx: Context, 240 | channel_id: ChannelId, 241 | message_id: MessageId, 242 | guild_id: GuildId, 243 | ) { 244 | let span = info_span!( 245 | parent: None, 246 | "handle_deletion", 247 | channel_id = %channel_id, 248 | message_id = %message_id, 249 | guild_id = %guild_id, 250 | ); 251 | 252 | let _entered = span.enter(); 253 | 254 | ctx.data 255 | .write() 256 | .await 257 | .get_mut::() 258 | .expect("No message cache") 259 | .remove(&message_id); 260 | 261 | let notifications = 262 | match Notification::notifications_of_message(message_id).await { 263 | Ok(n) => n 264 | .into_iter() 265 | .map(|notification| { 266 | (notification.user_id, notification.notification_message) 267 | }) 268 | .collect::>(), 269 | Err(e) => { 270 | error!("{:?}", e); 271 | return; 272 | } 273 | }; 274 | 275 | if notifications.is_empty() { 276 | return; 277 | } 278 | 279 | highlighting::clear_sent_notifications(&ctx, ¬ifications).await; 280 | 281 | if let Err(e) = 282 | Notification::delete_notifications_of_message(message_id).await 283 | { 284 | error!("{:?}", e); 285 | } 286 | } 287 | 288 | /// Handles any keywords present in a message. 289 | /// 290 | /// This function queries for any keywords that could be relevant to the sent 291 | /// message with [`get_relevant_keywords`](Keyword::get_relevant_keywords), 292 | /// collects [`Ignore`]s for any users with those keywords. It then 293 | /// calls [`notify_keywords`](highlighting::notify_keywords). 294 | async fn handle_keywords(ctx: &Context, message: &Message, guild_id: GuildId) { 295 | let res: Result<()> = async move { 296 | let channel_id = message.channel_id; 297 | 298 | let span = info_span!( 299 | parent: None, 300 | "handle_keywords", 301 | message_id = %message.id, 302 | channel_id = %channel_id, 303 | author_id = %message.author.id, 304 | guild_id = %guild_id, 305 | ); 306 | 307 | let _entered = span.enter(); 308 | 309 | let lowercase_content = &message.content.to_lowercase(); 310 | 311 | debug!("Getting keywords"); 312 | 313 | let keywords_by_user = Keyword::get_relevant_keywords( 314 | guild_id, 315 | channel_id, 316 | message.author.id, 317 | ) 318 | .await? 319 | .into_iter() 320 | .fold(HashMap::new(), |mut map, keyword| { 321 | map.entry(keyword.user_id) 322 | .or_insert_with(|| tinyvec::tiny_vec![[Keyword; 2]]) 323 | .push(keyword); 324 | map 325 | }); 326 | 327 | let mut ignores_by_user = HashMap::new(); 328 | 329 | let futures = FuturesUnordered::new(); 330 | 331 | for (user_id, keywords) in keywords_by_user { 332 | let ignores = match ignores_by_user.get(&user_id) { 333 | Some(ignores) => ignores, 334 | None => { 335 | let user_ignores = 336 | Ignore::user_guild_ignores(user_id, guild_id).await?; 337 | ignores_by_user.entry(user_id).or_insert(user_ignores) 338 | } 339 | }; 340 | 341 | let keywords = stream::iter(keywords) 342 | .map(Ok::<_, anyhow::Error>) // convert to a TryStream 343 | .try_filter_map(|keyword| async move { 344 | Ok(highlighting::should_notify_keyword( 345 | ctx, 346 | message, 347 | lowercase_content, 348 | &keyword, 349 | ignores, 350 | ) 351 | .await? 
352 | .then_some(keyword)) 353 | }) 354 | .try_collect::>() 355 | .await?; 356 | 357 | if keywords.is_empty() { 358 | debug!("No keywords for {user_id}"); 359 | continue; 360 | } 361 | 362 | debug!("Notifying {user_id} of {} keywords", keywords.len()); 363 | 364 | let ctx = ctx.clone(); 365 | futures.push(highlighting::notify_keywords( 366 | ctx, 367 | message.clone(), 368 | keywords, 369 | ignores.clone(), 370 | user_id, 371 | guild_id, 372 | )); 373 | } 374 | 375 | futures.for_each(|_| async move {}).await; 376 | 377 | Ok(()) 378 | } 379 | .await; 380 | 381 | if let Err(e) = res.context("Failed to handle keywords") { 382 | error!("{:?}", e); 383 | } 384 | } 385 | 386 | /// Handles a slash [`command`](commands). 387 | async fn handle_command(ctx: Context, command: Command) { 388 | let name = command.data.name.clone(); 389 | let channel_id = command.channel_id; 390 | let user_id = command.user.id; 391 | 392 | let span = info_span!( 393 | parent: None, 394 | "interaction_create", 395 | interaction_id = %command.id, 396 | author_id = %user_id, 397 | channel_id = %channel_id, 398 | guild_id = ?command.guild_id, 399 | ); 400 | 401 | let _entered = span.enter(); 402 | 403 | let result = { 404 | use std::future::Future; 405 | 406 | use commands::*; 407 | use tokio::task::JoinHandle; 408 | 409 | fn spawn_command( 410 | ctx: Context, 411 | command: Command, 412 | f: fn(Context, Command) -> Fut, 413 | ) -> JoinHandle> 414 | where 415 | Fut: Future> + Send + 'static, 416 | { 417 | let parent = Span::current(); 418 | tokio::spawn(async move { 419 | let span = info_span!(parent: &parent, "spawn_command"); 420 | let _entered = span.enter(); 421 | f(ctx, command).await 422 | }) 423 | } 424 | 425 | let ctx = ctx.clone(); 426 | let command = command.clone(); 427 | 428 | match &*name { 429 | "add" => spawn_command(ctx, command, add), 430 | "remove" => spawn_command(ctx, command, remove), 431 | "mute" => spawn_command(ctx, command, mute), 432 | "unmute" => spawn_command(ctx, command, unmute), 433 | "ignore" => spawn_command(ctx, command, ignore), 434 | "unignore" => spawn_command(ctx, command, unignore), 435 | "block" => spawn_command(ctx, command, block), 436 | "unblock" => spawn_command(ctx, command, unblock), 437 | "remove-server" => spawn_command(ctx, command, remove_server), 438 | "keywords" => spawn_command(ctx, command, keywords), 439 | "mutes" => spawn_command(ctx, command, mutes), 440 | "ignores" => spawn_command(ctx, command, ignores), 441 | "blocks" => spawn_command(ctx, command, blocks), 442 | "opt-out" => spawn_command(ctx, command, opt_out), 443 | "opt-in" => spawn_command(ctx, command, opt_in), 444 | "help" => spawn_command(ctx, command, help), 445 | "ping" => spawn_command(ctx, command, ping), 446 | "about" => spawn_command(ctx, command, about), 447 | _ => { 448 | let err = 449 | anyhow::anyhow!("Unknown slash command received: {}", name); 450 | 451 | tokio::spawn(async move { Err(err) }) 452 | } 453 | } 454 | .await 455 | .map_err(anyhow::Error::from) 456 | .and_then(|r| r) 457 | }; 458 | 459 | if let Err(e) = result { 460 | debug!("Reporting failure to user"); 461 | const BUG_REPORT_PROMPT: &str = 462 | "I would appreciate if you could take a minute to [file a bug report]\ 463 | (https://github.com/ThatsNoMoon/highlights/issues/new?template=bug_report.md) \ 464 | so I can work on fixing this! Please include the interaction ID \ 465 | below in your report. 
Thanks!"; 466 | 467 | let embed = { 468 | let mut embed = CreateEmbed::default(); 469 | embed 470 | .color(ERROR_COLOR) 471 | .title("An error occurred running that command :(") 472 | .description({ 473 | let mut e = format!("{:#}", e); 474 | if e.len() > 2000 { 475 | e.truncate(2000); 476 | e.push_str("...") 477 | } 478 | e 479 | }) 480 | .field("Create a bug report", BUG_REPORT_PROMPT, true) 481 | .footer(|f| f.text(format!("Interaction ID: {}", command.id))); 482 | embed 483 | }; 484 | 485 | let response_result = command 486 | .create_interaction_response(&ctx, |r| { 487 | r.interaction_response_data(|d| { 488 | d.flags(MessageFlags::EPHEMERAL).add_embed(embed.clone()) 489 | }) 490 | }) 491 | .await; 492 | 493 | const INTERACTION_ACKNOWLEDGED: isize = 40060; 494 | 495 | let response_result = match response_result { 496 | Ok(_) => Ok(()), 497 | Err(SerenityError::Http(e)) 498 | if matches!( 499 | &*e, 500 | HttpError::UnsuccessfulRequest(ErrorResponse { 501 | error: DiscordJsonError { 502 | code: INTERACTION_ACKNOWLEDGED, 503 | .. 504 | }, 505 | .. 506 | },) 507 | ) => 508 | { 509 | command 510 | .create_followup_message(&ctx, |c| { 511 | c.flags(MessageFlags::EPHEMERAL).add_embed(embed) 512 | }) 513 | .await 514 | .context("Failed to send failure followup") 515 | .map(drop) 516 | } 517 | Err(e) => Err(e).context("Failed to send failure response"), 518 | }; 519 | 520 | error!("{:?}", e); 521 | 522 | if let Err(e) = response_result { 523 | error!("{:?}", e); 524 | } 525 | } 526 | 527 | if let Err(e) = highlighting::check_notify_user_state(&ctx, &command) 528 | .await 529 | .context("Failed to check and notify user state") 530 | { 531 | error!("{:?}", e); 532 | } 533 | } 534 | 535 | /// [`TypeMapKey`] to store a reference to the [`ShardManager`] for retrieving 536 | /// latency. 537 | struct Shards; 538 | 539 | impl TypeMapKey for Shards { 540 | type Value = Arc>; 541 | } 542 | 543 | /// Initializes the Discord client. 544 | pub(crate) async fn init() -> Result<()> { 545 | let mut client = Client::builder( 546 | &settings().bot.token, 547 | GatewayIntents::MESSAGE_CONTENT 548 | | GatewayIntents::DIRECT_MESSAGES 549 | | GatewayIntents::GUILD_MESSAGE_REACTIONS 550 | | GatewayIntents::GUILD_MESSAGES 551 | | GatewayIntents::GUILDS 552 | | GatewayIntents::GUILD_MEMBERS, 553 | ) 554 | .event_handler(Handler) 555 | .application_id(settings().bot.application_id) 556 | .await 557 | .context("Failed to create client")?; 558 | 559 | { 560 | let mut data = client.data.write().await; 561 | 562 | data.insert::(HashMap::new()); 563 | data.insert::(client.shard_manager.clone()); 564 | } 565 | 566 | client.start().await.context("Failed to run client")?; 567 | 568 | Ok(()) 569 | } 570 | -------------------------------------------------------------------------------- /src/bot/util.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2022 ThatsNoMoon 2 | // Licensed under the Open Software License version 3.0 3 | 4 | //! Miscellaneous utility functions and macros. 5 | 6 | use std::fmt::Display; 7 | 8 | use anyhow::{Context as _, Result}; 9 | use serenity::{ 10 | client::Context, 11 | http::{error::ErrorResponse, CacheHttp}, 12 | model::{ 13 | application::interaction::{ 14 | application_command::ApplicationCommandInteraction as Command, 15 | MessageFlags, 16 | }, 17 | channel::GuildChannel, 18 | guild::{Guild, PartialGuild}, 19 | id::UserId, 20 | }, 21 | prelude::HttpError, 22 | Error as SerenityError, 23 | }; 24 | 25 | /// Responds to a command with a ✅ emoji. 
26 | #[inline] 27 | pub(crate) async fn success(ctx: &Context, command: &Command) -> Result<()> { 28 | // zero-width space to force small emoji 29 | respond_eph(ctx, command, "✅\u{200b}") 30 | .await 31 | .context("Failed to add success reaction")?; 32 | 33 | Ok(()) 34 | } 35 | 36 | /// Responds to a command with the given message. 37 | pub(crate) async fn respond<S: Display>( 38 | ctx: &Context, 39 | command: &Command, 40 | response: S, 41 | ) -> Result<()> { 42 | command 43 | .create_interaction_response(ctx, |r| { 44 | r.interaction_response_data(|m| m.content(response)) 45 | }) 46 | .await 47 | .context("Failed to send command response")?; 48 | 49 | Ok(()) 50 | } 51 | 52 | /// Responds to a command with the given message ephemerally. 53 | #[tracing::instrument( 54 | skip_all, 55 | fields( 56 | user_id = %command.user.id, 57 | channel_id = %command.channel_id, 58 | command = %command.data.name, 59 | ) 60 | )] 61 | pub(crate) async fn respond_eph<S: Display>( 62 | ctx: &Context, 63 | command: &Command, 64 | response: S, 65 | ) -> Result<()> { 66 | command 67 | .create_interaction_response(ctx, |r| { 68 | r.interaction_response_data(|m| { 69 | m.flags(MessageFlags::EPHEMERAL).content(response) 70 | }) 71 | }) 72 | .await 73 | .context("Failed to send command response")?; 74 | 75 | Ok(()) 76 | } 77 | 78 | /// Sends a followup message to a command with the given message ephemerally. 79 | /// 80 | /// The command must have been responded to already. 81 | #[tracing::instrument( 82 | skip_all, 83 | fields( 84 | user_id = %command.user.id, 85 | channel_id = %command.channel_id, 86 | command = %command.data.name, 87 | ) 88 | )] 89 | pub(crate) async fn followup_eph<S: Display>( 90 | ctx: &Context, 91 | command: &Command, 92 | response: S, 93 | ) -> Result<()> { 94 | command 95 | .create_followup_message(ctx, |r| { 96 | r.flags(MessageFlags::EPHEMERAL).content(response) 97 | }) 98 | .await 99 | .context("Failed to send command followup")?; 100 | 101 | Ok(()) 102 | } 103 | 104 | /// Determines if a user with the given ID can read messages in the provided 105 | /// [`GuildChannel`]. 106 | #[tracing::instrument( 107 | skip_all, 108 | fields( 109 | user_id = %user_id, 110 | channel_id = %channel.id, 111 | ) 112 | )] 113 | pub(crate) async fn user_can_read_channel( 114 | ctx: &impl CacheHttp, 115 | channel: &GuildChannel, 116 | user_id: UserId, 117 | ) -> Result<Option<bool>> { 118 | #[allow(clippy::large_enum_variant)] 119 | enum MaybePartialGuild { 120 | Partial(PartialGuild), 121 | FullGuild(Guild), 122 | } 123 | 124 | use MaybePartialGuild::*; 125 | 126 | let guild = match ctx.cache().unwrap().guild(channel.guild_id) { 127 | Some(g) => FullGuild(g), 128 | None => Partial(ctx.http().get_guild(channel.guild_id.0).await?), 129 | }; 130 | 131 | let member = match &guild { 132 | FullGuild(g) => optional_result(g.member(ctx, user_id).await)?, 133 | Partial(g) => optional_result(g.member(ctx, user_id).await)?, 134 | }; 135 | 136 | let member = match member { 137 | Some(m) => m, 138 | None => return Ok(None), 139 | }; 140 | 141 | let permissions = match &guild { 142 | FullGuild(g) => g.user_permissions_in(channel, &member)?, 143 | Partial(g) => g.user_permissions_in(channel, &member)?, 144 | }; 145 | 146 | Ok(Some(permissions.view_channel())) 147 | } 148 | 149 | /// Makes the result of an HTTP call optional. 150 | /// 151 | /// If the given `Result` is an `Err` containing an error with a 404 HTTP error, 152 | /// `Ok(None)` is returned. Otherwise, the `Result` is returned, `Ok(x)` being 153 | /// replaced with `Ok(Some(x))`.
154 | pub(crate) fn optional_result<T>( 155 | res: Result<T, SerenityError>, 156 | ) -> Result<Option<T>, SerenityError> { 157 | match res { 158 | Ok(m) => Ok(Some(m)), 159 | Err(SerenityError::Http(err)) => match &*err { 160 | HttpError::UnsuccessfulRequest(ErrorResponse { 161 | status_code, 162 | .. 163 | }) if status_code.as_u16() == 404 => Ok(None), 164 | _ => Err(SerenityError::Http(err)), 165 | }, 166 | Err(err) => Err(err), 167 | } 168 | } 169 | -------------------------------------------------------------------------------- /src/db/backup.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2022 ThatsNoMoon 2 | // Licensed under the Open Software License version 3.0 3 | 4 | //! Automatic backup system. 5 | 6 | use std::{ 7 | io::{Error as IoError, ErrorKind}, 8 | path::{Path, PathBuf}, 9 | time::Duration as StdDuration, 10 | }; 11 | 12 | use chrono::{DateTime, Duration, Utc}; 13 | use rusqlite::{backup::Backup, Connection, Error, OpenFlags}; 14 | use tokio::{fs, task, time::interval}; 15 | use tracing::{debug, error, info, warn}; 16 | 17 | /// Format used for backup timestamps. Can't use ISO-8601 because windows 18 | /// doesn't seem to allow file names to contain `:`. 19 | const TIMESTAMP_FORMAT: &str = "%Y-%m-%dT%H_%M_%S%.f%z"; 20 | 21 | /// Creates the dir at the specified path for backups. 22 | /// 23 | /// Returns `Ok(())` on success or when the directory already existed. 24 | #[tracing::instrument] 25 | async fn ensure_backup_dir_exists(path: &Path) -> Result<(), IoError> { 26 | let result = fs::create_dir(path).await; 27 | if let Err(error) = &result { 28 | if error.kind() == ErrorKind::AlreadyExists { 29 | return Ok(()); 30 | } 31 | } 32 | result 33 | } 34 | 35 | /// Creates a backup in the specified directory. 36 | #[tracing::instrument] 37 | async fn create_backup( 38 | conn: Connection, 39 | backup_dir: PathBuf, 40 | ) -> Result<(), Error> { 41 | task::spawn_blocking(move || { 42 | let backup_name = format!( 43 | "{}_data_backup_{}.db", 44 | env!("CARGO_PKG_NAME"), 45 | Utc::now().format(TIMESTAMP_FORMAT) 46 | ); 47 | 48 | let backup_path = backup_dir.join(backup_name); 49 | 50 | let mut output_conn = Connection::open_with_flags( 51 | backup_path, 52 | OpenFlags::SQLITE_OPEN_CREATE | OpenFlags::SQLITE_OPEN_READ_WRITE, 53 | )?; 54 | 55 | let backup = Backup::new(&conn, &mut output_conn)?; 56 | 57 | backup.run_to_completion(5, StdDuration::from_millis(250), None) 58 | }) 59 | .await 60 | .expect("Failed to join backup task") 61 | } 62 | 63 | /// Cleans up old backups from the specified directory.
64 | #[tracing::instrument] 65 | async fn clean_backups(backup_dir: &Path) { 66 | #[derive(Default)] 67 | struct Backups { 68 | files: Vec<(PathBuf, DateTime)>, 69 | } 70 | 71 | impl Backups { 72 | fn add(&mut self, path: PathBuf) -> Result<(), PathBuf> { 73 | let backup_name = 74 | match path.file_name().and_then(|name| name.to_str()) { 75 | Some(name) => name, 76 | None => return Err(path), 77 | }; 78 | 79 | let backup_prefix = 80 | concat!(env!("CARGO_PKG_NAME"), "_data_backup_"); 81 | 82 | let backup_time: DateTime = match backup_name 83 | .strip_prefix(backup_prefix) 84 | .and_then(|s| s.strip_suffix(".db")) 85 | .and_then(|date_str| { 86 | DateTime::parse_from_str(date_str, TIMESTAMP_FORMAT).ok() 87 | }) { 88 | Some(date) => date.into(), 89 | None => return Err(path), 90 | }; 91 | 92 | self.files.push((path, backup_time)); 93 | Ok(()) 94 | } 95 | 96 | async fn clean(mut self) -> Vec> { 97 | if self.files.len() <= 1 { 98 | return vec![]; 99 | } 100 | 101 | let mut results = Vec::new(); 102 | 103 | // sort by most recent first 104 | self.files 105 | .sort_unstable_by(|(_, time1), (_, time2)| time2.cmp(time1)); 106 | 107 | let mut last_time = self.files.remove(0).1; 108 | let now = Utc::now(); 109 | 110 | let mut daily_found = 0; 111 | let mut weekly_found = 0; 112 | let mut monthly_found = 0; 113 | 114 | for (path, time) in self.files { 115 | if now - time < Duration::days(1) { 116 | continue; 117 | } 118 | 119 | let gap = last_time - time; 120 | 121 | if daily_found < 7 { 122 | // includes some wiggle room so backups made 23.99999 hours 123 | // apart aren't deleted 124 | if gap < Duration::days(1) - Duration::minutes(1) { 125 | debug!( 126 | "Deleting old restart backup from {}", 127 | time.date_naive() 128 | ); 129 | results.push(fs::remove_file(path).await); 130 | } else { 131 | last_time = time; 132 | daily_found += 1; 133 | } 134 | } else if weekly_found < 4 { 135 | if gap < Duration::weeks(1) - Duration::minutes(10) { 136 | debug!( 137 | "Deleting old daily backup from {}", 138 | time.date_naive() 139 | ); 140 | results.push(fs::remove_file(path).await); 141 | } else { 142 | last_time = time; 143 | weekly_found += 1; 144 | } 145 | } else if monthly_found < 12 { 146 | if gap < Duration::days(30) - Duration::minutes(30) { 147 | debug!( 148 | "Deleting old weekly backup from {}", 149 | time.date_naive() 150 | ); 151 | results.push(fs::remove_file(path).await); 152 | } else { 153 | last_time = time; 154 | monthly_found += 1; 155 | } 156 | } else if gap < Duration::days(364) { 157 | debug!( 158 | "Deleting old monthly backup from {}", 159 | time.date_naive() 160 | ); 161 | results.push(fs::remove_file(path).await); 162 | } else { 163 | last_time = time; 164 | } 165 | } 166 | 167 | results 168 | } 169 | } 170 | 171 | let mut backups = Backups::default(); 172 | 173 | let mut dir = match fs::read_dir(&backup_dir).await { 174 | Ok(dir) => dir, 175 | Err(e) => { 176 | error!( 177 | "Error reading backup directory for cleaning: {0}\n{0:?}", 178 | e 179 | ); 180 | return; 181 | } 182 | }; 183 | 184 | loop { 185 | match dir.next_entry().await { 186 | Ok(Some(dir)) => { 187 | if let Err(path) = backups.add(dir.path()) { 188 | warn!("Invalid backup name: {:?}", path); 189 | } 190 | } 191 | Ok(None) => break, 192 | Err(e) => { 193 | error!( 194 | "Error reading backup directory for cleaning: {0}\n{0:?}", 195 | e 196 | ); 197 | break; 198 | } 199 | } 200 | } 201 | 202 | for result in backups.clean().await { 203 | if let Err(e) = result { 204 | error!("Error cleaning backup: {0}\n{0:?}", e); 205 
| } 206 | } 207 | } 208 | 209 | /// Starts the automatic backup cycle. 210 | /// 211 | /// Creates `/backup` if it doesn't exist already, creates a 212 | /// backup, cleans up old backups, and repeats once every 24hrs. 213 | pub(crate) fn start_backup_cycle(db_path: PathBuf, backup_dir: PathBuf) { 214 | task::spawn(async move { 215 | let mut daily = interval(StdDuration::from_secs(60 * 60 * 24)); 216 | 217 | loop { 218 | info!("Backing up database..."); 219 | if let Err(error) = ensure_backup_dir_exists(&backup_dir).await { 220 | error!("Failed to create backup directory: {0}\n{0:?}", error); 221 | continue; 222 | } 223 | 224 | let conn = match Connection::open_with_flags( 225 | &db_path, 226 | OpenFlags::SQLITE_OPEN_READ_ONLY, 227 | ) { 228 | Ok(c) => c, 229 | Err(error) => { 230 | error!( 231 | "Error connecting to database to backup: {0}\n{0:?}", 232 | error 233 | ); 234 | continue; 235 | } 236 | }; 237 | 238 | if let Err(error) = create_backup(conn, backup_dir.clone()).await { 239 | error!("Error backing up database: {0}\n{0:?}", error); 240 | } 241 | 242 | info!("Cleaning up old backups..."); 243 | clean_backups(&backup_dir).await; 244 | 245 | daily.tick().await; 246 | } 247 | }); 248 | } 249 | -------------------------------------------------------------------------------- /src/db/block.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2022 ThatsNoMoon 2 | // Licensed under the Open Software License version 3.0 3 | 4 | //! Handling for blocked users. 5 | 6 | use anyhow::{Context as _, Result}; 7 | use futures_util::TryStreamExt; 8 | use sea_orm::{ 9 | entity::prelude::{ 10 | DeriveActiveModelBehavior, DeriveColumn, DeriveEntityModel, 11 | DerivePrimaryKey, DeriveRelation, EntityTrait, EnumIter, 12 | PrimaryKeyTrait, 13 | }, 14 | ColumnTrait, Condition, IntoActiveModel, QueryFilter, QuerySelect, 15 | }; 16 | use serenity::model::id::UserId; 17 | 18 | use super::{connection, DbInt, IdDbExt}; 19 | 20 | #[derive( 21 | Clone, Debug, PartialEq, Eq, DeriveEntityModel, DeriveActiveModelBehavior, 22 | )] 23 | #[sea_orm(table_name = "blocks")] 24 | pub struct Model { 25 | #[sea_orm(primary_key)] 26 | pub(crate) user_id: DbInt, 27 | #[sea_orm(primary_key)] 28 | pub(crate) blocked_id: DbInt, 29 | } 30 | 31 | #[derive(Debug, EnumIter, DeriveRelation)] 32 | pub enum Relation {} 33 | 34 | /// Represents a blocked user. 35 | #[derive(Debug, Clone)] 36 | pub(crate) struct Block { 37 | /// The user who blocked them. 38 | pub(crate) user_id: UserId, 39 | /// The user who was blocked. 40 | pub(crate) blocked_id: UserId, 41 | } 42 | 43 | impl Block { 44 | /// Fetches the list of blocks a user has added from the DB. 45 | #[tracing::instrument] 46 | pub(crate) async fn user_blocks(user_id: UserId) -> Result> { 47 | Entity::find() 48 | .filter(Column::UserId.eq(user_id.into_db())) 49 | .stream(connection()) 50 | .await? 51 | .map_err(Into::into) 52 | .map_ok(Block::from) 53 | .try_collect() 54 | .await 55 | } 56 | 57 | /// Adds this blocked user to the DB. 58 | #[tracing::instrument] 59 | pub(crate) async fn insert(self) -> Result<()> { 60 | Entity::insert(Model::from(self).into_active_model()) 61 | .exec(connection()) 62 | .await?; 63 | 64 | Ok(()) 65 | } 66 | 67 | /// Checks if this block exists in the DB. 
68 | #[tracing::instrument] 69 | pub(crate) async fn exists(self) -> Result<bool> { 70 | let count = Entity::find() 71 | .select_only() 72 | .column_as(Column::UserId.count(), QueryAs::BlockCount) 73 | .filter( 74 | Condition::all() 75 | .add(Column::UserId.eq(self.user_id.into_db())) 76 | .add(Column::BlockedId.eq(self.blocked_id.into_db())), 77 | ) 78 | .into_values::<i64, QueryAs>() 79 | .one(connection()) 80 | .await?; 81 | 82 | let count = count.context("No count for blocks returned")?; 83 | Ok(count == 1) 84 | } 85 | 86 | /// Deletes this blocked user from the DB (making them not blocked anymore). 87 | #[tracing::instrument] 88 | pub(crate) async fn delete(self) -> Result<()> { 89 | Entity::delete(Model::from(self).into_active_model()) 90 | .exec(connection()) 91 | .await?; 92 | 93 | Ok(()) 94 | } 95 | } 96 | 97 | #[derive(Clone, Copy, Debug, EnumIter, DeriveColumn)] 98 | enum QueryAs { 99 | BlockCount, 100 | } 101 | 102 | impl From<Model> for Block { 103 | fn from(model: Model) -> Self { 104 | Self { 105 | user_id: UserId::from_db(model.user_id), 106 | blocked_id: UserId::from_db(model.blocked_id), 107 | } 108 | } 109 | } 110 | 111 | impl From<Block> for Model { 112 | fn from(block: Block) -> Self { 113 | Self { 114 | user_id: block.user_id.into_db(), 115 | blocked_id: block.blocked_id.into_db(), 116 | } 117 | } 118 | } 119 | -------------------------------------------------------------------------------- /src/db/channel_keyword.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2022 ThatsNoMoon 2 | // Licensed under the Open Software License version 3.0 3 | 4 | use sea_orm::entity::prelude::{ 5 | DeriveActiveModelBehavior, DeriveEntityModel, DerivePrimaryKey, 6 | DeriveRelation, EntityTrait, EnumIter, PrimaryKeyTrait, 7 | }; 8 | 9 | use super::DbInt; 10 | 11 | #[derive( 12 | Clone, Debug, PartialEq, Eq, DeriveEntityModel, DeriveActiveModelBehavior, 13 | )] 14 | #[sea_orm(table_name = "channel_keywords")] 15 | pub struct Model { 16 | #[sea_orm(primary_key)] 17 | pub(crate) keyword: String, 18 | #[sea_orm(primary_key)] 19 | pub(crate) user_id: DbInt, 20 | #[sea_orm(primary_key)] 21 | pub(crate) channel_id: DbInt, 22 | } 23 | 24 | #[derive(Debug, EnumIter, DeriveRelation)] 25 | pub enum Relation {} 26 | -------------------------------------------------------------------------------- /src/db/guild_keyword.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2022 ThatsNoMoon 2 | // Licensed under the Open Software License version 3.0 3 | 4 | use sea_orm::entity::prelude::{ 5 | DeriveActiveModelBehavior, DeriveEntityModel, DerivePrimaryKey, 6 | DeriveRelation, EntityTrait, EnumIter, PrimaryKeyTrait, 7 | }; 8 | 9 | use super::DbInt; 10 | 11 | #[derive( 12 | Clone, Debug, PartialEq, Eq, DeriveEntityModel, DeriveActiveModelBehavior, 13 | )] 14 | #[sea_orm(table_name = "guild_keywords")] 15 | pub struct Model { 16 | #[sea_orm(primary_key)] 17 | pub(crate) keyword: String, 18 | #[sea_orm(primary_key)] 19 | pub(crate) user_id: DbInt, 20 | #[sea_orm(primary_key)] 21 | pub(crate) guild_id: DbInt, 22 | } 23 | 24 | #[derive(Debug, EnumIter, DeriveRelation)] 25 | pub enum Relation {} 26 | -------------------------------------------------------------------------------- /src/db/ignore.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2022 ThatsNoMoon 2 | // Licensed under the Open Software License version 3.0 3 | 4 | //! Handling for ignored phrases.
5 | 6 | use anyhow::{Context as _, Result}; 7 | use futures_util::TryStreamExt; 8 | use sea_orm::{ 9 | entity::prelude::{ 10 | DeriveActiveModelBehavior, DeriveColumn, DeriveEntityModel, 11 | DerivePrimaryKey, DeriveRelation, EntityTrait, EnumIter, 12 | PrimaryKeyTrait, 13 | }, 14 | ColumnTrait, Condition, IntoActiveModel, QueryFilter, QuerySelect, 15 | }; 16 | use serenity::model::id::{GuildId, UserId}; 17 | 18 | use super::{connection, DbInt, IdDbExt}; 19 | 20 | #[derive( 21 | Clone, Debug, PartialEq, Eq, DeriveEntityModel, DeriveActiveModelBehavior, 22 | )] 23 | #[sea_orm(table_name = "guild_ignores")] 24 | pub struct Model { 25 | #[sea_orm(primary_key)] 26 | pub(crate) phrase: String, 27 | #[sea_orm(primary_key)] 28 | pub(crate) user_id: DbInt, 29 | #[sea_orm(primary_key)] 30 | pub(crate) guild_id: DbInt, 31 | } 32 | 33 | #[derive(Debug, EnumIter, DeriveRelation)] 34 | pub enum Relation {} 35 | 36 | /// Represents an ignored phrase. 37 | #[derive(Debug, Clone)] 38 | pub(crate) struct Ignore { 39 | /// The phrase that should be ignored. 40 | pub(crate) phrase: String, 41 | /// The user that ignored this phrase. 42 | pub(crate) user_id: UserId, 43 | /// The guild in which the user ignored the phrase. 44 | pub(crate) guild_id: GuildId, 45 | } 46 | 47 | impl Ignore { 48 | /// Fetches the list of ignored phrases of the specified user in the 49 | /// specified guild from the DB. 50 | #[tracing::instrument] 51 | pub(crate) async fn user_guild_ignores( 52 | user_id: UserId, 53 | guild_id: GuildId, 54 | ) -> Result<Vec<Self>> { 55 | Entity::find() 56 | .filter( 57 | Condition::all() 58 | .add(Column::UserId.eq(user_id.into_db())) 59 | .add(Column::GuildId.eq(guild_id.into_db())), 60 | ) 61 | .stream(connection()) 62 | .await? 63 | .map_err(Into::into) 64 | .map_ok(Ignore::from) 65 | .try_collect() 66 | .await 67 | } 68 | 69 | /// Fetches the list of ignored phrases of the specified user across all 70 | /// guilds from the DB. 71 | #[tracing::instrument] 72 | pub(crate) async fn user_ignores(user_id: UserId) -> Result<Vec<Self>> { 73 | Entity::find() 74 | .filter(Column::UserId.eq(user_id.into_db())) 75 | .stream(connection()) 76 | .await? 77 | .map_err(Into::into) 78 | .map_ok(Ignore::from) 79 | .try_collect() 80 | .await 81 | } 82 | 83 | /// Checks if this ignored phrase already exists in the DB. 84 | #[tracing::instrument( 85 | skip(self), 86 | fields( 87 | self.user_id = %self.user_id, 88 | self.guild_id = %self.guild_id, 89 | ))] 90 | pub(crate) async fn exists(self) -> Result<bool> { 91 | let count = Entity::find() 92 | .select_only() 93 | .column_as(Column::UserId.count(), QueryAs::IgnoreCount) 94 | .filter( 95 | Condition::all() 96 | .add(Column::UserId.eq(self.user_id.into_db())) 97 | .add(Column::GuildId.eq(self.guild_id.into_db())) 98 | .add(Column::Phrase.eq(&*self.phrase)), 99 | ) 100 | .into_values::<i64, QueryAs>() 101 | .one(connection()) 102 | .await?; 103 | 104 | let count = count.context("No count for ignores returned")?; 105 | Ok(count == 1) 106 | } 107 | 108 | /// Adds this ignored phrase to the DB. 109 | #[tracing::instrument( 110 | skip(self), 111 | fields( 112 | self.user_id = %self.user_id, 113 | self.guild_id = %self.guild_id, 114 | ))] 115 | pub(crate) async fn insert(self) -> Result<()> { 116 | Entity::insert(Model::from(self).into_active_model()) 117 | .exec(connection()) 118 | .await?; 119 | 120 | Ok(()) 121 | } 122 | 123 | /// Deletes this ignored phrase from the DB.
124 | #[tracing::instrument( 125 | skip(self), 126 | fields( 127 | self.user_id = %self.user_id, 128 | self.guild_id = %self.guild_id, 129 | ))] 130 | pub(crate) async fn delete(self) -> Result<()> { 131 | Entity::delete(Model::from(self).into_active_model()) 132 | .exec(connection()) 133 | .await?; 134 | 135 | Ok(()) 136 | } 137 | 138 | /// Deletes all ignored phrases of the specified user in the specified guild 139 | /// from the DB. 140 | #[tracing::instrument] 141 | pub(crate) async fn delete_in_guild( 142 | user_id: UserId, 143 | guild_id: GuildId, 144 | ) -> Result<u64> { 145 | let result = Entity::delete_many() 146 | .filter( 147 | Condition::all() 148 | .add(Column::UserId.eq(user_id.into_db())) 149 | .add(Column::GuildId.eq(guild_id.into_db())), 150 | ) 151 | .exec(connection()) 152 | .await?; 153 | 154 | Ok(result.rows_affected) 155 | } 156 | } 157 | 158 | #[derive(Clone, Copy, Debug, EnumIter, DeriveColumn)] 159 | enum QueryAs { 160 | IgnoreCount, 161 | } 162 | 163 | impl From<Model> for Ignore { 164 | fn from(model: Model) -> Self { 165 | Self { 166 | phrase: model.phrase, 167 | user_id: UserId::from_db(model.user_id), 168 | guild_id: GuildId::from_db(model.guild_id), 169 | } 170 | } 171 | } 172 | 173 | impl From<Ignore> for Model { 174 | fn from(ignore: Ignore) -> Self { 175 | Self { 176 | phrase: ignore.phrase, 177 | user_id: ignore.user_id.into_db(), 178 | guild_id: ignore.guild_id.into_db(), 179 | } 180 | } 181 | } 182 | -------------------------------------------------------------------------------- /src/db/keyword.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2022 ThatsNoMoon 2 | // Licensed under the Open Software License version 3.0 3 | 4 | //! Handling for keywords. 5 | 6 | use anyhow::{Context, Result}; 7 | use futures_util::TryStreamExt; 8 | use sea_orm::{ 9 | sea_query::Expr, ColumnTrait, Condition, DeriveColumn, EntityTrait, 10 | EnumIter, IntoActiveModel, QueryFilter, QuerySelect, QueryTrait, 11 | }; 12 | use serenity::model::id::{ChannelId, GuildId, UserId}; 13 | use tracing::info_span; 14 | 15 | use super::{ 16 | block, channel_keyword, connection, guild_keyword, mute, opt_out, IdDbExt, 17 | }; 18 | 19 | #[derive(Debug, Clone, Copy)] 20 | pub(crate) enum KeywordKind { 21 | Channel(ChannelId), 22 | Guild(GuildId), 23 | } 24 | 25 | impl Default for KeywordKind { 26 | fn default() -> Self { 27 | Self::Channel(ChannelId(0)) 28 | } 29 | } 30 | 31 | #[derive(Debug, Clone, Default)] 32 | pub(crate) struct Keyword { 33 | pub(crate) keyword: String, 34 | pub(crate) user_id: UserId, 35 | pub(crate) kind: KeywordKind, 36 | } 37 | 38 | enum EitherModel { 39 | Channel(channel_keyword::Model), 40 | Guild(guild_keyword::Model), 41 | } 42 | 43 | impl Keyword { 44 | fn into_model(self) -> EitherModel { 45 | match self.kind { 46 | KeywordKind::Guild(guild_id) => { 47 | EitherModel::Guild(guild_keyword::Model { 48 | keyword: self.keyword, 49 | user_id: self.user_id.into_db(), 50 | guild_id: guild_id.into_db(), 51 | }) 52 | } 53 | KeywordKind::Channel(channel_id) => { 54 | EitherModel::Channel(channel_keyword::Model { 55 | keyword: self.keyword, 56 | user_id: self.user_id.into_db(), 57 | channel_id: channel_id.into_db(), 58 | }) 59 | } 60 | } 61 | } 62 | 63 | /// Gets keywords that may be relevant to a message. 64 | /// 65 | /// Fetches all guild-wide keywords in the specified guild, as long as the 66 | /// creator of the keyword didn't mute the channel or block the author.
67 | /// 68 | /// Fetches all channel-specific keywords in the specified channel, as long 69 | /// as the creator of the keyword didn't block the author. 70 | #[tracing::instrument] 71 | pub(crate) async fn get_relevant_keywords( 72 | guild_id: GuildId, 73 | channel_id: ChannelId, 74 | author_id: UserId, 75 | ) -> Result> { 76 | let span = info_span!( 77 | "relevant_guild_keywords", 78 | author_id = %author_id, 79 | guild_id = %guild_id 80 | ); 81 | 82 | let entered = span.enter(); 83 | 84 | let opted_out = opt_out::Entity::find() 85 | .select_only() 86 | .column(opt_out::Column::UserId) 87 | .into_query(); 88 | 89 | let muted_channels = 90 | mute::Entity::find() 91 | .select_only() 92 | .column(mute::Column::ChannelId) 93 | .filter(Expr::col((mute::Entity, mute::Column::UserId)).equals( 94 | (guild_keyword::Entity, guild_keyword::Column::UserId), 95 | )) 96 | .into_query(); 97 | 98 | let users_with_block = block::Entity::find() 99 | .select_only() 100 | .column(block::Column::UserId) 101 | .filter(block::Column::BlockedId.eq(author_id.into_db())) 102 | .into_query(); 103 | 104 | let keywords: Vec = guild_keyword::Entity::find() 105 | .filter( 106 | Condition::all() 107 | .add(guild_keyword::Column::UserId.ne(author_id.into_db())) 108 | .add(guild_keyword::Column::GuildId.eq(guild_id.into_db())) 109 | .add( 110 | guild_keyword::Column::UserId 111 | .not_in_subquery(opted_out.clone()), 112 | ) 113 | .add( 114 | Expr::expr(Expr::value(author_id.into_db())) 115 | .not_in_subquery(opted_out.clone()), 116 | ) 117 | .add( 118 | guild_keyword::Column::UserId 119 | .not_in_subquery(users_with_block.clone()), 120 | ) 121 | .add( 122 | Expr::expr(Expr::value(channel_id.into_db())) 123 | .not_in_subquery(muted_channels.clone()), 124 | ), 125 | ) 126 | .stream(connection()) 127 | .await? 128 | .map_err(anyhow::Error::from) 129 | .map_ok(Keyword::from) 130 | .try_collect() 131 | .await?; 132 | 133 | drop(entered); 134 | drop(span); 135 | 136 | let span = info_span!( 137 | "relevant_channel_keywords", 138 | author_id = %author_id, 139 | channel_id = %channel_id 140 | ); 141 | 142 | let _entered = span.enter(); 143 | 144 | channel_keyword::Entity::find() 145 | .filter( 146 | Condition::all() 147 | .add( 148 | channel_keyword::Column::UserId.ne(author_id.into_db()), 149 | ) 150 | .add( 151 | channel_keyword::Column::ChannelId 152 | .eq(channel_id.into_db()), 153 | ) 154 | .add( 155 | channel_keyword::Column::UserId 156 | .not_in_subquery(opted_out.clone()), 157 | ) 158 | .add( 159 | Expr::expr(Expr::value(author_id.into_db())) 160 | .not_in_subquery(opted_out), 161 | ) 162 | .add( 163 | channel_keyword::Column::UserId 164 | .not_in_subquery(users_with_block), 165 | ), 166 | ) 167 | .stream(connection()) 168 | .await? 169 | .map_err(anyhow::Error::from) 170 | .map_ok(Keyword::from) 171 | .try_fold(keywords, |mut keywords, keyword| async move { 172 | keywords.push(keyword); 173 | Ok(keywords) 174 | }) 175 | .await 176 | } 177 | 178 | /// Fetches all guild-wide keywords created by the specified user in the 179 | /// specified guild. 180 | #[tracing::instrument] 181 | pub(crate) async fn user_guild_keywords( 182 | user_id: UserId, 183 | guild_id: GuildId, 184 | ) -> Result> { 185 | guild_keyword::Entity::find() 186 | .filter( 187 | Condition::all() 188 | .add(guild_keyword::Column::UserId.eq(user_id.into_db())) 189 | .add(guild_keyword::Column::GuildId.eq(guild_id.into_db())), 190 | ) 191 | .stream(connection()) 192 | .await? 
193 | .map_err(Into::into) 194 | .map_ok(Keyword::from) 195 | .try_collect() 196 | .await 197 | } 198 | 199 | /// Fetches all channel-specific keywords created by the specified user. 200 | #[tracing::instrument] 201 | pub(crate) async fn user_channel_keywords( 202 | user_id: UserId, 203 | ) -> Result> { 204 | channel_keyword::Entity::find() 205 | .filter(channel_keyword::Column::UserId.eq(user_id.into_db())) 206 | .stream(connection()) 207 | .await? 208 | .map_err(Into::into) 209 | .map_ok(Keyword::from) 210 | .try_collect() 211 | .await 212 | } 213 | 214 | /// Fetches all guild-wide and channel-specific keywords created by the 215 | /// specified user. 216 | #[tracing::instrument] 217 | pub(crate) async fn user_keywords(user_id: UserId) -> Result> { 218 | let keywords: Vec = guild_keyword::Entity::find() 219 | .filter(guild_keyword::Column::UserId.eq(user_id.into_db())) 220 | .stream(connection()) 221 | .await? 222 | .map_err(anyhow::Error::from) 223 | .map_ok(Keyword::from) 224 | .try_collect() 225 | .await?; 226 | 227 | channel_keyword::Entity::find() 228 | .filter(channel_keyword::Column::UserId.eq(user_id.into_db())) 229 | .stream(connection()) 230 | .await? 231 | .map_err(anyhow::Error::from) 232 | .map_ok(Keyword::from) 233 | .try_fold(keywords, |mut keywords, keyword| async move { 234 | keywords.push(keyword); 235 | Ok(keywords) 236 | }) 237 | .await 238 | } 239 | 240 | /// Checks if this keyword has already been created by this user. 241 | #[tracing::instrument( 242 | skip(self), 243 | fields( 244 | self.user_id = %self.user_id, 245 | self.kind = ?self.kind, 246 | ))] 247 | pub(crate) async fn exists(self) -> Result { 248 | match self.kind { 249 | KeywordKind::Guild(guild_id) => { 250 | let count = guild_keyword::Entity::find() 251 | .select_only() 252 | .column_as( 253 | guild_keyword::Column::UserId.count(), 254 | QueryAs::KeywordCount, 255 | ) 256 | .filter( 257 | Condition::all() 258 | .add( 259 | guild_keyword::Column::UserId 260 | .eq(self.user_id.into_db()), 261 | ) 262 | .add( 263 | guild_keyword::Column::GuildId 264 | .eq(guild_id.into_db()), 265 | ) 266 | .add( 267 | guild_keyword::Column::Keyword 268 | .eq(&*self.keyword), 269 | ), 270 | ) 271 | .into_values::() 272 | .one(connection()) 273 | .await?; 274 | 275 | let count = 276 | count.context("No count for guild keywords returned")?; 277 | Ok(count == 1) 278 | } 279 | KeywordKind::Channel(channel_id) => { 280 | let count = channel_keyword::Entity::find() 281 | .select_only() 282 | .column_as( 283 | channel_keyword::Column::UserId.count(), 284 | QueryAs::KeywordCount, 285 | ) 286 | .filter( 287 | Condition::all() 288 | .add( 289 | channel_keyword::Column::UserId 290 | .eq(self.user_id.into_db()), 291 | ) 292 | .add( 293 | channel_keyword::Column::ChannelId 294 | .eq(channel_id.into_db()), 295 | ) 296 | .add( 297 | channel_keyword::Column::Keyword 298 | .eq(&*self.keyword), 299 | ), 300 | ) 301 | .into_values::() 302 | .one(connection()) 303 | .await?; 304 | 305 | let count = 306 | count.context("No count for channel keywords returned")?; 307 | Ok(count == 1) 308 | } 309 | } 310 | } 311 | 312 | /// Returns the number of keywords this user has created across all guilds 313 | /// and channels. 
314 | #[tracing::instrument] 315 | pub(crate) async fn user_keyword_count(user_id: UserId) -> Result { 316 | let guild_keywords = guild_keyword::Entity::find() 317 | .select_only() 318 | .column_as( 319 | guild_keyword::Column::UserId.count(), 320 | QueryAs::KeywordCount, 321 | ) 322 | .filter(guild_keyword::Column::UserId.eq(user_id.into_db())) 323 | .into_values::() 324 | .one(connection()) 325 | .await? 326 | .context("No count for guild keywords returned")?; 327 | 328 | let channel_keywords = channel_keyword::Entity::find() 329 | .select_only() 330 | .column_as( 331 | channel_keyword::Column::UserId.count(), 332 | QueryAs::KeywordCount, 333 | ) 334 | .filter(channel_keyword::Column::UserId.eq(user_id.into_db())) 335 | .into_values::() 336 | .one(connection()) 337 | .await? 338 | .context("No count for channel keywords returned")?; 339 | 340 | Ok(guild_keywords as u64 + channel_keywords as u64) 341 | } 342 | 343 | /// Adds this keyword to the DB. 344 | #[tracing::instrument( 345 | skip(self), 346 | fields( 347 | self.user_id = %self.user_id, 348 | self.kind = ?self.kind, 349 | ))] 350 | pub(crate) async fn insert(self) -> Result<()> { 351 | match self.into_model() { 352 | EitherModel::Guild(model) => { 353 | guild_keyword::Entity::insert(model.into_active_model()) 354 | .exec(connection()) 355 | .await?; 356 | } 357 | EitherModel::Channel(model) => { 358 | channel_keyword::Entity::insert(model.into_active_model()) 359 | .exec(connection()) 360 | .await?; 361 | } 362 | } 363 | 364 | Ok(()) 365 | } 366 | 367 | /// Deletes this keyword from the DB. 368 | #[tracing::instrument( 369 | skip(self), 370 | fields( 371 | self.user_id = %self.user_id, 372 | self.kind = ?self.kind, 373 | ))] 374 | pub(crate) async fn delete(self) -> Result<()> { 375 | match self.into_model() { 376 | EitherModel::Guild(model) => { 377 | guild_keyword::Entity::delete(model.into_active_model()) 378 | .exec(connection()) 379 | .await?; 380 | } 381 | EitherModel::Channel(model) => { 382 | channel_keyword::Entity::delete(model.into_active_model()) 383 | .exec(connection()) 384 | .await?; 385 | } 386 | } 387 | 388 | Ok(()) 389 | } 390 | 391 | /// Deletes all guild-wide keywords created by the specified user in the 392 | /// specified guild. 393 | #[tracing::instrument] 394 | pub(crate) async fn delete_in_guild( 395 | user_id: UserId, 396 | guild_id: GuildId, 397 | ) -> Result { 398 | let result = guild_keyword::Entity::delete_many() 399 | .filter( 400 | Condition::all() 401 | .add(guild_keyword::Column::UserId.eq(user_id.into_db())) 402 | .add(guild_keyword::Column::GuildId.eq(guild_id.into_db())), 403 | ) 404 | .exec(connection()) 405 | .await?; 406 | 407 | Ok(result.rows_affected) 408 | } 409 | 410 | /// Deletes all channel-specific keywords created by the specified user in 411 | /// the specified channel. 
412 | #[tracing::instrument] 413 | pub(crate) async fn delete_in_channel( 414 | user_id: UserId, 415 | channel_id: ChannelId, 416 | ) -> Result { 417 | let result = channel_keyword::Entity::delete_many() 418 | .filter( 419 | Condition::all() 420 | .add(channel_keyword::Column::UserId.eq(user_id.into_db())) 421 | .add( 422 | channel_keyword::Column::ChannelId 423 | .eq(channel_id.into_db()), 424 | ), 425 | ) 426 | .exec(connection()) 427 | .await?; 428 | 429 | Ok(result.rows_affected) 430 | } 431 | } 432 | 433 | #[derive(Clone, Copy, Debug, EnumIter, DeriveColumn)] 434 | enum QueryAs { 435 | KeywordCount, 436 | MutedChannel, 437 | } 438 | 439 | impl From for Keyword { 440 | fn from(model: guild_keyword::Model) -> Self { 441 | Self { 442 | keyword: model.keyword, 443 | user_id: UserId::from_db(model.user_id), 444 | kind: KeywordKind::Guild(GuildId::from_db(model.guild_id)), 445 | } 446 | } 447 | } 448 | 449 | impl From for Keyword { 450 | fn from(model: channel_keyword::Model) -> Self { 451 | Self { 452 | keyword: model.keyword, 453 | user_id: UserId::from_db(model.user_id), 454 | kind: KeywordKind::Channel(ChannelId::from_db(model.channel_id)), 455 | } 456 | } 457 | } 458 | -------------------------------------------------------------------------------- /src/db/migration/m2022_08_04_000001_init.rs: -------------------------------------------------------------------------------- 1 | use sea_orm::sea_query::Index; 2 | use sea_orm_migration::prelude::{ 3 | async_trait, ColumnDef, DbErr, DeriveMigrationName, MigrationTrait, 4 | SchemaManager, Table, 5 | }; 6 | 7 | use crate::db::{ 8 | block, channel_keyword, guild_keyword, ignore, mute, notification, opt_out, 9 | user_state, 10 | }; 11 | 12 | #[derive(DeriveMigrationName)] 13 | pub struct Migration; 14 | 15 | #[async_trait::async_trait] 16 | impl MigrationTrait for Migration { 17 | async fn up(&self, manager: &SchemaManager) -> Result<(), DbErr> { 18 | manager 19 | .create_table( 20 | Table::create() 21 | .table(guild_keyword::Entity) 22 | .if_not_exists() 23 | .col( 24 | ColumnDef::new(guild_keyword::Column::UserId) 25 | .big_integer() 26 | .not_null(), 27 | ) 28 | .col( 29 | ColumnDef::new(guild_keyword::Column::GuildId) 30 | .big_integer() 31 | .not_null(), 32 | ) 33 | .col( 34 | ColumnDef::new(guild_keyword::Column::Keyword) 35 | .string() 36 | .not_null(), 37 | ) 38 | .primary_key( 39 | Index::create() 40 | .col(guild_keyword::Column::UserId) 41 | .col(guild_keyword::Column::GuildId) 42 | .col(guild_keyword::Column::Keyword), 43 | ) 44 | .to_owned(), 45 | ) 46 | .await?; 47 | 48 | manager 49 | .create_table( 50 | Table::create() 51 | .table(channel_keyword::Entity) 52 | .if_not_exists() 53 | .col( 54 | ColumnDef::new(channel_keyword::Column::UserId) 55 | .big_integer() 56 | .not_null(), 57 | ) 58 | .col( 59 | ColumnDef::new(channel_keyword::Column::ChannelId) 60 | .big_integer() 61 | .not_null(), 62 | ) 63 | .col( 64 | ColumnDef::new(channel_keyword::Column::Keyword) 65 | .string() 66 | .not_null(), 67 | ) 68 | .primary_key( 69 | Index::create() 70 | .col(channel_keyword::Column::UserId) 71 | .col(channel_keyword::Column::ChannelId) 72 | .col(channel_keyword::Column::Keyword), 73 | ) 74 | .to_owned(), 75 | ) 76 | .await?; 77 | 78 | manager 79 | .create_table( 80 | Table::create() 81 | .table(ignore::Entity) 82 | .if_not_exists() 83 | .col( 84 | ColumnDef::new(ignore::Column::Phrase) 85 | .string() 86 | .not_null(), 87 | ) 88 | .col( 89 | ColumnDef::new(ignore::Column::UserId) 90 | .big_integer() 91 | .not_null(), 92 | ) 93 | .col( 94 | 
ColumnDef::new(ignore::Column::GuildId) 95 | .big_integer() 96 | .not_null(), 97 | ) 98 | .primary_key( 99 | Index::create() 100 | .col(ignore::Column::Phrase) 101 | .col(ignore::Column::UserId) 102 | .col(ignore::Column::GuildId), 103 | ) 104 | .to_owned(), 105 | ) 106 | .await?; 107 | 108 | manager 109 | .create_table( 110 | Table::create() 111 | .table(mute::Entity) 112 | .if_not_exists() 113 | .col( 114 | ColumnDef::new(mute::Column::UserId) 115 | .big_integer() 116 | .not_null(), 117 | ) 118 | .col( 119 | ColumnDef::new(mute::Column::ChannelId) 120 | .big_integer() 121 | .not_null(), 122 | ) 123 | .primary_key( 124 | Index::create() 125 | .col(mute::Column::UserId) 126 | .col(mute::Column::ChannelId), 127 | ) 128 | .to_owned(), 129 | ) 130 | .await?; 131 | 132 | manager 133 | .create_table( 134 | Table::create() 135 | .table(block::Entity) 136 | .if_not_exists() 137 | .col( 138 | ColumnDef::new(block::Column::UserId) 139 | .big_integer() 140 | .not_null(), 141 | ) 142 | .col( 143 | ColumnDef::new(block::Column::BlockedId) 144 | .big_integer() 145 | .not_null(), 146 | ) 147 | .primary_key( 148 | Index::create() 149 | .col(block::Column::UserId) 150 | .col(block::Column::BlockedId), 151 | ) 152 | .to_owned(), 153 | ) 154 | .await?; 155 | 156 | manager 157 | .create_table( 158 | Table::create() 159 | .table(notification::Entity) 160 | .if_not_exists() 161 | .col( 162 | ColumnDef::new(notification::Column::UserId) 163 | .big_integer() 164 | .not_null(), 165 | ) 166 | .col( 167 | ColumnDef::new(notification::Column::OriginalMessage) 168 | .big_integer() 169 | .not_null(), 170 | ) 171 | .col( 172 | ColumnDef::new( 173 | notification::Column::NotificationMessage, 174 | ) 175 | .big_integer() 176 | .not_null() 177 | .primary_key(), 178 | ) 179 | .col( 180 | ColumnDef::new(notification::Column::Keyword) 181 | .string() 182 | .not_null(), 183 | ) 184 | .to_owned(), 185 | ) 186 | .await?; 187 | 188 | manager 189 | .create_table( 190 | Table::create() 191 | .table(opt_out::Entity) 192 | .if_not_exists() 193 | .col( 194 | ColumnDef::new(opt_out::Column::UserId) 195 | .big_integer() 196 | .not_null() 197 | .primary_key(), 198 | ) 199 | .to_owned(), 200 | ) 201 | .await?; 202 | 203 | manager 204 | .create_table( 205 | Table::create() 206 | .table(user_state::Entity) 207 | .if_not_exists() 208 | .col( 209 | ColumnDef::new(user_state::Column::UserId) 210 | .big_integer() 211 | .not_null() 212 | .primary_key(), 213 | ) 214 | .col( 215 | ColumnDef::new(user_state::Column::State) 216 | .small_integer() 217 | .not_null(), 218 | ) 219 | .to_owned(), 220 | ) 221 | .await?; 222 | 223 | Ok(()) 224 | } 225 | 226 | async fn down(&self, manager: &SchemaManager) -> Result<(), DbErr> { 227 | manager 228 | .drop_table(Table::drop().table(guild_keyword::Entity).to_owned()) 229 | .await?; 230 | manager 231 | .drop_table(Table::drop().table(channel_keyword::Entity).to_owned()) 232 | .await?; 233 | manager 234 | .drop_table(Table::drop().table(mute::Entity).to_owned()) 235 | .await?; 236 | manager 237 | .drop_table(Table::drop().table(block::Entity).to_owned()) 238 | .await?; 239 | manager 240 | .drop_table(Table::drop().table(ignore::Entity).to_owned()) 241 | .await?; 242 | manager 243 | .drop_table(Table::drop().table(opt_out::Entity).to_owned()) 244 | .await?; 245 | manager 246 | .drop_table(Table::drop().table(notification::Entity).to_owned()) 247 | .await?; 248 | manager 249 | .drop_table(Table::drop().table(user_state::Entity).to_owned()) 250 | .await?; 251 | 252 | Ok(()) 253 | } 254 | } 255 | 
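The init migration above builds the project's full initial schema through sea-orm-migration's table builder API. As a minimal sketch of how such a migration can be exercised in isolation, the hypothetical test below applies it to a throwaway in-memory SQLite database and then rolls it back; this test module is not part of the repository, and its name, the `sqlite::memory:` URL, and the assumption that sea-orm's SQLite driver and tokio's test macro are enabled are all illustrative.

#[cfg(test)]
mod tests {
    use sea_orm::Database;
    use sea_orm_migration::prelude::{DbErr, MigrationTrait, SchemaManager};

    use super::Migration;

    // Hypothetical smoke test: apply the initial schema to an in-memory
    // SQLite database, then undo it to check that `down` also works.
    #[tokio::test]
    async fn init_migration_up_down() -> Result<(), DbErr> {
        let conn = Database::connect("sqlite::memory:").await?;
        let manager = SchemaManager::new(&conn);

        Migration.up(&manager).await?;
        Migration.down(&manager).await?;

        Ok(())
    }
}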
-------------------------------------------------------------------------------- /src/db/migration/m2023_01_08_000001_composite_notification_key.rs: -------------------------------------------------------------------------------- 1 | use sea_orm::sea_query::{Alias, Index, Query}; 2 | use sea_orm_migration::prelude::{ 3 | async_trait, ColumnDef, DbErr, DeriveMigrationName, MigrationTrait, 4 | SchemaManager, Table, 5 | }; 6 | 7 | use crate::db::notification::{self, Column}; 8 | 9 | #[derive(DeriveMigrationName)] 10 | pub(crate) struct Migration; 11 | 12 | #[async_trait::async_trait] 13 | impl MigrationTrait for Migration { 14 | async fn up(&self, manager: &SchemaManager) -> Result<(), DbErr> { 15 | let tmp_table = Alias::new("__migrated_sent_notifications"); 16 | manager 17 | .create_table( 18 | Table::create() 19 | .table(tmp_table.clone()) 20 | .col( 21 | ColumnDef::new(Column::UserId).big_integer().not_null(), 22 | ) 23 | .col( 24 | ColumnDef::new(Column::OriginalMessage) 25 | .big_integer() 26 | .not_null(), 27 | ) 28 | .col( 29 | ColumnDef::new(Column::NotificationMessage) 30 | .big_integer() 31 | .not_null(), 32 | ) 33 | .col(ColumnDef::new(Column::Keyword).string().not_null()) 34 | .primary_key( 35 | Index::create() 36 | .col(Column::NotificationMessage) 37 | .col(Column::Keyword), 38 | ) 39 | .to_owned(), 40 | ) 41 | .await?; 42 | 43 | manager 44 | .exec_stmt( 45 | Query::insert() 46 | .into_table(tmp_table.clone()) 47 | .columns([ 48 | Column::UserId, 49 | Column::OriginalMessage, 50 | Column::NotificationMessage, 51 | Column::Keyword, 52 | ]) 53 | .select_from( 54 | Query::select() 55 | .from(notification::Entity) 56 | .columns([ 57 | Column::UserId, 58 | Column::OriginalMessage, 59 | Column::NotificationMessage, 60 | Column::Keyword, 61 | ]) 62 | .to_owned(), 63 | ) 64 | .map_err(|e| DbErr::Migration(e.to_string()))? 65 | .to_owned(), 66 | ) 67 | .await?; 68 | 69 | manager 70 | .drop_table(Table::drop().table(notification::Entity).to_owned()) 71 | .await?; 72 | 73 | manager 74 | .rename_table( 75 | Table::rename() 76 | .table(tmp_table, notification::Entity) 77 | .to_owned(), 78 | ) 79 | .await 80 | } 81 | 82 | async fn down(&self, manager: &SchemaManager) -> Result<(), DbErr> { 83 | let tmp_table = Alias::new("__migrated_sent_notifications"); 84 | manager 85 | .create_table( 86 | Table::create() 87 | .table(tmp_table.clone()) 88 | .col( 89 | ColumnDef::new(Column::UserId).big_integer().not_null(), 90 | ) 91 | .col( 92 | ColumnDef::new(Column::OriginalMessage) 93 | .big_integer() 94 | .not_null(), 95 | ) 96 | .col( 97 | ColumnDef::new(Column::NotificationMessage) 98 | .big_integer() 99 | .not_null() 100 | .primary_key(), 101 | ) 102 | .col(ColumnDef::new(Column::Keyword).string().not_null()) 103 | .to_owned(), 104 | ) 105 | .await?; 106 | 107 | manager 108 | .exec_stmt( 109 | Query::insert() 110 | .into_table(tmp_table.clone()) 111 | .columns([ 112 | Column::UserId, 113 | Column::OriginalMessage, 114 | Column::NotificationMessage, 115 | Column::Keyword, 116 | ]) 117 | .select_from( 118 | Query::select() 119 | .from(notification::Entity) 120 | .columns([ 121 | Column::UserId, 122 | Column::OriginalMessage, 123 | Column::NotificationMessage, 124 | Column::Keyword, 125 | ]) 126 | .to_owned(), 127 | ) 128 | .map_err(|e| DbErr::Migration(e.to_string()))? 
129 | .to_owned(), 130 | ) 131 | .await?; 132 | 133 | manager 134 | .drop_table(Table::drop().table(notification::Entity).to_owned()) 135 | .await?; 136 | 137 | manager 138 | .rename_table( 139 | Table::rename() 140 | .table(tmp_table, notification::Entity) 141 | .to_owned(), 142 | ) 143 | .await 144 | } 145 | } 146 | -------------------------------------------------------------------------------- /src/db/migration/m2023_05_18_000001_rename_pkey_index.rs: -------------------------------------------------------------------------------- 1 | use sea_orm::sea_query::{Alias, Index, Query}; 2 | use sea_orm_migration::prelude::{ 3 | async_trait, ColumnDef, DbErr, DeriveMigrationName, MigrationTrait, 4 | SchemaManager, Table, 5 | }; 6 | 7 | use crate::db::notification::{self, Column}; 8 | 9 | #[derive(DeriveMigrationName)] 10 | pub(crate) struct Migration; 11 | 12 | #[async_trait::async_trait] 13 | impl MigrationTrait for Migration { 14 | async fn up(&self, manager: &SchemaManager) -> Result<(), DbErr> { 15 | let tmp_table = Alias::new("__migrated_sent_notifications"); 16 | manager 17 | .create_table( 18 | Table::create() 19 | .table(tmp_table.clone()) 20 | .col( 21 | ColumnDef::new(Column::UserId).big_integer().not_null(), 22 | ) 23 | .col( 24 | ColumnDef::new(Column::OriginalMessage) 25 | .big_integer() 26 | .not_null(), 27 | ) 28 | .col( 29 | ColumnDef::new(Column::NotificationMessage) 30 | .big_integer() 31 | .not_null(), 32 | ) 33 | .col(ColumnDef::new(Column::Keyword).string().not_null()) 34 | .primary_key( 35 | Index::create() 36 | .name("sent_notifications_pkey") 37 | .col(Column::NotificationMessage) 38 | .col(Column::Keyword), 39 | ) 40 | .to_owned(), 41 | ) 42 | .await?; 43 | 44 | manager 45 | .exec_stmt( 46 | Query::insert() 47 | .into_table(tmp_table.clone()) 48 | .columns([ 49 | Column::UserId, 50 | Column::OriginalMessage, 51 | Column::NotificationMessage, 52 | Column::Keyword, 53 | ]) 54 | .select_from( 55 | Query::select() 56 | .from(notification::Entity) 57 | .columns([ 58 | Column::UserId, 59 | Column::OriginalMessage, 60 | Column::NotificationMessage, 61 | Column::Keyword, 62 | ]) 63 | .to_owned(), 64 | ) 65 | .map_err(|e| DbErr::Migration(e.to_string()))? 
66 | .to_owned(), 67 | ) 68 | .await?; 69 | 70 | manager 71 | .drop_table(Table::drop().table(notification::Entity).to_owned()) 72 | .await?; 73 | 74 | manager 75 | .rename_table( 76 | Table::rename() 77 | .table(tmp_table, notification::Entity) 78 | .to_owned(), 79 | ) 80 | .await 81 | } 82 | 83 | async fn down(&self, manager: &SchemaManager) -> Result<(), DbErr> { 84 | let tmp_table = Alias::new("__migrated_sent_notifications"); 85 | manager 86 | .create_table( 87 | Table::create() 88 | .table(tmp_table.clone()) 89 | .col( 90 | ColumnDef::new(Column::UserId).big_integer().not_null(), 91 | ) 92 | .col( 93 | ColumnDef::new(Column::OriginalMessage) 94 | .big_integer() 95 | .not_null(), 96 | ) 97 | .col( 98 | ColumnDef::new(Column::NotificationMessage) 99 | .big_integer() 100 | .not_null(), 101 | ) 102 | .col(ColumnDef::new(Column::Keyword).string().not_null()) 103 | .primary_key( 104 | Index::create() 105 | .col(Column::NotificationMessage) 106 | .col(Column::Keyword), 107 | ) 108 | .to_owned(), 109 | ) 110 | .await?; 111 | 112 | manager 113 | .exec_stmt( 114 | Query::insert() 115 | .into_table(tmp_table.clone()) 116 | .columns([ 117 | Column::UserId, 118 | Column::OriginalMessage, 119 | Column::NotificationMessage, 120 | Column::Keyword, 121 | ]) 122 | .select_from( 123 | Query::select() 124 | .from(notification::Entity) 125 | .columns([ 126 | Column::UserId, 127 | Column::OriginalMessage, 128 | Column::NotificationMessage, 129 | Column::Keyword, 130 | ]) 131 | .to_owned(), 132 | ) 133 | .map_err(|e| DbErr::Migration(e.to_string()))? 134 | .to_owned(), 135 | ) 136 | .await?; 137 | 138 | manager 139 | .drop_table(Table::drop().table(notification::Entity).to_owned()) 140 | .await?; 141 | 142 | manager 143 | .rename_table( 144 | Table::rename() 145 | .table(tmp_table, notification::Entity) 146 | .to_owned(), 147 | ) 148 | .await 149 | } 150 | } 151 | -------------------------------------------------------------------------------- /src/db/migration/mod.rs: -------------------------------------------------------------------------------- 1 | use sea_orm_migration::{MigrationTrait, MigratorTrait}; 2 | 3 | mod m2022_08_04_000001_init; 4 | mod m2023_01_08_000001_composite_notification_key; 5 | mod m2023_05_18_000001_rename_pkey_index; 6 | 7 | pub(crate) struct Migrator; 8 | 9 | impl MigratorTrait for Migrator { 10 | fn migrations() -> Vec> { 11 | vec![ 12 | Box::new(m2022_08_04_000001_init::Migration), 13 | Box::new(m2023_01_08_000001_composite_notification_key::Migration), 14 | Box::new(m2023_05_18_000001_rename_pkey_index::Migration), 15 | ] 16 | } 17 | } 18 | -------------------------------------------------------------------------------- /src/db/mod.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2022 ThatsNoMoon 2 | // Licensed under the Open Software License version 3.0 3 | 4 | //! Interface for interacting with the sqlite database of keywords and other persistent user 5 | //! information. 
6 | 7 | #![cfg_attr(not(feature = "bot"), allow(dead_code))] 8 | 9 | #[cfg(not(any(feature = "sqlite", feature = "postgresql")))] 10 | compile_error!("The sqlite feature or the postgresql feature must be enabled"); 11 | 12 | #[cfg(feature = "backup")] 13 | mod backup; 14 | mod block; 15 | mod channel_keyword; 16 | mod guild_keyword; 17 | mod ignore; 18 | mod keyword; 19 | mod migration; 20 | mod mute; 21 | mod notification; 22 | mod opt_out; 23 | mod user_state; 24 | 25 | use anyhow::{anyhow, Result}; 26 | use once_cell::sync::OnceCell; 27 | use sea_orm::{Database, DatabaseConnection}; 28 | use sea_orm_migration::MigratorTrait; 29 | use serenity::model::id::{ChannelId, GuildId, MessageId, UserId}; 30 | use tracing::info; 31 | 32 | use self::migration::Migrator; 33 | #[cfg(feature = "bot")] 34 | pub(crate) use self::{ 35 | block::Block, 36 | ignore::Ignore, 37 | keyword::{Keyword, KeywordKind}, 38 | mute::Mute, 39 | notification::Notification, 40 | opt_out::OptOut, 41 | user_state::{UserState, UserStateKind}, 42 | }; 43 | use crate::settings::settings; 44 | 45 | /// Global connection pool to the database. 46 | static CONNECTION: OnceCell = OnceCell::new(); 47 | 48 | /// Gets a connection from the global connection pool. 49 | #[tracing::instrument] 50 | pub(crate) fn connection() -> &'static DatabaseConnection { 51 | CONNECTION 52 | .get() 53 | .expect("Database connection was not initialized") 54 | } 55 | 56 | /// Initializes the database. 57 | /// 58 | /// Creates the data folder and database file if necessary, and starts backups 59 | /// if enabled. 60 | pub(crate) async fn init() -> Result<()> { 61 | #[cfg(feature = "sqlite")] 62 | { 63 | use std::{fs::create_dir, io::ErrorKind}; 64 | 65 | use anyhow::{bail, Context as _}; 66 | 67 | let (path, url) = { 68 | let s = settings(); 69 | (s.database.path.as_ref(), s.database.url.as_ref()) 70 | }; 71 | 72 | match (path, url) { 73 | (Some(data_dir), None) => { 74 | if let Err(error) = create_dir(data_dir) { 75 | if error.kind() != ErrorKind::AlreadyExists { 76 | Err::<(), _>(error) 77 | .context("Failed to create data directory")?; 78 | } 79 | } 80 | 81 | let db_path = data_dir.join("data.db"); 82 | 83 | init_connection(format!( 84 | "sqlite://{}?mode=rwc", 85 | db_path.display() 86 | )) 87 | .await?; 88 | 89 | #[cfg(feature = "backup")] 90 | if settings().database.backup != Some(false) { 91 | let backup_dir = data_dir.join("backup"); 92 | 93 | backup::start_backup_cycle(db_path, backup_dir); 94 | } 95 | } 96 | (None, Some(url)) => { 97 | init_connection(url.to_string()).await?; 98 | #[cfg(feature = "backup")] 99 | if settings().database.backup == Some(true) { 100 | tracing::warn!( 101 | "Backups cannot be done using a URL to \ 102 | connect to the database. Use a path instead." 
103 | ); 104 | } 105 | } 106 | (None, None) => { 107 | bail!("One of database.path and database.url must be set") 108 | } 109 | (Some(_), Some(_)) => { 110 | bail!("Only one of database.path and database.url can be set") 111 | } 112 | } 113 | } 114 | 115 | #[cfg(not(feature = "sqlite"))] 116 | init_connection(settings().database.url.to_string()).await?; 117 | 118 | let conn = connection(); 119 | 120 | let migrations = Migrator::get_pending_migrations(conn).await?.len(); 121 | 122 | if migrations > 0 { 123 | info!("Applying {migrations} database migrations"); 124 | Migrator::up(conn, None).await?; 125 | } 126 | 127 | Ok(()) 128 | } 129 | 130 | async fn init_connection(url: String) -> Result<()> { 131 | let conn = Database::connect(url).await?; 132 | 133 | CONNECTION 134 | .set(conn) 135 | .map_err(|_| anyhow!("Database connection already initialized"))?; 136 | 137 | Ok(()) 138 | } 139 | 140 | /// Convenience macro to make a blocking tokio task and await it, creating a 141 | /// [`tracing`] span for the operation. 142 | #[macro_export] 143 | macro_rules! await_db { 144 | ($name:literal: |$conn:ident| $body:block) => {{ 145 | use ::anyhow::Context as _; 146 | 147 | let parent = ::tracing::Span::current(); 148 | 149 | ::tokio::task::spawn_blocking(move || -> ::anyhow::Result<_> { 150 | let span = ::tracing::info_span!(parent: &parent, "await_db"); 151 | let _entered = span.enter(); 152 | #[allow(unused_mut)] 153 | let mut $conn = $crate::db::pool(); 154 | 155 | $body 156 | }) 157 | .await 158 | .expect(concat!("Failed to join ", $name, " task")) 159 | .context(concat!("Failed to run DB query ", $name)) 160 | }}; 161 | } 162 | 163 | type DbInt = i64; 164 | 165 | /// Convenience trait for converting IDs to and from `DbInt`. 166 | trait IdDbExt { 167 | fn into_db(self) -> DbInt; 168 | 169 | fn from_db(x: DbInt) -> Self; 170 | } 171 | 172 | macro_rules! impl_id_ext { 173 | ($ty:ty $(, $($tys:ty),*)?) => { 174 | impl IdDbExt for $ty { 175 | fn into_db(self) -> DbInt { 176 | self.0.try_into().unwrap() 177 | } 178 | 179 | fn from_db(x: DbInt) -> Self { 180 | Self(x.try_into().unwrap()) 181 | } 182 | } 183 | 184 | impl_id_ext!($($($tys),*)?); 185 | }; 186 | () => {}; 187 | } 188 | 189 | impl_id_ext!(UserId, ChannelId, GuildId, MessageId); 190 | -------------------------------------------------------------------------------- /src/db/mute.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2022 ThatsNoMoon 2 | // Licensed under the Open Software License version 3.0 3 | 4 | //! Handling for mutes. 5 | 6 | use anyhow::{Context as _, Result}; 7 | use futures_util::TryStreamExt; 8 | use sea_orm::{ 9 | entity::prelude::{ 10 | DeriveActiveModelBehavior, DeriveColumn, DeriveEntityModel, 11 | DerivePrimaryKey, DeriveRelation, EntityTrait, EnumIter, 12 | PrimaryKeyTrait, 13 | }, 14 | ColumnTrait, Condition, IntoActiveModel, QueryFilter, QuerySelect, 15 | }; 16 | use serenity::model::id::{ChannelId, UserId}; 17 | 18 | use super::{connection, DbInt, IdDbExt}; 19 | 20 | #[derive( 21 | Clone, Debug, PartialEq, Eq, DeriveEntityModel, DeriveActiveModelBehavior, 22 | )] 23 | #[sea_orm(table_name = "mutes")] 24 | pub struct Model { 25 | #[sea_orm(primary_key)] 26 | pub(crate) user_id: DbInt, 27 | #[sea_orm(primary_key)] 28 | pub(crate) channel_id: DbInt, 29 | } 30 | 31 | #[derive(Debug, EnumIter, DeriveRelation)] 32 | pub enum Relation {} 33 | 34 | /// Represents a muted channel. 
35 | #[derive(Debug, Clone)] 36 | pub(crate) struct Mute { 37 | /// The ID of the user who muted the channel. 38 | pub(crate) user_id: UserId, 39 | /// The ID of the channel that was muted. 40 | pub(crate) channel_id: ChannelId, 41 | } 42 | 43 | impl Mute { 44 | /// Fetches a list of mutes for the user with the given ID from the DB. 45 | #[tracing::instrument] 46 | pub(crate) async fn user_mutes(user_id: UserId) -> Result<Vec<Self>> { 47 | Entity::find() 48 | .filter(Column::UserId.eq(user_id.into_db())) 49 | .stream(connection()) 50 | .await? 51 | .map_err(Into::into) 52 | .map_ok(Mute::from) 53 | .try_collect() 54 | .await 55 | } 56 | 57 | /// Checks if this mute exists in the DB. 58 | /// 59 | /// Returns true if a mute with `self.user_id` and `self.channel_id` exists 60 | /// in the DB. 61 | #[tracing::instrument] 62 | pub(crate) async fn exists(self) -> Result<bool> { 63 | let count = Entity::find() 64 | .select_only() 65 | .column_as(Column::UserId.count(), QueryAs::MuteCount) 66 | .filter( 67 | Condition::all() 68 | .add(Column::UserId.eq(self.user_id.into_db())) 69 | .add(Column::ChannelId.eq(self.channel_id.into_db())), 70 | ) 71 | .into_values::<i64, QueryAs>() 72 | .one(connection()) 73 | .await?; 74 | 75 | let count = count.context("No count for mutes returned")?; 76 | Ok(count == 1) 77 | } 78 | 79 | /// Inserts this mute into the DB. 80 | #[tracing::instrument] 81 | pub(crate) async fn insert(self) -> Result<()> { 82 | Entity::insert(Model::from(self).into_active_model()) 83 | .exec(connection()) 84 | .await?; 85 | 86 | Ok(()) 87 | } 88 | 89 | /// Deletes this mute from the DB. 90 | #[tracing::instrument] 91 | pub(crate) async fn delete(self) -> Result<()> { 92 | Entity::delete(Model::from(self).into_active_model()) 93 | .exec(connection()) 94 | .await?; 95 | 96 | Ok(()) 97 | } 98 | } 99 | 100 | #[derive(Clone, Copy, Debug, EnumIter, DeriveColumn)] 101 | enum QueryAs { 102 | MuteCount, 103 | } 104 | 105 | impl From<Model> for Mute { 106 | fn from(model: Model) -> Self { 107 | Self { 108 | user_id: UserId::from_db(model.user_id), 109 | channel_id: ChannelId::from_db(model.channel_id), 110 | } 111 | } 112 | } 113 | 114 | impl From<Mute> for Model { 115 | fn from(mute: Mute) -> Self { 116 | Self { 117 | user_id: mute.user_id.into_db(), 118 | channel_id: mute.channel_id.into_db(), 119 | } 120 | } 121 | } 122 | -------------------------------------------------------------------------------- /src/db/notification.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2023 ThatsNoMoon 2 | // Licensed under the Open Software License version 3.0 3 | 4 | //! Handling for sent notification messages.
5 | 6 | use std::time::{SystemTime, UNIX_EPOCH}; 7 | 8 | use anyhow::Result; 9 | use futures_util::TryStreamExt; 10 | use sea_orm::{ 11 | entity::prelude::{ 12 | DeriveActiveModelBehavior, DeriveEntityModel, DerivePrimaryKey, 13 | DeriveRelation, EntityTrait, EnumIter, PrimaryKeyTrait, 14 | }, 15 | ColumnTrait, Condition, IntoActiveModel, QueryFilter, QueryOrder, 16 | QuerySelect, 17 | }; 18 | use serenity::model::id::{MessageId, UserId}; 19 | 20 | use super::{connection, DbInt, IdDbExt}; 21 | use crate::global::DISCORD_EPOCH; 22 | 23 | #[derive( 24 | Clone, Debug, PartialEq, Eq, DeriveEntityModel, DeriveActiveModelBehavior, 25 | )] 26 | #[sea_orm(table_name = "sent_notifications")] 27 | pub struct Model { 28 | pub(crate) user_id: DbInt, 29 | pub(crate) original_message: DbInt, 30 | #[sea_orm(primary_key)] 31 | pub(crate) notification_message: DbInt, 32 | #[sea_orm(primary_key)] 33 | pub(crate) keyword: String, 34 | } 35 | 36 | #[derive(Debug, EnumIter, DeriveRelation)] 37 | pub enum Relation {} 38 | 39 | /// Represents a sent notification message. 40 | #[derive(Debug, Clone)] 41 | pub(crate) struct Notification { 42 | /// The ID of the message that caused the notification to be sent. 43 | pub(crate) original_message: MessageId, 44 | /// The ID of the sent notification message. 45 | pub(crate) notification_message: MessageId, 46 | /// The keyword in the original message that caused the notification to be 47 | /// sent. 48 | pub(crate) keyword: String, 49 | /// The ID of the user that the notification was sent to. 50 | pub(crate) user_id: UserId, 51 | } 52 | 53 | impl Notification { 54 | /// Fetches the notifications that were sent because of the given message 55 | /// from the DB. 56 | #[tracing::instrument] 57 | pub(crate) async fn notifications_of_message( 58 | message_id: MessageId, 59 | ) -> Result> { 60 | Entity::find() 61 | .filter(Column::OriginalMessage.eq(message_id.into_db())) 62 | .stream(connection()) 63 | .await? 64 | .map_err(Into::into) 65 | .map_ok(Notification::from) 66 | .try_collect() 67 | .await 68 | } 69 | 70 | /// Inserts this notification into the DB. 71 | #[tracing::instrument( 72 | skip(self), 73 | fields( 74 | self.user_id = %self.user_id, 75 | self.original_message = %self.original_message, 76 | self.notification_message = %self.notification_message, 77 | ))] 78 | pub(crate) async fn insert(self) -> Result<()> { 79 | Entity::insert(Model::from(self).into_active_model()) 80 | .exec(connection()) 81 | .await?; 82 | 83 | Ok(()) 84 | } 85 | 86 | /// Removes notifications in the given message from the DB. 87 | #[tracing::instrument] 88 | pub(crate) async fn delete_notification_message( 89 | message_id: MessageId, 90 | ) -> Result<()> { 91 | Entity::delete_many() 92 | .filter(Column::NotificationMessage.eq(message_id.into_db())) 93 | .exec(connection()) 94 | .await?; 95 | 96 | Ok(()) 97 | } 98 | 99 | /// Removes all notifications sent because of the given message from the DB. 100 | #[tracing::instrument] 101 | pub(crate) async fn delete_notifications_of_message( 102 | message_id: MessageId, 103 | ) -> Result<()> { 104 | Entity::delete_many() 105 | .filter(Column::OriginalMessage.eq(message_id.into_db())) 106 | .exec(connection()) 107 | .await?; 108 | 109 | Ok(()) 110 | } 111 | 112 | /// Gets notifications older than a certain duration from the DB. 
113 | #[tracing::instrument] 114 | pub(crate) async fn notifications_before( 115 | count: u64, 116 | time: SystemTime, 117 | ) -> Result<Vec<Self>> { 118 | Entity::find() 119 | .filter(Column::OriginalMessage.lte(time_to_max_snowflake(time)?)) 120 | .order_by_asc(Column::OriginalMessage) 121 | .limit(count) 122 | .stream(connection()) 123 | .await? 124 | .map_err(Into::into) 125 | .map_ok(Notification::from) 126 | .try_collect() 127 | .await 128 | } 129 | 130 | /// Deletes a list of notifications from the DB. 131 | #[tracing::instrument(skip_all)] 132 | pub(crate) async fn delete_notifications( 133 | message_ids: impl IntoIterator<Item = MessageId>, 134 | ) -> Result<()> { 135 | Entity::delete_many() 136 | .filter(message_ids.into_iter().fold( 137 | Condition::any(), 138 | |cond, id| { 139 | cond.add(Column::NotificationMessage.eq(id.into_db())) 140 | }, 141 | )) 142 | .exec(connection()) 143 | .await?; 144 | 145 | Ok(()) 146 | } 147 | } 148 | 149 | fn time_to_min_snowflake(time: SystemTime) -> Result<u64> { 150 | let unix = time.duration_since(UNIX_EPOCH)?.as_millis() as u64; 151 | let oldest_discord = unix - DISCORD_EPOCH; 152 | Ok(oldest_discord << 22) 153 | } 154 | 155 | fn time_to_max_snowflake(time: SystemTime) -> Result<u64> { 156 | let min = time_to_min_snowflake(time)?; 157 | Ok(min | (!0 >> 22)) 158 | } 159 | 160 | impl From<Model> for Notification { 161 | fn from(model: Model) -> Self { 162 | Self { 163 | user_id: UserId::from_db(model.user_id), 164 | original_message: MessageId::from_db(model.original_message), 165 | notification_message: MessageId::from_db( 166 | model.notification_message, 167 | ), 168 | keyword: model.keyword, 169 | } 170 | } 171 | } 172 | 173 | impl From<Notification> for Model { 174 | fn from(notification: Notification) -> Self { 175 | Self { 176 | user_id: notification.user_id.into_db(), 177 | original_message: notification.original_message.into_db(), 178 | notification_message: notification.notification_message.into_db(), 179 | keyword: notification.keyword, 180 | } 181 | } 182 | } 183 | -------------------------------------------------------------------------------- /src/db/opt_out.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2022 ThatsNoMoon 2 | // Licensed under the Open Software License version 3.0 3 | 4 | //! Handling for user opt-outs. 5 | 6 | use anyhow::Result; 7 | use futures_util::FutureExt; 8 | use sea_orm::{ 9 | entity::prelude::{ 10 | DeriveActiveModelBehavior, DeriveEntityModel, DerivePrimaryKey, 11 | DeriveRelation, EntityTrait, EnumIter, PrimaryKeyTrait, 12 | }, 13 | ActiveValue, ColumnTrait, DbErr, QueryFilter, TransactionTrait, 14 | }; 15 | use serenity::model::id::UserId; 16 | 17 | use super::{ 18 | block, channel_keyword, connection, guild_keyword, ignore, mute, DbInt, 19 | IdDbExt, 20 | }; 21 | 22 | #[derive( 23 | Clone, Debug, PartialEq, Eq, DeriveEntityModel, DeriveActiveModelBehavior, 24 | )] 25 | #[sea_orm(table_name = "opt_outs")] 26 | pub struct Model { 27 | #[sea_orm(primary_key)] 28 | pub(crate) user_id: DbInt, 29 | } 30 | 31 | #[derive(Debug, EnumIter, DeriveRelation)] 32 | pub enum Relation {} 33 | 34 | /// Represents an opt-out made by a user. 35 | /// 36 | /// Users that opt out will not have their messages highlighted. 37 | #[derive(Debug, Clone)] 38 | pub(crate) struct OptOut { 39 | /// The user that opted out. 40 | pub(crate) user_id: UserId, 41 | } 42 | 43 | impl OptOut { 44 | /// Checks if this opt-out already exists in the DB.
43 | impl OptOut {
44 | 	/// Checks if this opt-out already exists in the DB.
45 | 	#[tracing::instrument]
46 | 	pub(crate) async fn exists(self) -> Result<bool> {
47 | 		let result = Entity::find_by_id(self.user_id.into_db())
48 | 			.one(connection())
49 | 			.await?;
50 | 
51 | 		Ok(result.is_some())
52 | 	}
53 | 
54 | 	/// Adds this opt-out to the DB.
55 | 	#[tracing::instrument]
56 | 	pub(crate) async fn insert(self) -> Result<()> {
57 | 		Entity::insert(ActiveModel {
58 | 			user_id: ActiveValue::Set(self.user_id.into_db()),
59 | 		})
60 | 		.exec(connection())
61 | 		.await?;
62 | 
63 | 		Ok(())
64 | 	}
65 | 
66 | 	/// Deletes this opt-out from the DB.
67 | 	#[tracing::instrument]
68 | 	pub(crate) async fn delete(self) -> Result<()> {
69 | 		Entity::delete(ActiveModel {
70 | 			user_id: ActiveValue::Set(self.user_id.into_db()),
71 | 		})
72 | 		.exec(connection())
73 | 		.await?;
74 | 
75 | 		Ok(())
76 | 	}
77 | 
78 | 	/// Deletes this user's data from the DB as they opt out.
79 | 	#[tracing::instrument]
80 | 	pub(crate) async fn delete_user_data(self) -> Result<()> {
81 | 		let user_id = self.user_id.into_db();
82 | 
83 | 		connection()
84 | 			.transaction(|transaction| {
85 | 				async move {
86 | 					guild_keyword::Entity::delete_many()
87 | 						.filter(guild_keyword::Column::UserId.eq(user_id))
88 | 						.exec(transaction)
89 | 						.await?;
90 | 
91 | 					channel_keyword::Entity::delete_many()
92 | 						.filter(channel_keyword::Column::UserId.eq(user_id))
93 | 						.exec(transaction)
94 | 						.await?;
95 | 
96 | 					block::Entity::delete_many()
97 | 						.filter(block::Column::UserId.eq(user_id))
98 | 						.exec(transaction)
99 | 						.await?;
100 | 
101 | 					mute::Entity::delete_many()
102 | 						.filter(mute::Column::UserId.eq(user_id))
103 | 						.exec(transaction)
104 | 						.await?;
105 | 
106 | 					ignore::Entity::delete_many()
107 | 						.filter(ignore::Column::UserId.eq(user_id))
108 | 						.exec(transaction)
109 | 						.await?;
110 | 
111 | 					Ok::<(), DbErr>(())
112 | 				}
113 | 				.boxed()
114 | 			})
115 | 			.await
116 | 			.map_err(Into::into)
117 | 	}
118 | }
119 | 
--------------------------------------------------------------------------------
/src/db/user_state.rs:
--------------------------------------------------------------------------------
1 | // Copyright 2022 ThatsNoMoon
2 | // Licensed under the Open Software License version 3.0
3 | 
4 | //! Handling for user states; whether or not the last notification DM was
5 | //! successful.
6 | 
7 | use anyhow::{bail, Result};
8 | use sea_orm::{
9 | 	entity::prelude::{
10 | 		DeriveActiveModelBehavior, DeriveEntityModel, DerivePrimaryKey,
11 | 		DeriveRelation, EntityTrait, EnumIter, PrimaryKeyTrait,
12 | 	},
13 | 	sea_query::OnConflict,
14 | 	IntoActiveModel,
15 | };
16 | use serenity::model::id::UserId;
17 | 
18 | use super::{connection, DbInt, IdDbExt};
19 | 
20 | #[derive(
21 | 	Clone, Debug, PartialEq, Eq, DeriveEntityModel, DeriveActiveModelBehavior,
22 | )]
23 | #[sea_orm(table_name = "user_states")]
24 | pub struct Model {
25 | 	#[sea_orm(primary_key)]
26 | 	pub(crate) user_id: DbInt,
27 | 	pub(crate) state: u8,
28 | }
29 | 
30 | #[derive(Debug, EnumIter, DeriveRelation)]
31 | pub enum Relation {}
32 | 
33 | /// Description of a user's state.
34 | #[derive(Debug, Clone)]
35 | pub(crate) struct UserState {
36 | 	pub(crate) user_id: UserId,
37 | 	pub(crate) state: UserStateKind,
38 | }
39 | 
40 | #[derive(Debug, Clone, Copy)]
41 | #[repr(u8)]
42 | pub(crate) enum UserStateKind {
43 | 	/// Indicates that the last DM sent to notify this user failed.
44 | 	CannotDm = 0,
45 | }
46 | 
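As a sketch of how these states are intended to be used (the `set` and `clear` methods appear later in this file; the wrapper function and the point at which it runs are assumptions):

use anyhow::Result;
use serenity::model::id::UserId;

// `UserState` and `UserStateKind` are the items in this module; when this
// runs relative to DM delivery is a simplification.
async fn record_dm_outcome(user_id: UserId, dm_succeeded: bool) -> Result<()> {
    if dm_succeeded {
        // A successful DM clears any earlier `CannotDm` marker.
        UserState::clear(user_id).await
    } else {
        UserState {
            user_id,
            state: UserStateKind::CannotDm,
        }
        .set()
        .await
    }
}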
47 | impl UserState {
48 | 	const CANNOT_DM_STATE: u8 = UserStateKind::CannotDm as u8;
49 | 
50 | 	/// Fetches the state of the user with the given ID from the DB.
51 | 	///
52 | 	/// Returns `None` if the user has no recorded state.
53 | 	#[tracing::instrument]
54 | 	pub(crate) async fn user_state(user_id: UserId) -> Result<Option<Self>> {
55 | 		Entity::find_by_id(user_id.into_db())
56 | 			.one(connection())
57 | 			.await?
58 | 			.map(Self::try_from)
59 | 			.transpose()
60 | 	}
61 | 
62 | 	/// Sets the state of the user in the DB.
63 | 	#[tracing::instrument]
64 | 	pub(crate) async fn set(self) -> Result<()> {
65 | 		Entity::insert(Model::from(self).into_active_model())
66 | 			.on_conflict(
67 | 				OnConflict::column(Column::UserId)
68 | 					.update_column(Column::State)
69 | 					.to_owned(),
70 | 			)
71 | 			.exec(connection())
72 | 			.await?;
73 | 
74 | 		Ok(())
75 | 	}
76 | 
77 | 	/// Deletes this user state from the DB.
78 | 	#[tracing::instrument]
79 | 	pub(crate) async fn delete(self) -> Result<()> {
80 | 		Self::clear(self.user_id).await
81 | 	}
82 | 
83 | 	/// Clears any state of the user with the given ID.
84 | 	#[tracing::instrument]
85 | 	pub(crate) async fn clear(user_id: UserId) -> Result<()> {
86 | 		Entity::delete_by_id(user_id.into_db())
87 | 			.exec(connection())
88 | 			.await?;
89 | 
90 | 		Ok(())
91 | 	}
92 | }
93 | 
94 | impl TryFrom<Model> for UserState {
95 | 	type Error = anyhow::Error;
96 | 
97 | 	fn try_from(model: Model) -> Result<Self> {
98 | 		Ok(Self {
99 | 			user_id: UserId::from_db(model.user_id),
100 | 			state: match model.state {
101 | 				Self::CANNOT_DM_STATE => UserStateKind::CannotDm,
102 | 				other => bail!("Unknown user state: {other}"),
103 | 			},
104 | 		})
105 | 	}
106 | }
107 | 
108 | impl From<UserState> for Model {
109 | 	fn from(state: UserState) -> Self {
110 | 		Model {
111 | 			user_id: state.user_id.into_db(),
112 | 			state: state.state as u8,
113 | 		}
114 | 	}
115 | }
116 | 
--------------------------------------------------------------------------------
/src/global.rs:
--------------------------------------------------------------------------------
1 | // Copyright 2022 ThatsNoMoon
2 | // Licensed under the Open Software License version 3.0
3 | 
4 | //! Global constants.
5 | 
6 | /// How many times to retry notifications after internal server errors from Discord.
7 | pub(crate) const NOTIFICATION_RETRIES: u8 = 5;
8 | 
9 | /// Color of normal embeds (from help command and notifications).
10 | pub(crate) const EMBED_COLOR: u32 = 0xefff47;
11 | /// Color of embeds reporting an error to the user.
12 | pub(crate) const ERROR_COLOR: u32 = 0xff4747;
13 | 
14 | pub(crate) const DISCORD_EPOCH: u64 = 1420070400000;
15 | 
--------------------------------------------------------------------------------
/src/logging/mod.rs:
--------------------------------------------------------------------------------
1 | // Copyright 2022 ThatsNoMoon
2 | // Licensed under the Open Software License version 3.0
3 | 
4 | //! Logging setup using [`tracing`].
5 | 
6 | use anyhow::Result;
7 | #[cfg(any(feature = "monitoring", feature = "reporting"))]
8 | use tracing::warn;
9 | use tracing::{Metadata, Subscriber};
10 | use tracing_subscriber::{
11 | 	filter::FilterFn,
12 | 	layer::{Layer, Layered, SubscriberExt},
13 | 	registry::LookupSpan,
14 | 	util::SubscriberInitExt,
15 | };
16 | 
17 | use crate::settings::{settings, LogFormat, Settings};
18 | 
19 | #[cfg(feature = "monitoring")]
20 | mod monitoring;
21 | 
22 | #[cfg(feature = "reporting")]
23 | mod reporting;
24 | 
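The `use_filters` helper below picks the most specific configured filter by walking the event's module path from the full path up to the crate root, falling back to the global level. A standalone illustration of that lookup order, with a hypothetical filter table and plain integers standing in for `LevelFilter`:

use std::collections::HashMap;

// Hypothetical filter table, like `settings().logging.filters`: the most
// specific configured prefix of the module path wins, and the global level
// is the fallback.
fn effective_max_level(
    module_path: &str,
    filters: &HashMap<String, u8>, // u8 stands in for LevelFilter here
    global: u8,
) -> u8 {
    std::iter::successors(Some(module_path), |path| {
        path.rsplit_once("::").map(|(prefix, _)| prefix)
    })
    .find_map(|path| filters.get(path).copied())
    .unwrap_or(global)
}

fn main() {
    let mut filters = HashMap::new();
    filters.insert("highlights".to_owned(), 3); // e.g. INFO
    filters.insert("highlights::db".to_owned(), 5); // e.g. TRACE

    // "highlights::db::notification" matches "highlights::db" before "highlights".
    assert_eq!(effective_max_level("highlights::db::notification", &filters, 2), 5);
    // "highlights::bot" falls back to the "highlights" entry.
    assert_eq!(effective_max_level("highlights::bot", &filters, 2), 3);
    // Unrelated crates use the global level.
    assert_eq!(effective_max_level("serenity::http", &filters, 2), 2);
}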
25 | /// Applies configured filters to the given tracing metadata.
26 | ///
27 | /// Returns true if the metadata passed configured filters and should be logged,
28 | /// and false if it should be filtered out.
29 | ///
30 | /// Uses [`LoggingSettings::level`](crate::settings::LoggingSettings::level) and
31 | /// [`LoggingSettings::filters`](crate::settings::LoggingSettings::filters).
32 | fn use_filters(settings: &Settings, metadata: &Metadata) -> bool {
33 | 	std::iter::successors(metadata.module_path(), |path| {
34 | 		path.rsplit_once("::").map(|(prefix, _)| prefix)
35 | 	})
36 | 	.filter_map(|path| {
37 | 		settings
38 | 			.logging
39 | 			.filters
40 | 			.get(path)
41 | 			.map(|filter| filter >= metadata.level())
42 | 	})
43 | 	.chain(Some(&settings.logging.level >= metadata.level()))
44 | 	.next()
45 | 	.unwrap_or(true)
46 | }
47 | 
48 | /// Initializes logging via [`tracing`].
49 | ///
50 | /// This initializes [`reporting`] and [`monitoring`], if
51 | /// enabled, as well as basic stdout logging.
52 | pub(crate) fn init() -> Result<()> {
53 | 	let fmt =
54 | 		tracing_subscriber::fmt::layer().with_ansi(settings().logging.color);
55 | 
56 | 	let filter = {
57 | 		let settings = settings();
58 | 		FilterFn::new(|metadata| use_filters(settings, metadata))
59 | 	};
60 | 
61 | 	fn init_rest<L, S>(subscriber: Layered<L, S>) -> Result<()>
62 | 	where
63 | 		L: Layer<S> + Send + Sync + 'static,
64 | 		S: Subscriber + for<'span> LookupSpan<'span> + Send + Sync + 'static,
65 | 	{
66 | 		#[cfg(feature = "monitoring")]
67 | 		let (is_monitoring, subscriber) = {
68 | 			let layer = monitoring::init()?;
69 | 			(layer.is_some(), subscriber.with(layer))
70 | 		};
71 | 
72 | 		#[cfg(feature = "reporting")]
73 | 		let (is_reporting, subscriber) = {
74 | 			let layer = reporting::init();
75 | 			(layer.is_some(), subscriber.with(layer))
76 | 		};
77 | 
78 | 		subscriber.try_init()?;
79 | 
80 | 		#[cfg(feature = "monitoring")]
81 | 		if !is_monitoring {
82 | 			warn!("Jaeger agent address not provided; not reporting traces");
83 | 		}
84 | 
85 | 		#[cfg(feature = "reporting")]
86 | 		if !is_reporting {
87 | 			warn!("Webhook URL is not present, not reporting panics");
88 | 		}
89 | 
90 | 		Ok(())
91 | 	}
92 | 
93 | 	match &settings().logging.format {
94 | 		LogFormat::Compact => {
95 | 			let subscriber = tracing_subscriber::registry()
96 | 				.with(fmt.compact().with_filter(filter));
97 | 			init_rest(subscriber)
98 | 		}
99 | 		LogFormat::Pretty => {
100 | 			let subscriber = tracing_subscriber::registry()
101 | 				.with(fmt.pretty().with_filter(filter));
102 | 			init_rest(subscriber)
103 | 		}
104 | 		LogFormat::Json => {
105 | 			let subscriber = tracing_subscriber::registry()
106 | 				.with(fmt.json().with_filter(filter));
107 | 			init_rest(subscriber)
108 | 		}
109 | 	}
110 | }
111 | 
--------------------------------------------------------------------------------
/src/logging/monitoring.rs:
--------------------------------------------------------------------------------
1 | // Copyright 2022 ThatsNoMoon
2 | // Licensed under the Open Software License version 3.0
3 | 
4 | //! Monitoring with OpenTelemetry and Jaeger
5 | 
6 | use anyhow::Result;
7 | use opentelemetry::sdk::trace::{self, Sampler, Tracer};
8 | use tracing::Subscriber;
9 | use tracing_opentelemetry::OpenTelemetryLayer;
10 | use tracing_subscriber::{
11 | 	filter::{FilterFn, Filtered},
12 | 	layer::Layer as _,
13 | 	registry::LookupSpan,
14 | };
15 | 
16 | use crate::settings::settings;
17 | 
18 | /// Composed [`Layer`](tracing_subscriber::layer::Layer) used for monitoring.
19 | pub(crate) type Layer<S> = Filtered<OpenTelemetryLayer<S, Tracer>, FilterFn, S>;
20 | 
21 | /// Initializes monitoring using [`opentelemetry_jaeger`].
22 | pub(crate) fn init<S: Subscriber + for<'span> LookupSpan<'span>>(
23 | ) -> Result<Option<Layer<S>>> {
24 | 	if let Some(address) = settings().logging.jaeger {
25 | 		let tracer = opentelemetry_jaeger::new_agent_pipeline()
26 | 			.with_endpoint(address.socket_addr)
27 | 			.with_service_name(env!("CARGO_PKG_NAME"))
28 | 			.with_trace_config(trace::config().with_sampler(
29 | 				Sampler::TraceIdRatioBased(settings().logging.sample_ratio),
30 | 			))
31 | 			.with_auto_split_batch(true)
32 | 			.install_batch(opentelemetry::runtime::Tokio)?;
33 | 		let opentelemetry = tracing_opentelemetry::layer().with_tracer(tracer);
34 | 
35 | 		Ok(Some(opentelemetry.with_filter(FilterFn::new(|metadata| {
36 | 			metadata.is_event()
37 | 				|| metadata
38 | 					.module_path()
39 | 					.map_or(true, |path| !path.starts_with("h2::"))
40 | 		}))))
41 | 	} else {
42 | 		Ok(None)
43 | 	}
44 | }
45 | 
--------------------------------------------------------------------------------
/src/logging/reporting.rs:
--------------------------------------------------------------------------------
1 | // Copyright 2022 ThatsNoMoon
2 | // Licensed under the Open Software License version 3.0
3 | 
4 | //! Error and panic reporting to a Discord webhook.
5 | 
6 | use std::{fmt::Write, mem, panic, time::Duration};
7 | 
8 | use anyhow::Result;
9 | use reqwest::{
10 | 	blocking::{self, Client as BlockingClient},
11 | 	Client, Url,
12 | };
13 | use serde::Serialize;
14 | use tracing::{
15 | 	error,
16 | 	metadata::LevelFilter,
17 | 	span::{Attributes, Record},
18 | 	warn, Event, Id, Subscriber,
19 | };
20 | use tracing_subscriber::{
21 | 	field::RecordFields,
22 | 	fmt::{
23 | 		format::{DefaultFields, Writer},
24 | 		FormatFields, FormattedFields,
25 | 	},
26 | 	layer::{Context, Layer},
27 | 	registry::LookupSpan,
28 | };
29 | 
30 | use crate::settings::settings;
31 | 
32 | /// Message that can be serialized to be sent to a webhook.
33 | #[derive(Serialize)]
34 | struct WebhookMessage {
35 | 	content: String,
36 | }
37 | 
38 | /// [`Layer`] for reporting errors to a webhook.
39 | pub(crate) struct WebhookLayer {
40 | 	url: Url,
41 | 	client: Client,
42 | }
43 | 
44 | impl WebhookLayer {
45 | 	/// Create a new `WebhookLayer` that reports to the given Discord webhook URL.
46 | 	pub(super) fn new(url: Url) -> Self {
47 | 		WebhookLayer {
48 | 			url,
49 | 			client: Client::new(),
50 | 		}
51 | 	}
52 | }
53 | 
54 | /// Proxy type for [`DefaultFields`].
55 | ///
56 | /// This ensures that the webhook fields don't end up with ANSI control
57 | /// sequences, as they would if they shared a [`DefaultFields`] buffer with
58 | /// the [`tracing_subscriber::fmt::Layer`] outputting to stdout.
59 | struct WebhookFields;
60 | 
61 | impl<'w> FormatFields<'w> for WebhookFields {
62 | 	fn format_fields<R: RecordFields>(
63 | 		&self,
64 | 		writer: Writer<'w>,
65 | 		fields: R,
66 | 	) -> std::fmt::Result {
67 | 		DefaultFields::new().format_fields(writer, fields)
68 | 	}
69 | 
70 | 	fn add_fields(
71 | 		&self,
72 | 		current: &'w mut FormattedFields<Self>,
73 | 		fields: &Record<'_>,
74 | 	) -> std::fmt::Result {
75 | 		let content = mem::take(&mut current.fields);
76 | 		let mut new = FormattedFields::new(content);
77 | 		let res = DefaultFields::new().add_fields(&mut new, fields);
78 | 
79 | 		current.fields = new.fields;
80 | 		res
81 | 	}
82 | }
83 | 
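For reference, `WebhookMessage` above serializes to the minimal JSON body a Discord webhook accepts: a single `content` field. A quick standalone check (this uses `serde_json`, which is not imported by this module and is pulled in here only for illustration):

use serde::Serialize;

#[derive(Serialize)]
struct WebhookMessage {
    content: String,
}

fn main() {
    let message = WebhookMessage {
        content: "**[ERROR]** something went wrong".to_owned(),
    };
    // Discord webhooks accept a JSON body with a `content` field.
    assert_eq!(
        serde_json::to_string(&message).unwrap(),
        r#"{"content":"**[ERROR]** something went wrong"}"#
    );
}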
84 | /// Formats `event` in the context `ctx` for display in a Discord webhook.
85 | fn format_event<S>(event: &Event<'_>, ctx: Context<'_, S>) -> String
86 | where
87 | 	S: Subscriber + for<'s> LookupSpan<'s>,
88 | {
89 | 	let metadata = event.metadata();
90 | 	let mut contents = "**[ERROR]** ".to_owned();
91 | 
92 | 	if let Some(scope) = ctx.event_scope(event) {
93 | 		for span in scope.from_root() {
94 | 			if let Some(fields) =
95 | 				span.extensions().get::<FormattedFields<WebhookFields>>()
96 | 			{
97 | 				let _ = write!(contents, "__{}__", span.name());
98 | 				if !fields.is_empty() {
99 | 					let _ = write!(contents, "{{*{}*}}", fields);
100 | 				}
101 | 
102 | 				contents.push_str(": ");
103 | 			}
104 | 		}
105 | 	}
106 | 
107 | 	if let Some(file) = metadata.file() {
108 | 		let _ = write!(contents, "*{}:", file);
109 | 
110 | 		if let Some(line) = metadata.line() {
111 | 			let _ = write!(contents, "{}:* ", line);
112 | 		} else {
113 | 			contents.push_str("* ");
114 | 		}
115 | 	}
116 | 
117 | 	let _ = write!(contents, "__{}__: ", metadata.target());
118 | 
119 | 	let mut formatter = FormattedFields::<WebhookFields>::new(contents);
120 | 
121 | 	let writer = formatter.as_writer();
122 | 
123 | 	let _ = WebhookFields.format_fields(writer, event);
124 | 
125 | 	formatter.fields
126 | }
127 | 
128 | impl<S: Subscriber + for<'a> LookupSpan<'a>> Layer<S> for WebhookLayer {
129 | 	fn on_new_span(
130 | 		&self,
131 | 		attrs: &Attributes<'_>,
132 | 		id: &Id,
133 | 		ctx: Context<'_, S>,
134 | 	) {
135 | 		let span = ctx.span(id).expect("Couldn't get span for attributes");
136 | 		let mut extensions = span.extensions_mut();
137 | 
138 | 		if extensions
139 | 			.get_mut::<FormattedFields<WebhookFields>>()
140 | 			.is_none()
141 | 		{
142 | 			let mut fields =
143 | 				FormattedFields::<WebhookFields>::new(String::new());
144 | 			if WebhookFields
145 | 				.format_fields(fields.as_writer(), attrs)
146 | 				.is_ok()
147 | 			{
148 | 				extensions.insert(fields);
149 | 			}
150 | 		}
151 | 	}
152 | 
153 | 	fn on_record(&self, id: &Id, values: &Record<'_>, ctx: Context<'_, S>) {
154 | 		let span = ctx.span(id).expect("Couldn't get span for record");
155 | 		let mut extensions = span.extensions_mut();
156 | 
157 | 		if let Some(fields) =
158 | 			extensions.get_mut::<FormattedFields<WebhookFields>>()
159 | 		{
160 | 			let _ = WebhookFields.add_fields(fields, values);
161 | 			return;
162 | 		}
163 | 
164 | 		let mut fields = FormattedFields::<WebhookFields>::new(String::new());
165 | 		if WebhookFields
166 | 			.format_fields(fields.as_writer(), values)
167 | 			.is_ok()
168 | 		{
169 | 			extensions.insert(fields);
170 | 		}
171 | 	}
172 | 
173 | 	fn on_event(&self, event: &Event<'_>, ctx: Context<'_, S>) {
174 | 		if &LevelFilter::ERROR < event.metadata().level() {
175 | 			return;
176 | 		}
177 | 
178 | 		let message = WebhookMessage {
179 | 			content: format_event(event, ctx),
180 | 		};
181 | 
182 | 		let client = self.client.clone();
183 | 		let url = self.url.clone();
184 | 
185 | 		tokio::spawn(async move {
186 | 			if let Err(e) = client
187 | 				.post(url)
188 | 				.json(&message)
189 | 				.timeout(Duration::from_secs(5))
190 | 				.send()
191 | 				.await
192 | 			{
193 | 				warn!("Error reporting error: {}", e)
194 | 			}
195 | 		});
196 | 	}
197 | }
198 | 
199 | /// Reports a panic to the configured webhook URL.
200 | pub(crate) fn report_panic(
201 | 	info: &panic::PanicInfo,
202 | 	url: Url,
203 | ) -> Result<blocking::Response> {
204 | 	let client = BlockingClient::builder().build()?;
205 | 
206 | 	let message = WebhookMessage {
207 | 		content: format!("**[PANIC]** {}", info),
208 | 	};
209 | 
210 | 	Ok(client
211 | 		.post(url)
212 | 		.json(&message)
213 | 		.timeout(Duration::from_secs(5))
214 | 		.send()?)
215 | }
216 | 
217 | /// Initializes webhook reporting.
218 | ///
219 | /// If a [webhook URL](crate::settings::LoggingSettings::webhook) is configured,
220 | /// registers [`report_panic`] as a panic hook and returns a [`WebhookLayer`] to
221 | /// be registered with [`tracing_subscriber`].
222 | ///
223 | /// If no webhook URL is configured, returns None.
224 | pub(crate) fn init() -> Option<WebhookLayer> {
225 | 	if let Some(url) = settings().logging.webhook.clone() {
226 | 		let default_panic_hook = panic::take_hook();
227 | 
228 | 		let reporting_panic_hook: Box<
229 | 			dyn Fn(&panic::PanicInfo<'_>) + Send + Sync + 'static,
230 | 		> = {
231 | 			let url = url.clone();
232 | 			Box::new(move |info| {
233 | 				if let Err(e) = report_panic(info, url.clone()) {
234 | 					error!("Error reporting panic: {}", e);
235 | 				}
236 | 
237 | 				default_panic_hook(info);
238 | 			})
239 | 		};
240 | 
241 | 		panic::set_hook(reporting_panic_hook);
242 | 
243 | 		Some(WebhookLayer::new(url))
244 | 	} else {
245 | 		None
246 | 	}
247 | }
248 | 
--------------------------------------------------------------------------------
/src/main.rs:
--------------------------------------------------------------------------------
1 | // Copyright 2023 ThatsNoMoon
2 | // Licensed under the Open Software License version 3.0
3 | 
4 | //! Highlights is a simple but flexible keyword highlighting bot for Discord.
5 | //!
6 | //! The code for highlights is organized into mostly independent modules. This
7 | //! module handles creating the client and registering event listeners.
8 | 
9 | #![allow(clippy::tabs_in_doc_comments)]
10 | 
11 | use anyhow::Result;
12 | use tracing::warn;
13 | 
14 | use crate::settings::settings;
15 | 
16 | pub(crate) mod db;
17 | 
18 | pub(crate) mod settings;
19 | 
20 | #[cfg(feature = "bot")]
21 | pub(crate) mod global;
22 | 
23 | pub(crate) mod logging;
24 | 
25 | #[cfg(feature = "bot")]
26 | mod bot;
27 | 
28 | /// Entrypoint function to initialize other modules.
29 | #[tokio::main]
30 | async fn main() -> Result<()> {
31 | 	settings::init()?;
32 | 
33 | 	logging::init()?;
34 | 
35 | 	if settings().behavior.patience_seconds.is_some() {
36 | 		warn!(
37 | 			"Your configuration includes behavior.patience_seconds. \
38 | 			This setting is deprecated; please use behavior.patience instead. \
39 | 			For example, patience = \"2m\"."
40 | 		);
41 | 	}
42 | 
43 | 	db::init().await?;
44 | 
45 | 	#[cfg(feature = "bot")]
46 | 	bot::init().await?;
47 | 
48 | 	#[cfg(not(feature = "bot"))]
49 | 	futures_util::future::pending::<()>().await;
50 | 
51 | 	Ok(())
52 | }
53 | 
--------------------------------------------------------------------------------
/src/settings.rs:
--------------------------------------------------------------------------------
1 | // Copyright 2023 joshyrobot, ThatsNoMoon
2 | // Licensed under the Open Software License version 3.0
3 | 
4 | //! Handling of bot configuration for hosters.
5 | 
6 | #[cfg(feature = "sqlite")]
7 | use std::path::PathBuf;
8 | #[cfg(feature = "bot")]
9 | use std::time::Duration;
10 | use std::{
11 | 	collections::HashMap,
12 | 	env::{self, VarError},
13 | 	fs::read_to_string,
14 | 	io::ErrorKind,
15 | };
16 | 
17 | use anyhow::{bail, Result};
18 | use config::{
19 | 	builder::DefaultState, ConfigBuilder, ConfigError, Environment, File,
20 | 	FileFormat,
21 | };
22 | use once_cell::sync::OnceCell;
23 | use serde::Deserialize;
24 | #[cfg(feature = "bot")]
25 | use serenity::model::id::GuildId;
26 | use tracing::metadata::LevelFilter;
27 | use url::Url;
28 | 
29 | #[cfg(feature = "bot")]
30 | mod duration_de {
31 | 	use std::{fmt, time::Duration};
32 | 
33 | 	use serde::{de, Deserializer};
34 | 
35 | 	/// Visitor to deserialize a `Duration` from a number of seconds.
36 | 	struct DurationVisitor;
37 | 	impl<'de> de::Visitor<'de> for DurationVisitor {
38 | 		type Value = Duration;
39 | 		fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
40 | 			write!(formatter, "a std::time::Duration in seconds")
41 | 		}
42 | 		fn visit_u64<E>(self, v: u64) -> Result<Self::Value, E>
43 | 		where
44 | 			E: de::Error,
45 | 		{
46 | 			Ok(Duration::from_secs(v))
47 | 		}
48 | 	}
49 | 	pub(super) fn deserialize_duration<'de, D>(
50 | 		d: D,
51 | 	) -> Result<Option<Duration>, D::Error>
52 | 	where
53 | 		D: Deserializer<'de>,
54 | 	{
55 | 		d.deserialize_u64(DurationVisitor).map(Some)
56 | 	}
57 | }
58 | 
59 | #[cfg(feature = "bot")]
60 | use duration_de::deserialize_duration;
61 | 
62 | #[cfg(feature = "monitoring")]
63 | mod user_address {
64 | 	use std::{
65 | 		fmt,
66 | 		net::{SocketAddr, ToSocketAddrs},
67 | 	};
68 | 
69 | 	use serde::{de, Deserialize, Deserializer};
70 | 
71 | 	#[derive(Debug, Clone, Copy)]
72 | 	pub(crate) struct UserAddress {
73 | 		pub(crate) socket_addr: SocketAddr,
74 | 	}
75 | 
76 | 	impl<'de> Deserialize<'de> for UserAddress {
77 | 		fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
78 | 		where
79 | 			D: Deserializer<'de>,
80 | 		{
81 | 			deserializer.deserialize_str(UserAddressVisitor)
82 | 		}
83 | 	}
84 | 
85 | 	/// Visitor to deserialize a `SocketAddr` using ToSocketAddrs.
86 | 	struct UserAddressVisitor;
87 | 	impl<'de> de::Visitor<'de> for UserAddressVisitor {
88 | 		type Value = UserAddress;
89 | 		fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
90 | 			write!(formatter, "a socket address in the form `host:port`")
91 | 		}
92 | 
93 | 		fn visit_str<E>(self, v: &str) -> Result<Self::Value, E>
94 | 		where
95 | 			E: de::Error,
96 | 		{
97 | 			let socket_addr =
98 | 				v.to_socket_addrs().map_err(E::custom)?.next().ok_or_else(
99 | 					|| E::custom("provided host did not resolve to an address"),
100 | 				)?;
101 | 
102 | 			Ok(UserAddress { socket_addr })
103 | 		}
104 | 	}
105 | }
106 | 
107 | mod level {
108 | 	use std::{collections::HashMap, fmt};
109 | 
110 | 	use serde::{de, Deserialize, Deserializer};
111 | 	use tracing::metadata::LevelFilter;
112 | 
113 | 	struct LevelFilterWrapper(LevelFilter);
114 | 
115 | 	/// Visitor to deserialize a `LevelFilter` from a string.
116 | 	struct LevelFilterVisitor;
117 | 	impl<'de> de::Visitor<'de> for LevelFilterVisitor {
118 | 		type Value = LevelFilterWrapper;
119 | 		fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
120 | 			write!(
121 | 				formatter,
122 | 				"a logging level (trace, debug, info, warn, error, off)"
123 | 			)
124 | 		}
125 | 
126 | 		fn visit_str<E>(self, v: &str) -> Result<Self::Value, E>
127 | 		where
128 | 			E: de::Error,
129 | 		{
130 | 			match v {
131 | 				"off" | "OFF" => Ok(LevelFilter::OFF),
132 | 				"trace" | "TRACE" => Ok(LevelFilter::TRACE),
133 | 				"debug" | "DEBUG" => Ok(LevelFilter::DEBUG),
134 | 				"info" | "INFO" => Ok(LevelFilter::INFO),
135 | 				"warn" | "WARN" => Ok(LevelFilter::WARN),
136 | 				"error" | "ERROR" => Ok(LevelFilter::ERROR),
137 | 				_ => Err(E::invalid_value(de::Unexpected::Str(v), &self)),
138 | 			}
139 | 			.map(LevelFilterWrapper)
140 | 		}
141 | 	}
142 | 
143 | 	impl<'de> Deserialize<'de> for LevelFilterWrapper {
144 | 		fn deserialize<D>(d: D) -> Result<Self, D::Error>
145 | 		where
146 | 			D: Deserializer<'de>,
147 | 		{
148 | 			d.deserialize_str(LevelFilterVisitor)
149 | 		}
150 | 	}
151 | 
152 | 	pub(super) fn deserialize_level_filter<'de, D>(
153 | 		d: D,
154 | 	) -> Result<LevelFilter, D::Error>
155 | 	where
156 | 		D: Deserializer<'de>,
157 | 	{
158 | 		LevelFilterWrapper::deserialize(d).map(|LevelFilterWrapper(f)| f)
159 | 	}
160 | 
161 | 	/// Visitor to deserialize a map of module paths to `LevelFilter`s.
162 | 	struct LevelFiltersVisitor;
163 | 	impl<'de> de::Visitor<'de> for LevelFiltersVisitor {
164 | 		type Value = HashMap<String, LevelFilter>;
165 | 		fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
166 | 			write!(formatter, "a table of modules to logging levels")
167 | 		}
168 | 
169 | 		fn visit_map<A>(self, mut filters: A) -> Result<Self::Value, A::Error>
170 | 		where
171 | 			A: de::MapAccess<'de>,
172 | 		{
173 | 			let mut map = HashMap::new();
174 | 
175 | 			while let Some((module, LevelFilterWrapper(filter))) =
176 | 				filters.next_entry::<String, LevelFilterWrapper>()?
177 | 			{
178 | 				map.insert(module, filter);
179 | 			}
180 | 
181 | 			Ok(map)
182 | 		}
183 | 	}
184 | 
185 | 	pub(super) fn deserialize_level_filters<'de, D>(
186 | 		d: D,
187 | 	) -> Result<HashMap<String, LevelFilter>, D::Error>
188 | 	where
189 | 		D: Deserializer<'de>,
190 | 	{
191 | 		d.deserialize_map(LevelFiltersVisitor)
192 | 	}
193 | }
194 | 
195 | use level::{deserialize_level_filter, deserialize_level_filters};
196 | 
197 | mod log_format {
198 | 	use std::fmt;
199 | 
200 | 	use serde::{de, Deserialize, Deserializer};
201 | 
202 | 	#[derive(Debug)]
203 | 	pub(crate) enum LogFormat {
204 | 		Compact,
205 | 		Pretty,
206 | 		Json,
207 | 	}
208 | 
209 | 	/// Visitor to deserialize a `LogFormat` from a string.
210 | 	struct LogFormatVisitor;
211 | 	impl<'de> de::Visitor<'de> for LogFormatVisitor {
212 | 		type Value = LogFormat;
213 | 		fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
214 | 			write!(formatter, "a log format (compact, pretty, json)")
215 | 		}
216 | 
217 | 		fn visit_str<E>(self, v: &str) -> Result<Self::Value, E>
218 | 		where
219 | 			E: de::Error,
220 | 		{
221 | 			match v {
222 | 				"compact" | "COMPACT" => Ok(LogFormat::Compact),
223 | 				"pretty" | "PRETTY" => Ok(LogFormat::Pretty),
224 | 				"json" | "JSON" => Ok(LogFormat::Json),
225 | 				_ => Err(E::invalid_value(de::Unexpected::Str(v), &self)),
226 | 			}
227 | 		}
228 | 	}
229 | 
230 | 	impl<'de> Deserialize<'de> for LogFormat {
231 | 		fn deserialize<D>(d: D) -> Result<Self, D::Error>
232 | 		where
233 | 			D: Deserializer<'de>,
234 | 		{
235 | 			d.deserialize_str(LogFormatVisitor)
236 | 		}
237 | 	}
238 | }
239 | 
240 | pub(crate) use log_format::LogFormat;
241 | #[cfg(feature = "monitoring")]
242 | pub(crate) use user_address::UserAddress;
243 | 
244 | /// Settings for the highlighting behavior of the bot.
245 | #[cfg(feature = "bot")]
246 | #[derive(Debug, Deserialize)]
247 | pub(crate) struct BehaviorSettings {
248 | 	/// Maximum number of keywords allowed for one user.
249 | 	#[serde(alias = "maxkeywords")]
250 | 	pub(crate) max_keywords: u32,
251 | 
252 | 	/// Duration to wait for activity before sending a notification.
253 | 	#[serde(with = "humantime_serde")]
254 | 	#[cfg(feature = "bot")]
255 | 	pub(crate) patience: Duration,
256 | 
257 | 	/// Duration to wait before deleting notifications.
258 | 	#[serde(
259 | 		alias = "notificationlifetime",
260 | 		with = "humantime_serde::option",
261 | 		default
262 | 	)]
263 | 	#[cfg(feature = "bot")]
264 | 	pub(crate) notification_lifetime: Option<Duration>,
265 | 
266 | 	/// Deprecated method to specify patience.
267 | 	#[serde(
268 | 		deserialize_with = "deserialize_duration",
269 | 		alias = "patienceseconds",
270 | 		default
271 | 	)]
272 | 	#[cfg(feature = "bot")]
273 | 	pub(crate) patience_seconds: Option<Duration>,
274 | }
275 | 
276 | /// Settings for the account of the bot.
277 | #[cfg(feature = "bot")]
278 | #[derive(Debug, Deserialize)]
279 | pub(crate) struct BotSettings {
280 | 	/// Bot token to log into Discord with.
281 | 	pub(crate) token: String,
282 | 	/// ID of the bot's application.
283 | 	#[serde(alias = "applicationid")]
284 | 	pub(crate) application_id: u64,
285 | 	/// Whether this bot is private or not.
286 | 	///
287 | 	/// Controls whether the `about` command outputs an invite link.
288 | 	pub(crate) private: bool,
289 | 	#[serde(alias = "testguild")]
290 | 	pub(crate) test_guild: Option<GuildId>,
291 | }
292 | 
293 | /// Settings for various logging facilities.
294 | #[derive(Debug, Deserialize)]
295 | pub(crate) struct LoggingSettings {
296 | 	/// Webhook URL to send error/panic messages to.
297 | 	#[cfg(feature = "reporting")]
298 | 	pub(crate) webhook: Option<Url>,
299 | 	/// Address to find Jaeger agent to send traces to.
300 | 	#[cfg(feature = "monitoring")]
301 | 	pub(crate) jaeger: Option<UserAddress>,
302 | 
303 | 	/// Percentage of traces to sample.
304 | 	///
305 | 	/// See [`TraceIdRatioBased`](opentelemetry::sdk::trace::Sampler::TraceIdRatioBased).
306 | 	#[cfg(feature = "monitoring")]
307 | 	#[serde(alias = "sampleratio")]
308 | 	pub(crate) sample_ratio: f64,
309 | 
310 | 	/// Global level that log messages should be filtered to.
311 | 	#[serde(deserialize_with = "deserialize_level_filter")]
312 | 	pub(crate) level: LevelFilter,
313 | 	/// Per-module log level filters.
314 | 	#[serde(deserialize_with = "deserialize_level_filters")]
315 | 	pub(crate) filters: HashMap<String, LevelFilter>,
316 | 
317 | 	/// Whether or not to use ANSI color codes.
318 | 	pub(crate) color: bool,
319 | 	/// Standard output logging format.
320 | 	pub(crate) format: LogFormat,
321 | }
322 | 
323 | /// Settings for the database.
324 | #[derive(Debug, Deserialize)]
325 | pub(crate) struct DatabaseSettings {
326 | 	/// Path to the directory that should hold the SQLite database.
327 | 	#[cfg(feature = "sqlite")]
328 | 	pub(crate) path: Option<PathBuf>,
329 | 	/// Database connection URL.
330 | 	#[cfg(feature = "sqlite")]
331 | 	pub(crate) url: Option<Url>,
332 | 	/// Database connection URL.
333 | 	#[cfg(not(feature = "sqlite"))]
334 | 	pub(crate) url: Url,
335 | 	/// Whether or not to run automatic daily backups.
336 | 	#[cfg(feature = "backup")]
337 | 	pub(crate) backup: Option<bool>,
338 | }
339 | 
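`Settings::new` below layers three sources: built-in defaults, a TOML file, then `HIGHLIGHTS`-prefixed environment variables. The file is named by the `HIGHLIGHTS_CONFIG` variable and falls back to `./config.toml` when that variable is unset; a standalone sketch mirroring that lookup (the helper name here is hypothetical):

use std::env::{self, VarError};

// Mirrors the filename resolution in `Settings::new` below: HIGHLIGHTS_CONFIG
// wins, otherwise ./config.toml is tried (and silently skipped if absent).
fn config_path() -> Result<String, VarError> {
    match env::var("HIGHLIGHTS_CONFIG") {
        Ok(path) => Ok(path),
        Err(VarError::NotPresent) => Ok("./config.toml".to_owned()),
        Err(e) => Err(e),
    }
}

fn main() {
    println!("would read settings from {}", config_path().unwrap());
}

Because the environment source is added last, `HIGHLIGHTS`-prefixed variables (nested keys joined with `_`, per the `Environment` source below) override both the defaults and anything in the file.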
340 | /// Collection of settings.
341 | #[derive(Debug, Deserialize)]
342 | pub(crate) struct Settings {
343 | 	#[cfg(feature = "bot")]
344 | 	pub(crate) behavior: BehaviorSettings,
345 | 	#[cfg(feature = "bot")]
346 | 	pub(crate) bot: BotSettings,
347 | 	pub(crate) logging: LoggingSettings,
348 | 	pub(crate) database: DatabaseSettings,
349 | }
350 | 
351 | impl Settings {
352 | 	/// Builds settings from environment variables and the configuration file.
353 | 	pub(crate) fn new() -> Result<Self, ConfigError> {
354 | 		let b = ConfigBuilder::<DefaultState>::default();
355 | 
356 | 		#[cfg(feature = "bot")]
357 | 		let b = b.set_default("behavior.max_keywords", 100i64)?
358 | 			.set_default("behavior.patience", "2m")?
359 | 			.set_default("bot.private", false)?;
360 | 
361 | 		#[cfg(feature = "monitoring")]
362 | 		let b = b.set_default("logging.sample_ratio", 1.0f64)?;
363 | 
364 | 		let mut b = b
365 | 			.set_default("logging.level", "WARN")?
366 | 			.set_default("logging.filters.highlights", "INFO")?
367 | 			.set_default("logging.color", "true")?
368 | 			.set_default("logging.format", "compact")?;
369 | 
370 | 		let filename = env::var("HIGHLIGHTS_CONFIG").or_else(|e| match e {
371 | 			VarError::NotPresent => Ok("./config.toml".to_owned()),
372 | 			e => Err(ConfigError::Foreign(Box::new(e))),
373 | 		})?;
374 | 		match read_to_string(filename) {
375 | 			Ok(conf) => {
376 | 				b = b.add_source(File::from_str(&conf, FileFormat::Toml));
377 | 			}
378 | 			Err(e) if e.kind() == ErrorKind::NotFound => (),
379 | 			Err(e) => return Err(ConfigError::Foreign(Box::new(e))),
380 | 		}
381 | 
382 | 		b.add_source(Environment::with_prefix("HIGHLIGHTS").separator("_"))
383 | 			.build()?
384 | 			.try_deserialize()
385 | 			.map(|mut settings: Settings| {
386 | 				if let Some(old) = settings.behavior.patience_seconds {
387 | 					settings.behavior.patience = old;
388 | 				}
389 | 				settings
390 | 			})
391 | 	}
392 | }
393 | 
394 | /// Settings configured by the hoster.
395 | static SETTINGS: OnceCell<Settings> = OnceCell::new();
396 | 
397 | /// Gets the settings configured by the hoster.
398 | pub(crate) fn settings() -> &'static Settings {
399 | 	SETTINGS.get().expect("Settings were not initialized")
400 | }
401 | 
402 | /// Initialize the bot's [`Settings`].
403 | pub(crate) fn init() -> Result<()> {
404 | 	match Settings::new() {
405 | 		Ok(settings) => {
406 | 			let _ = SETTINGS.set(settings);
407 | 			Ok(())
408 | 		}
409 | 		Err(e) => {
410 | 			bail!("Failed to parse settings: {}", e);
411 | 		}
412 | 	}
413 | }
414 | 
--------------------------------------------------------------------------------